First let's import the modules we're going to use for this Linear Regression problem
import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt %matplotlib inline
Now let's load the data set
data = pd.read_csv("http://bit.ly/w-data") print("Data imported successfully")
Data imported successfully
Plotting the Data to get a sense of what we're dealing with
data.plot(x = "Hours", y = "Scores", style = 'o') plt.title("Hours vs Scores") plt.xlabel("Hours") plt.ylabel("Scores")
Splitting the Data into inputs (X) and Outputs (y)
X = data.iloc[:, :-1].values y = data.iloc[:, 1].values
Splitting the Dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state = 0)
Fitting the linear model (creating an instance and training)
model = LinearRegression() model.fit(X_train, y_train) print("Training Completed")
Training Completed
Plotting the resulting line
line = model.coef_ * X + model.intercept_ plt.scatter(X, y) plt.plot(X, line)
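Before looking at individual predictions, it helps to quantify the fit on the held-out split with standard regression metrics. The following is a minimal sketch (not part of the original notebook) using scikit-learn's metrics module; it assumes the `model`, `X_test` and `y_test` variables defined in the cells above.

```python
# Sketch: score the fitted line on the test split.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

y_hat = model.predict(X_test)
print("MAE :", mean_absolute_error(y_test, y_hat))
print("RMSE:", np.sqrt(mean_squared_error(y_test, y_hat)))
print("R^2 :", r2_score(y_test, y_hat))
```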
Testing the results
y_pred = model.predict(X_test) df = pd.DataFrame({"Actual" : y_test, "Predicted" : y_pred}) print(df)
Actual Predicted 0 20 16.884145 1 27 33.732261 2 69 75.357018 3 30 26.794801 4 62 60.491033
Making predictions on our own values
hours = 9.25 own_pred = model.predict([[hours]]) print(f"Number of Hours = {hours}\nPredicted Score = {own_pred[0]}")
Number of Hours = 9.25 Predicted Score = 93.69173248737535
from google.colab import drive drive.mount('/content/drive') train_df = pd.read_csv('/content/drive/MyDrive/COURSES/CS231/train_split.txt', sep=" ", header=None) train_df.columns = ['patient id', 'file_paths', 'labels', 'data source'] train_df = train_df.drop(['patient id', 'data source'], axis=1) train_df.head() test_df = pd.read_csv('/content/drive/MyDrive/COURSES/CS231/test_split.txt', sep=" ", header=None) test_df.columns = ['patient id', 'file_paths', 'labels', 'data source'] test_df = test_df.drop(['patient id', 'data source'], axis=1) test_df.head() TRAIN_PATH = "/content/drive/MyDrive/COURSES/CS231/data/train" TEST_PATH = "/content/drive/MyDrive/COURSES/CS231/data/test"
Balancing Classes
train_df['labels'].value_counts() file_count = 4649 samples = [] for category in train_df['labels'].unique(): category_slice = train_df.query("labels == @category") samples.append(category_slice.sample(file_count, replace=False, random_state=1)) train_df = pd.concat(samples, axis=0).sample(frac=1.0, random_state=1).reset_index(drop=True) print(train_df['labels'].value_counts()) print(len(train_df))
normal 4649 COVID-19 4649 pneumonia 4649 Name: labels, dtype: int64 13947
Splitting train_df into train_df and valid_df
train_df, valid_df = train_test_split(train_df, train_size=0.9, random_state=0) print(train_df.labels.value_counts()) print(valid_df.labels.value_counts()) print(test_df.labels.value_counts())
COVID-19 4213 normal 4189 pneumonia 4150 Name: labels, dtype: int64 pneumonia 499 normal 460 COVID-19 436 Name: labels, dtype: int64 COVID-19 274 pneumonia 105 normal 100 Name: labels, dtype: int64
Image Data Generators
batch_size = 32 img_height = 224 img_width = 224 target_size = (img_height, img_width) train_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet_v2.preprocess_input, horizontal_flip=True, zoom_range=0.1) test_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet_v2.preprocess_input) train_generator = train_datagen.flow_from_dataframe(train_df, directory=TRAIN_PATH, x_col='file_paths', y_col='labels', target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical') valid_generator = test_datagen.flow_from_dataframe(valid_df, directory=TRAIN_PATH, x_col='file_paths', y_col='labels', target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical') test_generator = test_datagen.flow_from_dataframe(test_df, directory=TEST_PATH, x_col='file_paths', y_col='labels', target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical', shuffle = False)
Found 12552 validated image filenames belonging to 3 classes. Found 1395 validated image filenames belonging to 3 classes. 
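The generator and model cells in this notebook call `ImageDataGenerator`, `tf.keras`, `ResNet50V2`, Keras layers, the `Adam` optimizer, Keras callbacks and scikit-learn metrics without showing the corresponding imports, which were presumably defined in an earlier cell that is not included in this excerpt. A plausible import cell (an assumption, not taken from the original notebook) would be:

```python
# Assumed imports for the data-generator, model, and evaluation cells (not shown in the original excerpt).
import os
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.layers import Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import confusion_matrix, classification_report
```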
Found 479 validated image filenames belonging to 3 classes.Create Modelbase_model = ResNet50V2(include_top=False, weights="imagenet", input_shape=(img_height, img_width, 3)) for layer in base_model.layers[:190]: layer.trainable = False for i, layer in enumerate(base_model.layers): print(i, layer.name, "-", layer.trainable) model = tf.keras.Sequential([ base_model, Flatten(), BatchNormalization(), Dense(256, activation='relu'), Dropout(0.5), BatchNormalization(), Dense(128, activation='relu'), Dropout(0.5), BatchNormalization(), Dense(64, activation='relu'), Dropout(0.5), BatchNormalization(), Dense(3, activation='softmax'), ]) lr = 5e-3 model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=lr), metrics=['accuracy']) model.summary()Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= resnet50v2 (Functional) (None, 7, 7, 2048) 23564800 _________________________________________________________________ flatten (Flatten) (None, 100352) 0 _________________________________________________________________ batch_normalization (BatchNo (None, 100352) 401408 _________________________________________________________________ dense (Dense) (None, 256) 25690368 _________________________________________________________________ dropout (Dropout) (None, 256) 0 _________________________________________________________________ batch_normalization_1 (Batch (None, 256) 1024 ________________________________________________________[...]Callbackspatience = 10 # stop_patience = 10 factor = 0.1 callbacks = [ ModelCheckpoint("resnet50v2-final.h5", save_best_only=True, verbose = 0), # EarlyStopping(patience=stop_patience, monitor='val_loss', verbose=1), ReduceLROnPlateau(monitor='val_loss', factor=factor, patience=patience, min_lr=1e-6, verbose=1) ]Model Trainingepochs = 50 history = model.fit(train_generator, validation_data=valid_generator, epochs=epochs, callbacks=callbacks, verbose=1) train_loss = [0.6487, 0.4469, 0.4074, 0.3849, 0.3576, 0.3427, 0.3471, 0.3380, 0.3410, 0.3383, 0.3361, 0.2940, 0.2783, 0.2717, 0.26, 0.2624, 0.2369, 0.2470, 0.2358, 0.2311, 0.2263, 0.2218, 0.2233, 0.2167, 0.2231, 0.2227, 0.2213, 0.2096, 0.2241, 0.2239, 0.2176, 0.2176, 0.2072, 0.2219, 0.2164, 0.2101, 0.2049, 0.2178, 0.2090, 0.2152, 0.2185, 0.2181, 0.2128, 0.2176, 0.2096, 0.2130, 0.2160, 0.2083, 0.2108, 0.2143] val_loss = [0.3612, 0.3654, 0.6374, 0.3819, 0.5943, 1.1585, 0.4505, 0.4302, 0.5506, 0.6574, 1.1695, 1.3079, 1.7884, 3.1584, 5.1392, 4.6225, 4.8016, 4.9733, 4.8234, 5.7820, 6.4980, 4.4179, 4.2063, 4.1806, 4.2003, 5.5932, 1.5663, 1.1069, 3.2203, 2.6253, 3.3542, 4.0708, 4.2337, 5.4792, 4.8195, 3.8897, 4.0073, 4.3476, 5.2787, 5.0320, 5.5412, 3.6614, 3.8046, 4.0843, 3.6718, 3.9051, 4.3147, 4.5132, 6.02, 4.8454] plt.plot(train_loss, label='Loss (training data)') plt.plot(val_loss, label='Loss (validation data)') plt.title('Loss for Training') plt.ylabel('Loss') plt.xlabel('No. 
epoch') plt.legend(['train', 'validation'], loc="upper left") plt.savefig('/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-1') plt.show() plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig("plot/resnet50_plot.png") plt.show()Predictions on Test Setbest_model = model best_model.load_weights('/content/drive/MyDrive/COURSES/CS231/resnet50v2-final.h5') best_model.evaluate(test_generator) preds = best_model.predict(test_generator) def print_info( test_gen, preds, print_code, save_dir, subject ): class_dict=test_gen.class_indices labels= test_gen.labels file_names= test_gen.filenames error_list=[] true_class=[] pred_class=[] prob_list=[] new_dict={} error_indices=[] y_pred=[] for key,value in class_dict.items(): new_dict[value]=key # dictionary {integer of class number: string of class name} # store new_dict as a text fine in the save_dir classes=list(new_dict.values()) # list of string of class names dict_as_text=str(new_dict) dict_name= subject + '-' +str(len(classes)) +'.txt' dict_path=os.path.join(save_dir, dict_name) with open(dict_path, 'w') as x_file: x_file.write(dict_as_text) errors=0 for i, p in enumerate(preds): pred_index=np.argmax(p) true_index=labels[i] # labels are integer values if pred_index != true_index: # a misclassification has occurred error_list.append(file_names[i]) true_class.append(new_dict[true_index]) pred_class.append(new_dict[pred_index]) prob_list.append(p[pred_index]) error_indices.append(true_index) errors=errors + 1 y_pred.append(pred_index) if print_code !=0: if errors>0: if print_code>errors: r=errors else: r=print_code msg='{0:^28s}{1:^28s}{2:^28s}{3:^16s}'.format('Filename', 'Predicted Class' , 'True Class', 'Probability') print_in_color(msg, (0,255,0),(55,65,80)) for i in range(r): msg='{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(error_list[i], pred_class[i],true_class[i], ' ', prob_list[i]) print_in_color(msg, (255,255,255), (55,65,60)) #print(error_list[i] , pred_class[i], true_class[i], prob_list[i]) else: msg='With accuracy of 100 % there are no errors to print' print_in_color(msg, (0,255,0),(55,65,80)) if errors>0: plot_bar=[] plot_class=[] for key, value in new_dict.items(): count=error_indices.count(key) if count!=0: plot_bar.append(count) # list containg how many times a class c had an error plot_class.append(value) # stores the class fig1=plt.figure() fig1.set_figheight(len(plot_class)/3) fig1.set_figwidth(10) plt.style.use('fivethirtyeight') for i in range(0, len(plot_class)): c=plot_class[i] x=plot_bar[i] plt.barh(c, x, ) plt.title( ' Errors by Class on Test Set') if len(classes)<= 30: # create a confusion matrix and a test report y_true= np.array(labels) y_pred=np.array(y_pred) cm = confusion_matrix(y_true, y_pred ) clr = classification_report(y_true, y_pred, target_names=classes) length=len(classes) if length<8: fig_width=8 fig_height=8 else: fig_width= int(length * .5) fig_height= int(length * .5) fig2 = plt.figure(figsize=(fig_width, fig_height)) sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False) plt.xticks(np.arange(length)+.5, classes, rotation= 90) plt.yticks(np.arange(length)+.5, classes, rotation=0) plt.xlabel("Predicted") plt.ylabel("Actual") plt.title("Confusion Matrix") plt.savefig("/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-4.png", dpi = 100) plt.show() print("Classification Report:\n----------------------\n", clr) 
fig1.savefig("/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-3.png", dpi = 100) save_dir = '/content/drive/MyDrive/COURSES/CS231' subject = "kq" print_code = 0 print_info(test_generator, preds, print_code, save_dir, subject)**Monitoring and Optimizing Quantum Circuits**import numpy as np # Importing standard Qiskit libraries from qiskit import QuantumCircuit, transpile, Aer, IBMQ, execute from qiskit.tools.jupyter import * from qiskit.visualization import * from ibm_quantum_widgets import * from qiskit.providers.aer import QasmSimulator # Loading your IBM Quantum account(s) provider = IBMQ.load_account()**Monitoring and Tracking Jobs**# Import the Qiskit Jupyter tools from qiskit.tools import jupyter # Initialize the job tracker to automatically track all jobs %qiskit_job_watcher # Let's run a simple circuit on the least busy quantum device # and check the job watcher widget. from qiskit.providers.ibmq import least_busy backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= (2) and not x.configuration().simulator and x.status().operational==True)) #Create a simple circuit qc = QuantumCircuit(1) qc.h(0) qc.measure_all() #Execute the circuit on the backend job = execute(qc, backend) #Disable the job watcher %qiskit_disable_job_watcher #Display the list of all available backends and provide #a brief overview of each %qiskit_backend_overview**Transpiling a Circuit**# Import the transpiler passes object from qiskit.transpiler import passes # List out all the passes available print(dir(passes)) #Basic Toffoli gate, qc = QuantumCircuit(3) qc.ccx(0,1,2) qc.draw() qc_decomposed = qc.decompose() qc_decomposed.draw() #Basic circuit with a single and multi-qubit gates qc = QuantumCircuit(4) qc.h(0) qc.cx(0,1) qc.cx(0,2) qc.cx(0,3) qc.draw() #Print the depth of both inital and decomposed circuit print('Initial circuit depth: ', qc.depth()) print('Decomposed circuit depth: ', qc_decomposed.depth()) #Get the number of operators in initial circuit print('Initial circuit operation count: ', qc.count_ops()) #Get the number of operators in decomposed circuit print('Decomposed circuit operation count: ', qc_decomposed.count_ops())Initial circuit depth: 4 Decomposed circuit depth: 11 Initial circuit operation count: OrderedDict([('cx', 3), ('h', 1)]) Decomposed circuit operation count: OrderedDict([('cx', 6), ('t', 4), ('tdg', 3), ('h', 2)])**Configuration and Optimization**# Get the backend device: ibmq_santiago backend_santiago = provider.get_backend('ibmq_santiago') # Launch backend viewer of ibmq_santiago backend_santiago # Get the backend device: ibmq_lima backend_lima = provider.get_backend('ibmq_lima') # Launch backend viewer of ibmq_lima backend_lima # Visualize the coupling directional map between the qubits plot_gate_map(backend_santiago, plot_directed=True) # Visualize the coupling directional map between the qubits plot_gate_map(backend_lima, plot_directed=True) # Quantum circuit with a single and multi-qubit gates qc = QuantumCircuit(4) qc.h(0) qc.cx(0,1) qc.cx(0,2) qc.cx(0,3) qc.draw() # Transpile the circuit with an optimization level = 0 qc_santiago_0 = transpile(qc, backend_santiago, seed_transpiler=10258, optimization_level=0) # Print out the depth of the circuit print('Depth:', qc_santiago_0.depth()) # Plot the resulting layout of the quantum circuit after Layout plot_circuit_layout(qc_santiago_0, backend_santiago) # Draw the transpiled circuit pertaining to Santiago qc_santiago_0.draw() # View the transpiled circuit with an optimization level = 0 
qc_lima_0 = transpile(qc, backend_lima, seed_transpiler=10258, optimization_level=0) print('Depth:', qc_lima_0.depth()) plot_circuit_layout(qc_lima_0, backend_lima) # Draw the transpiled circuit pertaining to Lima qc_lima_0.draw() # Transpile the circuit with the optimization level = 3 qc_transpiled_santiago = transpile(qc, backend_santiago, optimization_level=3) # Print the depth of the transpiled circuit print('Depth:', qc_transpiled_santiago.depth()) # Print the number of operations of the transpiled circuit print('Ops count: ', qc_transpiled_santiago.count_ops()) # Plot the layout mapping of the transpiled circuit plot_circuit_layout(qc_transpiled_santiago, backend_santiago) # Redraw the transpiled circuit at new level qc_transpiled_santiago.draw() # Transpile the quantum circuit with the optimization level = 3 qc_transpiled_lima = transpile(qc, backend_lima, optimization_level=3) # Get the depth and operation count of the transpiled circuit. print('Depth:', qc_transpiled_lima.depth()) print('Ops count: ', qc_transpiled_lima.count_ops()) # Print the circuit layout plot_circuit_layout(qc_transpiled_lima, backend_lima) # View the ibmq_quito backend device configuration and properties backend = provider.get_backend('ibmq_quito') backend # View the backend coupling map, displayed as CNOTs (Control-Target) backend = provider.get_backend('ibmq_quito') # Extract the coupling map from the backend ibmqquito_coupling_map = backend.configuration().coupling_map # List out the extracted coupling map ibmqquito_coupling_map # Transpile a custom circuit using only the coupling map. # Set the backend to ‘None’ so it will force using the coupling map provided. qc_custom = transpile(qc, backend=None, coupling_map=ibmqquito_coupling_map) # Draw the resulting custom topology circuit. qc_custom.draw() # Create our own coupling map (custom topology) custom_linear_topology = [[0,1],[1,2],[2,3],[3,4]] # Set the coupling map to our custom linear topology qc_custom = transpile(qc, backend=None, coupling_map=custom_linear_topology) # Draw the resulting circuit. qc_custom.draw() # Import the PassManager and a few Passes from qiskit.transpiler import PassManager, CouplingMap from qiskit.transpiler.passes import TrivialLayout, BasicSwap # Create a TrivialLayout based on the ibmqx2 coupling map trivial = TrivialLayout(CouplingMap(ibmqquito_coupling_map)) pm = PassManager() # Append the TrivialLayout to the PassManager pm.append(trivial) # Run the PassManager and draw the resulting circuit tv_qc = pm.run(qc) tv_qc.draw() # Create a BasicSwap based on the ibmq_quito coupling map we used earlier basic_swap = BasicSwap(CouplingMap(ibmqquito_coupling_map)) #Add the BasicSwap to the PassManager pm = PassManager(basic_swap) # Run the PassManager and draw the results new_qc = pm.run(qc) new_qc.draw() # Sample quantum circuit qc = QuantumCircuit(4) qc.h(0) qc.cx(0,1) qc.barrier() qc.cx(0,2) qc.cx(0,3) qc.barrier() qc.cz(3,0) qc.h(0) qc.measure_all() # Draw the circuit using the default renderer qc.draw() qc.draw('latex')How to use `DataAssistants`* A `DataAssistant` enables you to quickly profile your data by providing a thin API over a pre-constructed `RuleBasedProfiler` configuration.* As a result of the profiling, you get back a result object consisting of * `Metrics` that describe the current state of the data * `Expectations` that are able to alert you if the data deviates from the expected state in the future. 
* `DataAssistant` results can also be plotted to help you understand your data visually.
* There are multiple `DataAssistants` centered around a theme (volume, nullity, etc.), and this notebook walks you through an example `VolumeDataAssistant` to show the capabilities and potential of this new interface.
What is a `VolumeDataAssistant`?
* The `VolumeDataAssistant` allows you to automatically build a set of Expectations that alerts you if the volume of records significantly deviates from the norm. More specifically, the `VolumeDataAssistant` profiles the data and outputs an `ExpectationSuite` containing 2 `Expectation` types
* `expect_table_row_count_to_be_between`
* `expect_column_unique_value_count_to_be_between`
with automatically selected values for the upper and lower bounds. The ranges are selected using a bootstrapping step on the sample `Batches`. This allows the `DataAssistant` to account for outliers, allowing it to obtain a more accurate estimate of the true ranges by taking into account the underlying distribution.
import great_expectations as ge from great_expectations.core.yaml_handler import YAMLHandler from great_expectations.core.batch import BatchRequest from great_expectations.core import ExpectationSuite from great_expectations.core.expectation_configuration import ExpectationConfiguration from great_expectations.validator.validator import Validator from great_expectations.rule_based_profiler.data_assistant import ( DataAssistant, VolumeDataAssistant, ) from great_expectations.rule_based_profiler.types.data_assistant_result import ( VolumeDataAssistantResult, ) from typing import List yaml = YAMLHandler()
/Users/work/Development/ENVs/supercon_ge/lib/python3.8/site-packages/snowflake/connector/options.py:94: UserWarning: You have an incompatible version of 'pyarrow' installed (7.0.0), please install a version that adheres to: 'pyarrow<3.1.0,>=3.0.0; extra == "pandas"' warn_incompatible_dep(
Set-up: Adding `taxi_data` `Datasource`
* Add `taxi_data` as a new `Datasource`
* We are using an `InferredAssetFilesystemDataConnector` to connect to data in the `test_sets/taxi_yellow_tripdata_samples` folder and get one `DataAsset` (`yellow_tripdata_sample`) that has 36 Batches, corresponding to one batch per month from 2018-2020.
data_context: ge.DataContext = ge.get_context() data_path: str = "../../../../test_sets/taxi_yellow_tripdata_samples" datasource_config: dict = { "name": "taxi_data_all_years", "class_name": "Datasource", "module_name": "great_expectations.datasource", "execution_engine": { "module_name": "great_expectations.execution_engine", "class_name": "PandasExecutionEngine", }, "data_connectors": { "inferred_data_connector_all_years": { "class_name": "InferredAssetFilesystemDataConnector", "base_directory": data_path, "default_regex": { "group_names": ["data_asset_name", "year", "month"], "pattern": "(yellow_tripdata_sample)_(2018|2019|2020)-(\\d.*)\\.csv", }, }, }, } data_context.test_yaml_config(yaml.dump(datasource_config)) # add_datasource only if it doesn't already exist in our configuration try: data_context.get_datasource(datasource_config["name"]) except ValueError: data_context.add_datasource(**datasource_config)
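The `default_regex` above is what maps file names to batches: the first capture group becomes the data asset name, and the `year` and `month` groups become batch identifiers. A quick way to sanity-check such a pattern outside Great Expectations is plain `re`; this is a sketch, and the file name below is a hypothetical example following the folder's naming scheme.

```python
import re

# Same pattern as in the datasource config above (written as a raw string).
pattern = r"(yellow_tripdata_sample)_(2018|2019|2020)-(\d.*)\.csv"
match = re.fullmatch(pattern, "yellow_tripdata_sample_2019-01.csv")
print(match.groups())  # ('yellow_tripdata_sample', '2019', '01')
```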
Configure `BatchRequest`
In this example, we will be using a `BatchRequest` that will return all 36 batches of data from the `taxi_data` dataset. We will refer to the `Datasource` and `DataConnector` configured in the previous step.
multi_batch_all_years_batch_request: BatchRequest = BatchRequest( datasource_name="taxi_data_all_years", data_connector_name="inferred_data_connector_all_years", data_asset_name="yellow_tripdata_sample", ) batch_request: BatchRequest = multi_batch_all_years_batch_request
Run the `VolumeDataAssistant`
* The `VolumeDataAssistant` can be run directly from the `DataContext` by specifying `assistants` and `volume`, and passing in the `BatchRequest` from the previous step.
result: VolumeDataAssistantResult = data_context.assistants.volume.run(batch_request=batch_request)
Explore `DataAssistantResult` by plotting
The resulting `DataAssistantResult` is best explored by plotting. For each `Domain` considered (`Table` and `Column` in our case), the plots will display the value for each `Batch` (36 in total).
result.plot_metrics()
An additional layer of information that can be retrieved from the `DataAssistantResult` is the `prescriptive` information, which corresponds to the range values of the `Expectations` that result from the `DataAssistant` run. For example, the `vendor_id` plot will show that the number of distinct `vendor_id` values ranged from 2 to 3 across all of our `Batches`, as indicated by the blue band around the plotted values. These values correspond to the `max_value` and `min_value` for the resulting `Expectation`, `expect_column_unique_value_count_to_be_between`.
result.plot_expectations_and_metrics()
Save `ExpectationSuite`
Finally, we can save the `ExpectationConfiguration` object resulting from the `DataAssistant` in our `ExpectationSuite` and then use the `DataContext`'s `save_expectation_suite()` method to pass in our `ExpectationSuite`, updated with the `DataAssistant`.
suite: ExpectationSuite = ExpectationSuite(expectation_suite_name="taxi_data_suite") resulting_configurations: List[ExpectationConfiguration] = suite.add_expectation_configurations(expectation_configurations=result.expectation_configurations) data_context.save_expectation_suite(expectation_suite=suite)
Optional: Clean-up Directory
As part of running this notebook, the `DataAssistant` will create a number of ExpectationSuite configurations in the `great_expectations/expectations/tmp` directory. Optionally run the following cell to clean up the directory.
#import shutil, os #shutil.rmtree("great_expectations/expectations/tmp") #os.remove("great_expectations/expectations/.ge_store_backend_id") #os.remove("great_expectations/expectations/taxi_data_suite.json")
Traveling Salesperson Problem
The canonical [Traveling Salesperson Problem](https://en.wikipedia.org/wiki/Travelling_salesman_problem) is stated as:
> "Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city and returns to the origin city?"
This is generalizable to finding the shortest [Hamiltonian cycle](http://mathworld.wolfram.com/HamiltonianCycle.html) on a fully connected graph (i.e. all nodes can be reached from all other nodes). This problem is [NP-hard](https://en.wikipedia.org/wiki/P_versus_NP_problem), meaning it is not possible for an algorithm to solve all instances of the problem quickly (i.e. in polynomial time). However, there are many approximate and heuristic approaches which can give reasonable solutions in shorter time.
place_name = 'New York City, NY, United States' place_roads = ox.graph_from_place(place_name) # save graph to file for reuse ox.io.save_graphml(place_roads, 'nyc_osmnx.graphml') # loading graph from a file place_roads = ox.io.load_graphml('nyc_osmnx.graphml') place_roads_nodes, place_roads_edges = ox.graph_to_gdfs(place_roads) fig = plt.figure(figsize=[10,10]) ax = fig.add_subplot(1,1,1) place_roads_edges.plot(ax=ax, color=[0, 0, 0], linewidth=0.5)
Let's say you wanted to do an ice cream crawl: you want to visit every ice cream shop in a city. What is the shortest route that takes you to every ice cream shop in the city and brings you back to your starting point?
place_ice_cream = ox.geometries.geometries_from_place(place_name, tags={"amenity":"ice_cream"}) #some of the ice cream shops return polygons instead of points, so we need to take their centroids place_ice_cream = place_ice_cream.to_crs("epsg:3857") #projecting to Web-Mercator for more accurate centroids place_ice_cream["geometry"] = place_ice_cream["geometry"].centroid place_ice_cream = place_ice_cream.to_crs("epsg:4326") #projecting back to lat/long place_ice_cream place_ice_cream ice_cream_nodes = ox.distance.nearest_nodes(place_roads, place_ice_cream.geometry.x, place_ice_cream.geometry.y) ice_cream_nodes
Exercise
Plot the locations of the ice cream shops on the map of the roads
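One possible solution sketch for the exercise is below. Like the rest of this notebook, it assumes the usual imports that are not shown in this excerpt (`import osmnx as ox`, `import networkx as nx`, `import numpy as np`, `import matplotlib.pyplot as plt`) and reuses the `place_roads_edges` and `place_ice_cream` GeoDataFrames built above, which share the same lat/long CRS.

```python
# Sketch: overlay the ice cream shop centroids on the road network.
fig, ax = plt.subplots(figsize=(10, 10))
place_roads_edges.plot(ax=ax, color="black", linewidth=0.5)
place_ice_cream.plot(ax=ax, color="red", markersize=12)
ax.set_title("Ice cream shops in " + place_name)
ax.set_axis_off()
plt.show()
```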
Compute shortest path matrix
shortest_path_matrix = np.zeros([len(ice_cream_nodes),len(ice_cream_nodes)]) for idx_i, orig in enumerate(ice_cream_nodes): shortest_paths = nx.single_source_dijkstra_path_length(place_roads, orig, weight='length') for idx_j, dest in enumerate(ice_cream_nodes): shortest_path_matrix[idx_i, idx_j] = shortest_paths[dest] shortest_path_matrix ice_cream_graph = nx.from_numpy_matrix(shortest_path_matrix, create_using=nx.MultiDiGraph) # new graph indexes from 0 ice_cream_graph.nodes # rename node labels using original labels ice_cream_graph = nx.relabel_nodes(ice_cream_graph,{k:v for k, v in zip(ice_cream_graph.nodes, ice_cream_nodes)}) ice_cream_graph.nodes
Convolutional Network Model
Train for 5 epochs
Observe the loss decreasing while training.
%%capture !python ConvolutionalNetwork.py --max_epochs=5
Inspect logs
⚠️ Make sure to **Stop** the cell execution after observing the logs
After only **5 epochs**, the model achieves **98.5% accuracy** on the MNIST test data set. It performs better than the previous neural network model.
!tensorboard --logdir=./lightning_logs
^C
Load trained model
Checkpoints are saved incrementally for each training session under `./lightning_logs/version_X`.
__import__("sys").path.append("..") import utils from ConvolutionalNetwork import ConvolutionalNetwork model = utils.load_trained_model_for_evaluation(ConvolutionalNetwork, 3) print(model)
ConvolutionalNetwork( (seq): Sequential( (0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1)) (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (2): ReLU() (3): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1)) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): ReLU() (6): Flatten(start_dim=1, end_dim=-1) (7): Linear(in_features=800, out_features=10, bias=True) (8): LogSoftmax(dim=1) ) )
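The 98.5% figure quoted above comes from the training run; a quick way to re-check it against the loaded checkpoint is to push the whole test set through the model. This is a sketch, assuming `model.mnist_test` is a standard torchvision-style dataset yielding `(image, label)` pairs and that the model maps a `(N, 1, 28, 28)` batch to log-probabilities, as the printed architecture suggests.

```python
import torch
from torch.utils.data import DataLoader

# Sketch: estimate test accuracy of the loaded checkpoint.
loader = DataLoader(model.mnist_test, batch_size=256, shuffle=False)
correct = total = 0
with torch.no_grad():
    for images, labels in loader:
        preds = model(images).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(f"Test accuracy: {correct / total:.4f}")
```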
Predict MNIST labels
import torch import random test_len = model.mnist_test.data.shape[0] for i in range(0, 10): orig_img, orig_label = model.mnist_test[random.randint(0, test_len - 1)] orig_img = orig_img.view(1, 1, 28, 28) classes = model(orig_img) predicted_label = torch.max(classes.data, 1).indices.item() print("Original label: ", orig_label, "Predicted label: ", predicted_label) utils.plot_images([orig_img.view(28, 28)])
Original label: 8 Predicted label: 8
Images
import matplotlib.pyplot as plt import numpy as np
Loading an image
To load images, we import the `imread` function.
from matplotlib.image import imread
The `imread()` function can import two types of images:
- color (RGB)
- black and white (0..255)
Here is a color image
img = imread('img/Bachalpsee.jpg') plt.imshow(img);
Here are the dimensions of the matrix:
- 188 pixels high
- 250 pixels wide
- 3 bytes to define a color
img.shape
Here is the pixel at position (25, 25). It is in the sky region and cyan in color. The blue and green components are at their maximum.
img[25, 25]
Indexing pixels
We can extract a slice of an image. In a matrix, the first index is the vertical axis (y) and the second index is the horizontal axis (x).
plt.imshow(img[70:120, 100:200]);
Annotating the image
We can overlay text on an image.
plt.imshow(img); plt.text(150, 25, 'peak', fontsize=16) plt.text(100, 110, 'lake', fontsize=16);
Displaying without axes
The `axis('off')` function hides the axes.
plt.imshow(img) plt.axis('off');
Black and white
Black-and-white images are displayed with a **colormap**.
img = imread('img/lake.jpg') plt.imshow(img, cmap='gray');
The image is 220x170 in size and of type `ndarray`.
img.shape type(img)
Color maps
Black-and-white (grayscale) images can be colored with colormaps.
maps = ['winter', 'summer', 'autumn', 'gray'] plt.subplots(figsize=(8, 6)) for i in range(4): plt.subplot(2, 2, i+1) plt.imshow(img, cmap=maps[i]) plt.axis('off') plt.title(maps[i])
Color bar
The `colorbar()` function displays a color bar that indicates how numerical values map to colors.
plt.imshow(img, cmap='hot') plt.axis('off') plt.colorbar(); plt.imshow(img, cmap='terrain') plt.colorbar();
Flipping
To flip the vertical direction of the image, we use the `origin` option.
plt.imshow(img, origin='lower', cmap='hot');
Juxtaposing
With the `hstack` (horizontal stack) command we can place images side by side.
a = np.hstack([img, img]) plt.imshow(a);
Stacking
With the `vstack` (vertical stack) command we can stack images on top of each other.
a = np.vstack([img, img]) plt.imshow(a);
Mandelbrot
To finish, let's compute a Mandelbrot fractal.
def mandelbrot(h, w, maxit=20): """Returns an image of the Mandelbrot fractal of size (h,w).""" y, x = np.ogrid[-1.4:1.4:h*1j, -2:0.8:w*1j] c = x + y*1j z = c divtime = maxit + np.zeros(z.shape, dtype=int) for i in range(maxit): z = z**2 + c diverge = z*np.conj(z) > 2**2 # who is diverging div_now = diverge & (divtime==maxit) # who is diverging now divtime[div_now] = i # note when z[diverge] = 2 # avoid diverging too much return divtime plt.imshow(mandelbrot(400, 400));
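As a follow-up to the grayscale and colormap cells above, a color image can itself be reduced to a single channel before a colormap is applied. A minimal sketch (not part of the original notebook), assuming the RGB `img/Bachalpsee.jpg` image used earlier:

```python
from matplotlib.image import imread
import matplotlib.pyplot as plt

# Average the three RGB channels to get a grayscale array, then color it with a map.
rgb = imread('img/Bachalpsee.jpg')
gray = rgb.mean(axis=2)
plt.imshow(gray, cmap='hot')
plt.colorbar()
plt.axis('off');
```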
""" names = np.array([str(item['id'])+': '+' '.join(item['experiment'][1:]) for item in data_list]) data = [item[which] for item in data_list] if force_finite: #Remove any inf or nan values data = [np.array(item) for item in data] data = [item[np.isfinite(item)] for item in data] firstlen = len(data[0]) same_len = np.all([firstlen == len(item) for item in data]) if same_len: data_array = np.array(data) if sort: means = np.median(data_array,axis=1) order = np.argsort(means).astype(int) else: order = np.arange(data_array.shape[0]).astype(int) plt.boxplot(data_array[order,:].T, labels=names[order], vert=False) else: if sort: means = [np.median(item) for item in data] order = np.argsort(means).astype(int) data = [data[i] for i in order] names = [names[i] for i in order] plt.boxplot(data, labels=names, vert=False) boxplots(results_split['lorenz'], 'continue') plt.suptitle('Lorenz system - continue prediction') plt.show() boxplots(results_split['lorenz'], 'random') plt.suptitle('Lorenz system - random prediction') plt.show() boxplots(results_split['rossler'], 'continue') plt.suptitle('Rossler system - continue prediction') plt.show() boxplots(results_split['rossler'], 'random') plt.suptitle('Rossler system - random prediction') plt.show() boxplots(results_split['rossler'], 'lyapunov',force_finite=True) plt.suptitle('Rossler system Lyapunov exponent estimate') plt.xlim(-0.5,0.5) plt.show() boxplots(results_split['thomas'], 'continue') plt.suptitle('Thomas system - continue prediction') plt.show() boxplots(results_split['thomas'], 'random') plt.suptitle('Thomas system - random prediction') plt.show() boxplots(results_split['softrobot'], 'continue') plt.suptitle('Soft robot system - continue prediction') plt.show() boxplots(results_split['softrobot'], 'random') plt.suptitle('Soft robot system - random prediction') plt.show() boxplots(results_split['softrobot'], 'lyapunov') plt.suptitle('Soft robot Lyapunov exponent estimate') plt.xlim(-0.2,0.2) plt.show()Base de données: interroger une base de données MySQL Importsimport os import sqlite3 import zipfile from pathlib import Path import requests # Créer un dossier data s'il n'existe pas encore if not os.path.exists("../data"): os.mkdir("../data")Créer une base de données SQLite# Créer un répertoire DB s'il n'existe pas encore db_path = '../data/db' if not os.path.exists(db_path): os.mkdir(db_path) # Récupérer le fichier ZIP qui contient la DB de test url = "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" filename = url.split("/")[-1] # Récupérer le fichier zip dans la RAM response = requests.get(url) # Ecrire le fichier sur le disque with open(os.path.join(db_path, filename), 'wb') as f: f.write(response.content) # Extraire le zip pour obtenir la db with zipfile.ZipFile(os.path.join(db_path, filename), 'r') as zip_ref: zip_ref.extractall(db_path)Schéma de la base de données Se connecter à la base de donnéesconn = sqlite3.connect(os.path.join(db_path, "chinook.db")) c = conn.cursor()Trouver l'identifiant de artist = '' query1 = f""" SELECT artistId FROM artists WHERE name = '{artist}'; """ c.execute(query1) artist_id = c.fetchone()[0] print(artist_id)68Trouver les identifiants d'albums de query2 = f""" SELECT albumId FROM albums WHERE artistId = '{artist_id}'; """ albums_ids = [] for row in c.execute(query2): albums_ids.append(str(row[0])) print(albums_ids)['48', '49', '157']Trouver les morceaux des albums de Miles Davisquery3 = f""" SELECT name, albumId FROM tracks WHERE albumId IN ({",".join(albums_ids)}); """ songs = set() 
for row in c.execute(query3): songs.add(row[0]) print(f"\n{len(songs)} distinct songs found:\n") for song in sorted(songs): print(song)36 distinct songs found: 'Round Midnight Black Satin Blues For Pablo Blues For Pablo (Alternate Take) Bye Bye Blackbird Compulsion E.S.P. Generique I Don't Wanna Be Kissed (By Anyone But You) I Don't Wanna Be Kissed (By Anyone But You) (Alternate Take) (Live) Jeru Lament Little Church (Live) Miles Ahead Miles Runs The Voodoo Down My Funny Valentine (Live) My Ship Nefertiti New Rhumba Now's The Time Petits Machins (Little Stuff) Portia So What Someday My Prince Will Come Springsville Springsville (Alternate Take) Summertime Tempus Fugit The Duke The Maids Of Cadiz The Meaning Of The Blues The Meaning Of The Blues/Lament (Alternate Take) The Pan Piper Time After Time Walkin'En une seule requête...query4 = f""" SELECT tracks.name, tracks.albumId FROM artists JOIN albums ON artists.artistId = albums.artistId JOIN tracks ON albums.albumId = tracks.albumId WHERE artists.name = '{artist}'; """ songs = set() for row in c.execute(query4): songs.add(row[0]) print(f"\n{len(songs)} distinct songs found:\n") for song in sorted(songs): print(song)36 distinct songs found: 'Round Midnight Black Satin Blues For Pablo Blues For Pablo (Alternate Take) Bye Bye Blackbird Compulsion E.S.P. Generique I Don't Wanna Be Kissed (By Anyone But You) I Don't Wanna Be Kissed (By Anyone But You) (Alternate Take) (Live) Jeru Lament Little Church (Live) Miles Ahead Miles Runs The Voodoo Down My Funny Valentine (Live) My Ship Nefertiti New Rhumba Now's The Time (Little Stuff) Portia So What Someday My Prince Will Come Springsville Springsville (Alternate Take) Summertime Tempus Fugit The Duke The Maids Of Cadiz The Meaning Of The Blues The Meaning Of The Blues/Lament (Alternate Take) The Pan Piper Time After Time Walkin'I. Numpy Импортируйте NumPyimport numpy as np # Здесь и далее "print" используется для удоства, не везде закомментирован также умышленно. # Так проще проверять самого себяСоздайте одномерный массив размера 10, заполненный нулями и пятым элемент равным 1. Трансформируйте в двумерный массив.# Т.к. элементов много, можно создать как руками, сразу предзаполнив 5й элемент единицей, так и автоматом. # Показано оба способа manual_arr = np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0]) auto_arr = np.zeros(10) auto_arr[4] = 1 # Короткая запись замены по индексу np.put(auto_arr, [4], [1]) # Более длинная запись, годится когда массив сложный, в этом случае - будет читабельнее # Трансформируйте в двумерный массив # Не стал захламлять сразу в одну строку, но знаю как auto_arr.resize((2, 5)) print(auto_arr) print(auto_arr.shape)[[0. 0. 0. 0. 1.] [0. 0. 0. 0. 0.]] (2, 5)Создайте одномерный массив со значениями от 10 до 49 и разверните его (первый элемент становится последним). Найдите в нем все четные элементы.# Представлено пару способов. ini_array1 = np.flipud(np.arange(start=10, stop=50)) ini_array2 = np.arange(start=10, stop=50)[::-1] print(ini_array1) print(ini_array2)[49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10] [49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]Создайте двумерный массив 3x3 со значениями от 0 до 8#Запись можно укоротить, что и сделал arr2D = np.arange(9).reshape(3,3) print(arr2D)[[0 1 2] [3 4 5] [6 7 8]]Создайте массив 4x3x2 со случайными значениями. 
Найти его минимум и максимум.# Запись можно укоротить, но оставил в 2 шага в угоду читабельности # Выбраны числа, так как даже рандомные числа работают быстрее чем любой другой тип данных arr3D = np.random.randint(0, 100, size=(4, 3, 2)) print(arr3D) # Вывел кортежем для удобства min_max = arr3D.max(), arr3D.min() print(min_max)[[[52 10] [88 20] [54 90]] [[87 95] [46 65] [17 91]] [[82 63] [ 0 48] [81 83]] [[24 43] [84 30] [16 32]]] (95, 0)Создайте два двумерных массива размерами 6x4 и 4x3 и произведите их матричное умножение.# Запись в последней строке такая более читабельна, видно множители и конкретный метод и сложнее ошибиться arr1 = np.arange(24).reshape(6,4) arr2 = np.arange(12).reshape(4,3) print(np.dot(arr1, arr2))[[ 42 48 54] [114 136 158] [186 224 262] [258 312 366] [330 400 470] [402 488 574]]Создайте случайный двумерный массив 7x7, найти у него среднее и стандартное оклонение. Нормализуйте этот массив.rnd_arr = np.random.randint(0, 49, size=(7, 7)) print(rnd_arr) # Среднее отклонение print(np.mean(rnd_arr)) # Стандартное отклонение print(np.std(rnd_arr)) # Нормализованный массив, с использованием данных что получил выше (так быстрее) по формуле* # * Для инфо:Нормализация осуществляется относительно среднего значения. # Вычисляется как отношение разности между исходным значением и средней выборки # к среднему квадратическому отклонению. # У нормализованной матрицы средняя равна нулю (или очень близка ней), # а среднее квадратическое отклонение равно 1 (или также очень близко к этому значению). norms_rnd_arr = (rnd_arr - np.mean(rnd_arr) / np.std(rnd_arr)) print(norms_rnd_arr)[[45 17 17 0 21 7 9] [ 9 4 26 7 39 6 41] [17 48 19 39 39 45 28] [26 1 7 36 6 17 19] [44 28 36 7 46 47 33] [10 7 23 38 29 43 37] [46 5 3 46 40 1 3]] 23.816326530612244 15.965106683125605 [[43.50822628 15.50822628 15.50822628 -1.49177372 19.50822628 5.50822628 7.50822628] [ 7.50822628 2.50822628 24.50822628 5.50822628 37.50822628 4.50822628 39.50822628] [15.50822628 46.50822628 17.50822628 37.50822628 37.50822628 43.50822628 26.50822628] [24.50822628 -0.49177372 5.50822628 34.50822628 4.50822628 15.50822628 17.50822628] [42.50822628 26.50822628 34.50822628 5.50822628 44.50822628 45.50822628 31.50822628] [ 8.50822628 5.50822628 21.50822628 36.50822628 27.50822628 41.50822628 35.50822628] [44.50822628 3.50822628 1.50822628 44.50822628 38.50822628 -0.49177372 1.50822628]]II. Pandas Импортируйте: pandas, matplotlib, seabornimport pandas as pd import matplotlib.pyplot as plt import seaborn as snsЗагрузите датасет Tips из набора датасетов seaborn# Зададим сразу переменную df = sns.load_dataset('tips')Посмотрите на первые 5 строчек# Можно так df[:5] #А можно так df.head()Узнайте сколько всего строчек и колонок в данныхdf.shapeПроверьте есть ли пропуски в данных# Пропусков нет # Можно так - df.isnull().sum() # Еще короче запись df.isna().sum()Посмотрите на распределение числовых признаков# Через MatPlotLib df.boxplot() # Напрямую через Pandas df.hist(legend = True) # Через Seaborn. 
Совместил все признаки и поставил прозрачность на 50% sns.histplot(df, alpha = 0.5);Найдите максимальное значение 'total_bill'# Синтаксис упрощён df['total_bill'].max()Найдите количество курящих людейdf[['smoker']].eq('Yes').sum() # Интересно что данное поле не булёвое, поэтому поиск по строке, плохой датасет)Узнайте какой средний 'total_bill' в зависимости от 'day'df.groupby('day', as_index=False)['total_bill'].mean()Отберите строчки с 'total_bill' больше медианы и узнайте какой средний 'tip' в зависимости от 'sex'# Немного запутанно написано, но вроде читабельно df[df.total_bill> df.total_bill.mean()].groupby('sex', as_index=False)['tip'].mean() #df.total_bill.mean()Преобразуйте признак 'smoker' в бинарный (0-No, 1-Yes)# Задаем эквиваленты bool_dt = {'Yes': True, 'No': False} # Пробегаем df['smoker'].map(bool_dt)III. Visualization Постройте гистограмму распределения признака 'total_bill'sns.histplot(df['total_bill'], alpha = 1);Постройте scatterplot, представляющий взаимосвязь между признаками 'total_bill' и 'tip'# Поменял цвет в угоду фону df.plot.scatter(x = 'total_bill', y = 'tip', s = 10, c = 'g');Постройте pairplotsns.pairplot(df);Постройте график взаимосвязи между признаками 'total_bill' и 'day'# Без Seaborn'а df.plot.scatter(x = 'day', y = 'total_bill', c = 'total_bill', cmap = 'viridis'); # Вывел так, чтобы увидеть заодно зависимость и по размеру чаевых удобнее. sns.jointplot(x = 'day', y = 'total_bill', data = df);Постройте две гистограммы распределения признака 'tip' в зависимости от категорий 'time'# Вывел самым простым способом sns.displot(df, x="tip", col="time");Постройте два графика scatterplot, представляющих взаимосвязь между признаками 'total_bill' и 'tip' один для Male, другой для Female и раскрасьте точки в зависимоти от признака 'smoker'# Согласно офиц.документации (https://seaborn.pydata.org/generated/seaborn.scatterplot.html#seaborn.scatterplot) # relplot безопаснее для сохранения семантической синхронизации между графиками чем # FacetGrid, показанный на занятии, поэтому использовал его. Да и синтаксис понятнее. sns.relplot( data=df, x="total_bill", y="tip", col="sex", hue="smoker", kind="scatter" );Сделайте выводы по анализу датасета и построенным графикам. По желанию можете продолжить анализ данных и также отразить это в выводах.''' Выводы: 1. Датасет годится как sampledata - нет пустых значений и слишком странных выбросов. 2. На выходных больше чаевых, на 15-20% 3. Средний размер чаевых при чеке выше медианы у мужчин и женщин практически одинаков. 4. Размер чаевых не растет линейно при выходе размера чека за медиану примерно (если не учитывать аномалии/выбросы) и остается на одном уровне примерно. как при чеке 30, так и выше (цифры условны) 5. Размер чаевых гораздо выше и т.д. в 17-18 часов, чем в 12 (ланч) 6. Курящие мужчины дают меньше чаевых по размеру по отношению к общему чеку, у курящих график более линейный, у женщин нет явной зависимости между этими параметрами. '''E2E ML on GCP: MLOps stage 4 : evaluation: get started with Vertex AI Model Evaluation Run in Colab View on GitHub Open in Vertex AI Workbench OverviewThis tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 4 : evaluation: get started with Vertex AI Model Evaluation. Datasets**AutoML image model**The dataset used for this tutorial is the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). 
The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip.
**BigQuery ML tabular model**
The dataset used for this tutorial is the Penguins dataset from [BigQuery public datasets](https://cloud.google.com/bigquery/public-data). This version of the dataset is used to predict the species of penguins from the available features like culmen length, flipper depth, etc.
**Custom model**
This tutorial uses a pre-trained image classification model from TensorFlow Hub, which is trained on the ImageNet dataset. Learn more about the [ResNet V2 pretrained model](https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5).
**Pipeline**
The dataset used for this tutorial is the [Bank Marketing](https://pantheon.corp.google.com/storage/browser/_details/cloud-ml-tables-data/bank-marketing.csv) dataset. This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
Objective
In this tutorial, you learn how to use `Vertex AI Model Evaluation`. This tutorial uses the following Google Cloud ML services:
- `Vertex AI AutoML`
- `BigQuery ML`
- `Vertex AI Training`
- `Vertex AI Batch Prediction`
- `Vertex AI Model Evaluation`
- `Google Cloud Pipeline Components`
The steps performed include:
**SDK**
- Evaluate an `AutoML` model. - Train an `AutoML` image classification model. - Retrieve the default evaluation metrics from training. - Do a batch evaluation for a custom evaluation slice.
- Evaluate a BigQuery ML model. - Train a `BigQuery ML` tabular classification model. - Retrieve the default evaluation metrics from training. - Do a batch evaluation for a custom evaluation slice.
- Evaluate a custom model. - Do a batch evaluation for a custom evaluation slice. - Add an evaluation to the `Model Registry` for the `Model` resource.
**Pipeline Components**
- Evaluate an `AutoML` model. - Train an `AutoML` image classification model. - Retrieve the default evaluation metrics from training. - Do a batch evaluation for a custom evaluation slice.
- Evaluate a BigQuery ML model. - Train a `BigQuery ML` tabular classification model. - Retrieve the default evaluation metrics from training. - Do a batch evaluation for a custom evaluation slice.
- Evaluate a custom model. - Do a batch evaluation for a custom evaluation slice. - Add an evaluation to the `Model Registry` for the `Model` resource.
Installations
Install the packages required for executing this notebook.
import os # The Vertex AI Workbench Notebook product has specific requirements IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME") IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists( "/opt/deeplearning/metadata/env_version" ) # Vertex AI Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_WORKBENCH_NOTEBOOK: USER_FLAG = "--user" # Install the packages ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG -q ! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG -q ! pip3 install --upgrade google-cloud-bigquery $USER_FLAG -q ! pip3 install --upgrade tensorflow $USER_FLAG -q ! 
pip3 install --upgrade tensorflow-hub $USER_FLAG -qRestart the kernelOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True)Before you begin GPU runtime*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com)4. If you are running this notebook locally, you will need to install the [Cloud SDK]((https://cloud.google.com/sdk)).5. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_IDRegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).REGION = "[your-region]" # @param {type: "string"} if REGION == "[your-region]": REGION = "us-central1"TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")Authenticate your Google Cloud account**If you are using Vertex AI Workbench Notebooks**, your environment is already authenticated. 
Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.**Click Create service account**.In the **Service account name** field, enter a name, and click **Create**.In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.Click Create. A JSON file that contains your key downloads to your local environment.Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.# If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Vertex AI Workbench, then don't execute this code IS_COLAB = False if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv( "DL_ANACONDA_HOME" ): if "google.colab" in sys.modules: IS_COLAB = True from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS ''Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"} BUCKET_URI = f"gs://{BUCKET_NAME}" if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]": BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP BUCKET_URI = "gs://" + BUCKET_NAME**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.! gsutil mb -l $REGION $BUCKET_URIFinally, validate access to your Cloud Storage bucket by examining its contents:! gsutil ls -al $BUCKET_URIService Account**If you don't know your service account**, try to get your service account using `gcloud` command by executing the second cell below.SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"} if ( SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]" ): # Get your service account from gcloud if not IS_COLAB: shell_output = !gcloud auth list 2>/dev/null SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip() if IS_COLAB: shell_output = ! 
gcloud projects describe $PROJECT_ID project_number = shell_output[-1].split(":")[1].strip().replace("'", "") SERVICE_ACCOUNT = f"{project_number}-compute@developer.gserviceaccount.com" print("Service Account:", SERVICE_ACCOUNT)Set service account access for Vertex AI PipelinesRun the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URISet up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constantsimport json import google.cloud.aiplatform as aiplatform import tensorflow as tf import tensorflow_hub as hub from kfp import dsl from kfp.v2 import compiler from kfp.v2.dsl import componentImport BigQueryImport the BigQuery package into your Python environment.from google.cloud import bigqueryInitialize Vertex AI SDK for PythonInitialize the Vertex AI SDK for Python for your project and corresponding bucket.aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)Create BigQuery clientCreate the BigQuery client.bqclient = bigquery.Client()Set hardware acceleratorsYou can set hardware accelerators for prediction.Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify: (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 4)Otherwise specify `(None, None)` to use a container image to run on a CPU.Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locationsaccelerators).import os if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)Set pre-built containersSet the pre-built Docker container image for prediction.- Set the variable `TF` to the TensorFlow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15. For the latest list of pre-built images, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2-5".replace(".", "-") if TF[0] == "2": if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format( REGION.split("-")[0], DEPLOY_VERSION ) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)Set machine typeNext, set the machine type to use for prediction.- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE)Introduction to Vertex AI Model Evaluation for AutoML models.For AutoML models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the `Vertex AI Model Evaluation` service. Additionally, you can further evaluate the model with custom evaluation slices. Location of Cloud Storage training data.Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.IMPORT_FILE = ( "gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv" )Create the DatasetNext, create the `Dataset` resource using the `create` method for the `ImageDataset` class, which takes the following parameters:- `display_name`: The human readable name for the `Dataset` resource.- `gcs_source`: A list of one or more dataset index files to import the data items into the `Dataset` resource.- `import_schema_uri`: The data labeling schema for the data items.This operation may take several minutes.dataset = aiplatform.ImageDataset.create( display_name="Flowers" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE], import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification, ) print(dataset.resource_name)Create and run training pipelineTo train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline. Create training pipelineAn AutoML training pipeline is created with the `AutoMLImageTrainingJob` class, with the following parameters:- `display_name`: The human readable name for the `TrainingJob` resource.- `prediction_type`: The type task to train the model for. - `classification`: An image classification model. - `object_detection`: An image object detection model.- `multi_label`: If a classification task, whether single (`False`) or multi-labeled (`True`).- `model_type`: The type of model for deployment. - `CLOUD`: Deployment on Google Cloud - `CLOUD_HIGH_ACCURACY_1`: Optimized for accuracy over latency for deployment on Google Cloud. - `CLOUD_LOW_LATENCY_`: Optimized for latency over accuracy for deployment on Google Cloud. - `MOBILE_TF_VERSATILE_1`: Deployment on an edge device. - `MOBILE_TF_HIGH_ACCURACY_1`:Optimized for accuracy over latency for deployment on an edge device. 
- `MOBILE_TF_LOW_LATENCY_1`: Optimized for latency over accuracy for deployment on an edge device.- `base_model`: (optional) Transfer learning from an existing `Model` resource -- supported for image classification only.The instantiated object is the DAG (directed acyclic graph) for the training job.dag = aiplatform.AutoMLImageTrainingJob( display_name="flowers_" + TIMESTAMP, prediction_type="classification", multi_label=False, model_type="CLOUD", base_model=None, ) print(dag)Run the training pipelineNext, you run the DAG to start the training job by invoking the method `run`, with the following parameters:- `dataset`: The `Dataset` resource to train the model.- `model_display_name`: The human readable name for the trained model.- `training_fraction_split`: The percentage of the dataset to use for training.- `test_fraction_split`: The percentage of the dataset to use for test (holdout data).- `validation_fraction_split`: The percentage of the dataset to use for validation.- `budget_milli_node_hours`: (optional) Maximum training time specified in unit of millihours (1000 = hour).- `disable_early_stopping`: If `True`, the entire budget is used; otherwise, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.The `run` method, when completed, returns the `Model` resource.The execution of the training pipeline will take up to 20 minutes.model = dag.run( dataset=dataset, model_display_name="flowers_" + TIMESTAMP, training_fraction_split=0.8, validation_fraction_split=0.1, test_fraction_split=0.1, budget_milli_node_hours=8000, disable_early_stopping=False, )Retrieving the default evaluation for the `AutoML Model` resourceAfter your model has finished training, you can review the evaluation scores for it.First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you trained the model or you can list all of the models in your project.model_evaluations = model.list_model_evaluations() for model_evaluation in model_evaluations: print(model_evaluation.to_dict())Evaluating on a custom evaluation sliceNext, you evaluate the AutoML model on a custom evaluation slice by running a batch prediction over it. Make the batch input fileNow make a batch input file, which you store in your Cloud Storage bucket. The batch input file must be in JSONL format. For a JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:- `content`: The Cloud Storage path to the image.- `mime_type`: The content type. In our example, it is a `jpeg` file.For example: {'content': '[your-bucket]/file1.jpg', 'mime_type': 'jpeg'} For demonstration purposes, to create an evaluation slice, you use a portion of the training data -- as if it was separate (non-training) data, such as instances seen in production.EVAL_SLICE = BUCKET_URI + "/flowers_eval.jsonl" ! gsutil cat {IMPORT_FILE} | head -n 200 >tmp.csv import csv entries = [] with open("tmp.csv", "r") as f: reader = csv.reader(f) for row in reader: path = row[0] label = row[1] file = path.split("/")[-1] new_path = BUCKET_URI + "/flowers/" + file ! gsutil cp {path} {new_path} >/dev/null entries.append({"content": new_path, "mime_type": "jpeg"}) import json with open("tmp.jsonl", "w") as f: for entry in entries: f.write(json.dumps(entry) + "\n") ! gsutil cp tmp.jsonl {EVAL_SLICE} #!
rm tmp.csv tmp.jsonlMake the batch prediction requestNow that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:- `job_display_name`: The human readable name for the batch prediction job.- `instances_format`: The format of the prediction request; can only be JSONL (default).- `gcs_source`: A list of one or more batch request input files.- `gcs_destination_prefix`: The Cloud Storage location for storing the batch prediction results.- `sync`: If set to True, the call will block while waiting for the asynchronous batch job to complete.batch_predict_job = model.batch_predict( job_display_name="flowers_" + TIMESTAMP, instances_format="jsonl", gcs_source=EVAL_SLICE, gcs_destination_prefix=BUCKET_URI, sync=True, ) print(batch_predict_job)Get the predictionsNext, get the results from the completed batch prediction job.The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:- `content`: The prediction request.- `prediction`: The prediction response. - `ids`: The internal assigned unique identifiers for each prediction request. - `displayNames`: The class names for each class label. - `confidences`: The predicted confidence, between 0 and 1, per class label.import json import tensorflow as tf bp_iter_outputs = batch_predict_job.iter_outputs() prediction_results = list() for blob in bp_iter_outputs: if blob.name.split("/")[-1].startswith("prediction"): prediction_results.append(blob.name) tags = list() for prediction_result in prediction_results: gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}" with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile: for line in gfile.readlines(): line = json.loads(line) print(line) breakDelete temporary resourcesNext, you delete all the temporary resources created by this example.try: dag.delete() model.delete() batch_predict_job.delete() except Exception as e: print(e)Introduction to Vertex AI Model Evaluation for BigQuery ML models.For BigQuery ML models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the `Vertex AI Model Evaluation` service. Additionally, you can further evaluate the model with custom evaluation slices. Location of the BigQuery training dataNow set the variables `IMPORT_FILE` and `BQ_TABLE` to the location of the training data in `BigQuery`.IMPORT_FILE = "bq://bigquery-public-data.ml_datasets.penguins" BQ_TABLE = "bigquery-public-data.ml_datasets.penguins"Create BQ dataset resourceFirst, you create an empty dataset resource in your project.BQ_DATASET_NAME = "penguins" DATASET_QUERY = f"""CREATE SCHEMA {BQ_DATASET_NAME} """ job = bqclient.query(DATASET_QUERY)Setting permissions to automatically register the modelYou need to set some additional IAM permissions for BigQuery ML to automatically upload and register the model after training. Depending on your service account, the setting of the permissions below may fail. In this case, we recommend executing these commands in Cloud Shell.! gcloud projects add-iam-policy-binding $PROJECT_ID \ --member='serviceAccount:' \ --role='roles/aiplatform.admin' !
gcloud projects add-iam-policy-binding $PROJECT_ID \ --member='user:' \ --role='roles/aiplatform.admin'Training and registering the BigQuery ML modelNext, you create and train a BigQuery ML tabular classification model from the public dataset penguins and store the model in your project using the `CREATE MODEL` statement. The model configuration is specified in the `OPTIONS` statement as follows:- `model_type`: The type and architecture of tabular model to train, e.g., DNN classification.- `labels`: The column that contains the labels.- `model_registry`: Set to "vertex_ai" to indicate automatic registration to `Vertex AI Model Registry`.- `vertex_ai_model_id`: The human readable display name for the registered model.- `vertex_ai_model_version_aliases`: Alternate names for the model.Learn more about [The CREATE MODEL statement](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create).MODEL_NAME = "penguins" MODEL_QUERY = f""" CREATE OR REPLACE MODEL `{BQ_DATASET_NAME}.{MODEL_NAME}` OPTIONS( model_type='DNN_CLASSIFIER', labels = ['species'], model_registry="vertex_ai", vertex_ai_model_id="bqml_model_{TIMESTAMP}", vertex_ai_model_version_aliases=["1"] ) AS SELECT * FROM `{BQ_TABLE}` """ job = bqclient.query(MODEL_QUERY) print(job.errors, job.state) while job.running(): from time import sleep sleep(30) print("Running ...") print(job.errors, job.state) tblname = job.ddl_target_table tblname = "{}.{}".format(tblname.dataset_id, tblname.table_id) print("{} created in {}".format(tblname, job.ended - job.started))Evaluate the trained BigQuery model using BigQueryNext, retrieve the model evaluation from within BigQuery for the trained BigQuery ML model.Learn more about [The ML.EVALUATE function](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-evaluate).EVAL_QUERY = f""" SELECT * FROM ML.EVALUATE(MODEL {BQ_DATASET_NAME}.{MODEL_NAME}) ORDER BY roc_auc desc LIMIT 1""" job = bqclient.query(EVAL_QUERY) results = job.result().to_dataframe() print(results)Find the model in the `Vertex AI Model Registry`Finally, you can use the `Vertex AI Model` list() method with a filter query to find the automatically registered model.models = aiplatform.Model.list(filter="display_name=bqml_model_" + TIMESTAMP) model = models[0] print(model.gca_resource)Retrieving the default evaluation for the `BigQuery ML Model` resource from the `Vertex AI Model Registry`After your model has finished training, you can review the evaluation scores for it.First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you trained the model or you can list all of the models in your project.# Get a reference to the Model Service client client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"} model_service_client = aiplatform.gapic.ModelServiceClient( client_options=client_options ) model_evaluations = model_service_client.list_model_evaluations( parent=model.resource_name ) model_evaluation = list(model_evaluations)[0] print(model_evaluation)Evaluating on a custom evaluation sliceEvaluating the BigQuery ML model on a custom evaluation slice follows the same batch-prediction pattern shown for the AutoML model above; the pipeline-based example later in this notebook performs exactly that for this model. Delete temporary resourcesNext, you delete all the temporary resources created by this example.try: model.delete() batch_predict_job.delete() except Exception as e: print(e) try: # Delete the created BigQuery dataset !
bq rm -r -f $PROJECT_ID:$BQ_DATASET_NAME except Exception as e: print(e) MODEL_QUERY = f""" DROP MODEL `{BQ_DATASET_NAME}.{MODEL_NAME}` """ job = bqclient.query(MODEL_QUERY)Introduction to Vertex AI Model Evaluation for custom models.For custom models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the `Vertex AI Model Evaluation` service. Additionally, you can further evaluate the model with custom evaluation slices. Get pretrained model from TensorFlow HubFor demonstration purposes, this tutorial uses a pretrained model from TensorFlow Hub (TFHub), which is then uploaded to a `Vertex AI Model` resource. Once you have a `Vertex AI Model` resource, the model can be deployed to a `Vertex AI Endpoint` resource. Download the pretrained modelFirst, you download the pretrained model from TensorFlow Hub. The model gets downloaded as a TF.Keras layer. To finalize the model, in this example, you create a `Sequential()` model with the downloaded TFHub model as a layer, and specify the input shape to the model.tfhub_model = tf.keras.Sequential( [hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5")] ) tfhub_model.build([None, 224, 224, 3]) tfhub_model.summary()Save the model artifactsAt this point, the model is in memory. Next, you save the model artifacts to a Cloud Storage location.*Note:* For TF Serving, the MODEL_DIR must end in a subfolder that is a number, e.g., 1.MODEL_DIR = BUCKET_URI + "/model/1" tfhub_model.save(MODEL_DIR)Upload the model for servingNext, you upload your TF.Keras model to the Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. How does the serving function work?When you send a request to an online prediction server, the request is received by an HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.The serving function consists of two parts:- `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done when training the underlying model -- e.g., normalizing, scaling, etc.- `post-processing function`: - Converts the model output to the format expected by the receiving application -- e.g., compresses the output. - Packages the output for the receiving application -- e.g., adds headings, makes a JSON object, etc.Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.One consideration when building serving functions for TF.Keras models is that they run as static graphs. That means you cannot use TF graph operations that require a dynamic graph (for example, calling `.numpy()` on a tensor inside the serving function is an eager-only operation).
If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. Serving function for image data PreprocessingTo pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes, and then preprocessed to match the model input requirements, before it is passed as input to the deployed model.To resolve this, you define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:- `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB).- `image.convert_image_dtype` - Changes integer pixel values to float 32, and rescales pixel data between 0 and 1.- `image.resize` - Resizes the image to match the input shape for the model.At this point, the data can be passed to the model (`m_call`), via a concrete function. The serving function is a static graph, while the model is a dynamic graph. The concrete function performs the tasks of marshalling the input data from the serving function to the model, and marshalling the prediction result from the model back to the serving function.CONCRETE_INPUT = "numpy_inputs" def _preprocess(bytes_input): decoded = tf.io.decode_jpeg(bytes_input, channels=3) decoded = tf.image.convert_image_dtype(decoded, tf.float32) resized = tf.image.resize(decoded, size=(224, 224)) return resized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def preprocess_fn(bytes_inputs): decoded_images = tf.map_fn( _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False ) return { CONCRETE_INPUT: decoded_images } # User needs to make sure the key matches model's input @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(bytes_inputs): images = preprocess_fn(bytes_inputs) prob = m_call(**images) return prob m_call = tf.function(tfhub_model.call).get_concrete_function( [tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.float32, name=CONCRETE_INPUT)] ) tf.saved_model.save(tfhub_model, MODEL_DIR, signatures={"serving_default": serving_fn})Get the serving function signatureYou can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as a HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. 
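For illustration, a single online prediction instance for this model can be assembled as a base64 payload (a minimal sketch, assuming a hypothetical local image file test.jpg; serving_input is the signature key retrieved in the next cell):
import base64
# Read the raw, compressed JPEG bytes of a sample image
with open("test.jpg", "rb") as f:
    content = f.read()
# Wrap the base64 string in a {"b64": ...} dict, keyed by the serving function's input name
instance = {serving_input: {"b64": base64.b64encode(content).decode("utf-8")}}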
Your serving function will do the conversion from base64 to a numpy array. When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.loaded = tf.saved_model.load(MODEL_DIR) serving_input = list( loaded.signatures["serving_default"].structured_input_signature[1].keys() )[0] print("Serving function input:", serving_input)Upload the TensorFlow Hub model to a `Vertex AI Model` resourceFinally, you upload the model artifacts from the TFHub model into a `Vertex AI Model` resource.model = aiplatform.Model.upload( display_name="example_" + TIMESTAMP, artifact_uri=MODEL_DIR, serving_container_image_uri=DEPLOY_IMAGE, ) print(model)Doing a batch prediction on the custom model and registering the custom evaluation metrics to the `Model Registry` follows the same pattern as the AutoML batch evaluation shown above. Model evaluation using `Vertex AI Pipeline` componentsIn this section, you perform model evaluations on `AutoML`, `BigQuery ML` and custom models using `Vertex AI Pipeline` components. AutoML model evaluation pipeline componentAdditionally, you can evaluate an AutoML model with custom evaluation slices using the combination of the `BatchPredictionOp` and `ModelEvaluationOp` components, as follows:- The custom evaluation slice data contains the label values (ground truths).- Perform a batch prediction on the custom evaluation slice.- Perform a model evaluation with the batch prediction results and label values. Location of Cloud Storage training data.Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv" ! gsutil cat {IMPORT_FILE} | head -n 40000 > train.csv ! gsutil cat {IMPORT_FILE} | head -n 1 >eval.csv ! gsutil cat {IMPORT_FILE} | tail -n 5200 >> eval.csv IMPORT_TRAIN = BUCKET_NAME + "/train.csv" IMPORT_EVAL = BUCKET_NAME + "/eval.csv" ! gsutil cp train.csv {IMPORT_TRAIN} ! gsutil cp eval.csv {IMPORT_EVAL} ! rm -f train.csv eval.csvCreate AutoML model evaluation componentThe Vertex AI pre-built pipeline components do not currently include a component for retrieving the model evaluations for an AutoML model.
So, you will first write your own component, as follows:- Takes as input the region and Model artifacts returned from an AutoML training component.- Create a client interface to the Vertex AI Model service (`metadata["resource_name"]).- Construct the resource ID for the model from the model artifact parameter.- Retrieve the model evaluation- Return the model evaluation as a string.from kfp.v2.dsl import Artifact, Input, Model @component(packages_to_install=["google-cloud-aiplatform"]) def evaluateAutoMLModelOp(model: Input[Artifact], region: str) -> str: import logging import google.cloud.aiplatform.gapic as gapic # Get a reference to the Model Service client client_options = {"api_endpoint": f"{region}-aiplatform.googleapis.com"} model_service_client = gapic.ModelServiceClient(client_options=client_options) model_id = model.metadata["resourceName"] model_evaluations = model_service_client.list_model_evaluations(parent=model_id) model_evaluation = list(model_evaluations)[0] logging.info(model_evaluation) return str(model_evaluation)Construct pipeline for AutoML training, and batch model evaluationNext, construct the pipeline with the following tasks:- Create a Vertex AI Dataset resource.- Train a AutoML tabular classification model.- Retrieve the AutoML evaluation statistics.- Make a batch prediction with the AutoML model, using an evaluation slice that was not used during training.- Evaluate the AutoML model using the results from the batch prediction.PIPELINE_ROOT = "{}/pipeline_root/automl_lbn_training".format(BUCKET_NAME) @dsl.pipeline( name="automl-lbn-training", description="AutoML tabular classification training" ) def pipeline( import_file: str, batch_files: list, display_name: str, bucket: str = PIPELINE_ROOT, project: str = PROJECT_ID, region: str = REGION, ): from google_cloud_pipeline_components import aiplatform as gcc_aip from google_cloud_pipeline_components.experimental.evaluation import \ ModelEvaluationOp from google_cloud_pipeline_components.v1.batch_predict_job import \ ModelBatchPredictOp dataset_op = gcc_aip.TabularDatasetCreateOp( project=project, display_name=display_name, gcs_source=import_file ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=project, display_name=display_name, optimization_prediction_type="classification", dataset=dataset_op.outputs["dataset"], model_display_name=display_name, training_fraction_split=0.8, validation_fraction_split=0.1, test_fraction_split=0.1, budget_milli_node_hours=8000, optimization_objective="minimize-log-loss", target_column="Deposit", ) eval_op = evaluateAutoMLModelOp(model=training_op.outputs["model"], region=region) batch_op = ModelBatchPredictOp( project=project, job_display_name="batch_predict_job", model=training_op.outputs["model"], gcs_source_uris=batch_files, gcs_destination_output_uri_prefix=bucket, instances_format="csv", predictions_format="jsonl", model_parameters={}, machine_type=DEPLOY_COMPUTE, starting_replica_count=1, max_replica_count=1, ).after(eval_op) batch_eval_op = ModelEvaluationOp( project=project, root_dir=bucket, problem_type="classification", classification_type="multiclass", ground_truth_column="Deposit", class_names=["0", "1"], predictions_format="jsonl", batch_prediction_job=batch_op.outputs["batchpredictionjob"], )Compile and execute the AutoML training, and batch model evaluation pipelineNext, you compile the pipeline and then execute it. 
The pipeline takes the following parameters, which are passed as the dictionary `parameter_values`:- `import_file`: The Cloud Storage location of the training data.- `batch_files`: A list of one or more Cloud Storage locations of evaluation data.- `display_name`: Display name for Vertex AI Model and Endpoint resources.- `project`: The project ID.- `region`: The region.compiler.Compiler().compile( pipeline_func=pipeline, package_path="automl_lbn_training.json" ) pipeline = aiplatform.PipelineJob( display_name="automl_lbn_training", template_path="automl_lbn_training.json", pipeline_root=PIPELINE_ROOT, parameter_values={ "import_file": IMPORT_TRAIN, "batch_files": [IMPORT_EVAL], "display_name": "bank" + TIMESTAMP, "project": PROJECT_ID, "region": REGION, }, ) pipeline.run() ! rm -f automl_lbn_training.jsonView the AutoML training and batch evaluation pipeline resultsPROJECT_NUMBER = pipeline.gca_resource.name.split("/")[1] print(PROJECT_NUMBER) def print_pipeline_output(job, output_task_name): JOB_ID = job.name print(JOB_ID) for _ in range(len(job.gca_resource.job_detail.task_details)): TASK_ID = job.gca_resource.job_detail.task_details[_].task_id EXECUTE_OUTPUT = ( PIPELINE_ROOT + "/" + PROJECT_NUMBER + "/" + JOB_ID + "/" + output_task_name + "_" + str(TASK_ID) + "/executor_output.json" ) GCP_RESOURCES = ( PIPELINE_ROOT + "/" + PROJECT_NUMBER + "/" + JOB_ID + "/" + output_task_name + "_" + str(TASK_ID) + "/gcp_resources" ) if tf.io.gfile.exists(EXECUTE_OUTPUT): ! gsutil cat $EXECUTE_OUTPUT break elif tf.io.gfile.exists(GCP_RESOURCES): ! gsutil cat $GCP_RESOURCES break return EXECUTE_OUTPUT print("tabular-dataset-create") artifacts = print_pipeline_output(pipeline, "tabular-dataset-create") print("\n\n") print("automl-tabular-training-job") artifacts = print_pipeline_output(pipeline, "automl-tabular-training-job") print("\n\n") print("evaluateautomlmodelop") artifacts = print_pipeline_output(pipeline, "evaluateautomlmodelop") output = !gsutil cat $artifacts output = json.loads(output[0]) metrics = output["parameters"]["Output"]["stringValue"] print("\n") print(metrics) print("\n\n") print("model-batch-predict") artifacts = print_pipeline_output(pipeline, "model-batch-predict") output = !gsutil cat $artifacts output = json.loads(output[0]) print("\n\n") print( output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][ "gcsOutputDirectory" ] ) print("model-evaluation") artifacts = print_pipeline_output(pipeline, "model-evaluation")Delete a pipeline jobAfter a pipeline job is completed, you can delete the pipeline job with the method `delete()`. Prior to completion, a pipeline job can be canceled with the method `cancel()`.pipeline.delete()Introduction to Vertex AI Model Evaluation for BigQuery ML models.For BigQuery ML models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the `BigQuery ML` service.Additionally, you can evaluate a BigQuery ML model with custom evaluation slices using the combination of the `BatchPredictionOp` and `ModelEvaluationOp` components, as follows: - The custom evaluation slice data contains the label values (ground truths). - Perform a batch prediction on the custom evaluation slice.
- Perform a model evaluation with the batch prediction results and label values.IMPORT_FILE = "bq://bigquery-public-data.ml_datasets.penguins" BQ_TABLE = "bigquery-public-data.ml_datasets.penguins" BQ_TABLE = "bigquery-public-data.ml_datasets.penguins" BQ_DATASET = BQ_TABLE.split(".")[1] def get_data(slice_name, limit): query = f""" CREATE OR REPLACE TABLE `{slice_name}` AS ( WITH penguins AS ( SELECT island, sex, culmen_length_mm, culmen_depth_mm, flipper_length_mm, body_mass_g, species FROM `{BQ_TABLE}` ) SELECT island, sex, culmen_length_mm, culmen_depth_mm, flipper_length_mm, body_mass_g, species FROM penguins LIMIT {limit} ) """ response = bqclient.query(query) _ = response.result() BQ_TABLE_EVAL = f"{PROJECT_ID}.{BQ_DATASET}.penguins_eval" IMPORT_EVAL = f"bq://{BQ_TABLE_EVAL}" LIMIT = 44 get_data(BQ_TABLE_EVAL, LIMIT) BQ_TABLE_TRAIN = f"{PROJECT_ID}.{BQ_DATASET}.penguins_train" IMPORT_TRAIN = f"bq://{BQ_TABLE_TRAIN}" LIMIT = "300 OFFSET 44" get_data(BQ_TABLE_TRAIN, LIMIT)Construct pipeline for BigQuery ML training, and batch model evaluationNext, construct the pipeline with the following tasks:- Create a BigQuery ML Dataset resource.- Train a BigQuery ML tabular classification model.- Retrieve the BigQuery ML evaluation statistics.- Make a batch prediction with the BigQuery ML model, using an evaluation slice that was not used during training.- Evaluate the BigQuery ML model using the results from the batch prediction.PIPELINE_ROOT = f"{BUCKET_NAME}/bq_query" @dsl.pipeline(name="bq-hello-world", pipeline_root=PIPELINE_ROOT) def pipeline( bq_train_table: str, bq_eval_table: str, label: str, class_names: list, dataset: str, model: str, artifact_uri: str, # num_trials: int, deploy_image: str, machine_type: str, min_replica_count: int, max_replica_count: int, display_name: str, bucket: str, accelerator_type: str = "", accelerator_count: int = 0, project: str = PROJECT_ID, location: str = "US", region: str = "us-central1", ): from google_cloud_pipeline_components.experimental.evaluation import \ ModelEvaluationOp from google_cloud_pipeline_components.v1.batch_predict_job import \ ModelBatchPredictOp from google_cloud_pipeline_components.v1.bigquery import ( BigqueryCreateModelJobOp, BigqueryEvaluateModelJobOp, BigqueryExportModelJobOp, BigqueryQueryJobOp) from google_cloud_pipeline_components.v1.model import ModelUploadOp bq_dataset = BigqueryQueryJobOp( project=project, location="US", query=f"CREATE SCHEMA {dataset}" ) bq_model = BigqueryCreateModelJobOp( project=project, location=location, query=f"CREATE OR REPLACE MODEL {dataset}.{model} OPTIONS (model_type='dnn_classifier', labels=['{label}']) AS SELECT * FROM `{bq_train_table}` WHERE body_mass_g IS NOT NULL AND sex IS NOT NULL", ).after(bq_dataset) bq_eval = BigqueryEvaluateModelJobOp( project=PROJECT_ID, location="US", model=bq_model.outputs["model"] ).after(bq_model) bq_export = BigqueryExportModelJobOp( project=project, location=location, model=bq_model.outputs["model"], model_destination_path=artifact_uri, ).after(bq_model) model_upload = ModelUploadOp( display_name=display_name, artifact_uri=artifact_uri, serving_container_image_uri=deploy_image, project=project, location=region, ).after(bq_export) batch_predict = ModelBatchPredictOp( project=project, job_display_name="batch_predict_job", model=model_upload.outputs["model"], bigquery_source_input_uri=bq_eval_table, bigquery_destination_output_uri=f"bq://{project}", instances_format="bigquery", predictions_format="bigquery", model_parameters={}, machine_type=DEPLOY_COMPUTE, 
starting_replica_count=min_replica_count, max_replica_count=max_replica_count, accelerator_type=accelerator_type, accelerator_count=accelerator_count, ).after(model_upload) batch_eval = ModelEvaluationOp( project=project, root_dir=bucket, problem_type="classification", classification_type="multiclass", ground_truth_column=label, class_names=class_names, predictions_format="jsonl", batch_prediction_job=batch_predict.outputs["batchpredictionjob"], )Compile and execute the BigQuery ML training, and batch model evaluation pipelineNext, you compile the pipeline and then execute it. The pipeline takes the following parameters, which are passed as the dictionary `parameter_values`:- `bq_train_table`: The BigQuery table containing the training data.- `bq_eval_table`: The BigQuery table containing the evaluation data.- `label`: The corresponding label for the BigQuery dataset.- `dataset`: The BigQuery dataset component name.- `model`: The BigQuery model component name.- `artifact_uri`: The Cloud Storage location to export the BigQuery model artifacts.- `num_trials`: If greater than one, will perform hyperparameter tuning for the specified number of trials using the Vertex AI Vizier service.- `deploy_image`: The container image for serving predictions.- `machine_type`: The VM for serving predictions.- `min_replica_count`/`max_replica_count`: The number of virtual machines for auto-scaling predictions.- `display_name`: Display name for Vertex AI Model resource.- `project`: The project ID.- `region`: The region.MODEL_DIR = BUCKET_NAME + "/bqmodel" compiler.Compiler().compile(pipeline_func=pipeline, package_path="bqml.json") pipeline = aip.PipelineJob( display_name="bqml", template_path="bqml.json", pipeline_root=PIPELINE_ROOT, parameter_values={ "bq_train_table": BQ_TABLE_TRAIN, "bq_eval_table": IMPORT_EVAL, "label": "species", "class_names": [ "Adelie Penguin (Pygoscelis adeliae)", "Chinstrap penguin (Pygoscelis antarctica)", "Gentoo penguin (Pygoscelis papua)", ], "dataset": "bqml_tutorial", "model": "penguins_model", "artifact_uri": MODEL_DIR, #'num_trials': 1, "deploy_image": DEPLOY_IMAGE, "display_name": "penguins", "machine_type": DEPLOY_COMPUTE, "min_replica_count": 1, "max_replica_count": 1, "accelerator_type": DEPLOY_GPU.name, "accelerator_count": 1, "bucket": BUCKET_NAME, "project": PROJECT_ID, "location": "US", }, # enable_caching=False ) pipeline.run() ! rm -rf bqml.jsonView the BigQuery ML training and batch evaluation pipeline resultsPROJECT_NUMBER = pipeline.gca_resource.name.split("/")[1] print(PROJECT_NUMBER) def print_pipeline_output(job, output_task_name): JOB_ID = job.name print(JOB_ID) for _ in range(len(job.gca_resource.job_detail.task_details)): TASK_ID = job.gca_resource.job_detail.task_details[_].task_id EXECUTE_OUTPUT = ( PIPELINE_ROOT + "/" + PROJECT_NUMBER + "/" + JOB_ID + "/" + output_task_name + "_" + str(TASK_ID) + "/executor_output.json" ) GCP_RESOURCES = ( PIPELINE_ROOT + "/" + PROJECT_NUMBER + "/" + JOB_ID + "/" + output_task_name + "_" + str(TASK_ID) + "/gcp_resources" ) if tf.io.gfile.exists(EXECUTE_OUTPUT): ! gsutil cat $EXECUTE_OUTPUT break elif tf.io.gfile.exists(GCP_RESOURCES): ! 
gsutil cat $GCP_RESOURCES break return EXECUTE_OUTPUT print("bigquery-query-job") artifacts = print_pipeline_output(pipeline, "bigquery-query-job") print("\n\n") print("bigquery-create-model-job") artifacts = print_pipeline_output(pipeline, "bigquery-create-model-job") print("\n\n") print("bigquery-evaluate-model-job") artifacts = print_pipeline_output(pipeline, "bigquery-evaluate-model-job") print("\n\n") print("bigquery-export-model-job") artifacts = print_pipeline_output(pipeline, "bigquery-export-model-job") print("\n\n") print("model-upload") artifacts = print_pipeline_output(pipeline, "model-upload") print("\n\n") print("model-batch-predict") artifacts = print_pipeline_output(pipeline, "model-batch-predict") output = !gsutil cat $artifacts output = json.loads(output[0]) print("\n\n") print( output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][ "gcsOutputDirectory" ] ) print("model-evaluation") artifacts = print_pipeline_output(pipeline, "model-evaluation")Delete a pipeline jobAfter a pipeline job is completed, you can delete the pipeline job with the method `delete()`. Prior to completion, a pipeline job can be canceled with the method `cancel()`.pipeline.delete()Delete the BigQuery model and datasetNext, delete the BigQuery model and dataset.try: job = bqclient.delete_model("bqml_tutorial.penguins_model") except: pass job = bqclient.delete_dataset("bqml_tutorial", delete_contents=True)Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial.- Dataset- Pipeline- Model- Endpoint- AutoML Training Job- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucketdelete_all = True if delete_all: # Delete the dataset using the Vertex dataset object try: if "dataset" in globals(): dataset.delete() except Exception as e: print(e) # Delete the model using the Vertex model object try: if "model" in globals(): model.delete() except Exception as e: print(e) # Delete the endpoint using the Vertex endpoint object try: if "endpoint" in globals(): endpoint.undeploy_all() endpoint.delete() except Exception as e: print(e) # Delete the AutoML or Pipeline training job try: if "dag" in globals(): dag.delete() except Exception as e: print(e) # Delete the custom training job try: if "job" in globals(): job.delete() except Exception as e: print(e) # Delete the batch prediction job using the Vertex batch prediction object try: if "batch_predict_job" in globals(): batch_predict_job.delete() except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object try: if "hpt_job" in globals(): hpt_job.delete() except Exception as e: print(e) if "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAMEOne-hot encoding of words and charactersThis notebook contains the code examples from Chapter 6, Section 1 of [Deep Learning with Python](https://tensorflow.blog/케라스-창시자에게-배우는-딥러닝/). The book contains much more content and many more figures; this notebook only includes the explanations related to the source code. The explanations in this notebook are written for Keras version 2.2.2. Because the notebook is re-tested whenever a new Keras version is released, the explanations and code output may differ slightly from the book.----One-hot encoding is the most common and most basic way to turn a token into a vector. You already saw it in the IMDB and Reuters examples in Chapter 3 (done with words, in that case). It consists of assigning a unique integer index to every word, then turning that integer index i into a binary vector of size N (the size of the vocabulary); the vector is all zeros except for the i-th entry, which is 1.Of course, one-hot encoding can also be done at the character level. To make it clear what one-hot encoding is and how to implement it, here are two simple examples: one for words, one for characters.
Word-level one-hot encoding (toy example):import numpy as np # Initial data: each element is a sample # (in this example a sample is a sentence, but it could be an entire document) samples = ['The cat sat on the mat.', 'The dog ate my homework.'] # Build an index of all tokens in the data token_index = {} for sample in samples: # Tokenize the samples via the split() method. # In real life you would also strip punctuation and special characters. for word in sample.split(): if word not in token_index: # Assign a unique index to each word. token_index[word] = len(token_index) + 1 # Index 0 is not used. # Vectorize the samples. # Only the first max_length words of each sample are used. max_length = 10 # The array to store the results results = np.zeros((len(samples), max_length, max(token_index.values()) + 1)) for i, sample in enumerate(samples): for j, word in list(enumerate(sample.split()))[:max_length]: index = token_index.get(word) results[i, j, index] = 1.Character-level one-hot encoding (toy example)import string samples = ['The cat sat on the mat.', 'The dog ate my homework.'] characters = string.printable # All printable ASCII characters token_index = dict(zip(characters, range(1, len(characters) + 1))) max_length = 50 results = np.zeros((len(samples), max_length, max(token_index.values()) + 1)) for i, sample in enumerate(samples): for j, character in enumerate(sample[:max_length]): index = token_index.get(character) results[i, j, index] = 1.Keras has built-in utilities for turning raw text data into word- or character-level one-hot encodings. Using them is recommended, because they take care of a number of important features, such as stripping special characters and only keeping the N most frequent words (a common restriction to avoid dealing with very large input vector spaces). Word-level one-hot encoding with Keras:from keras.preprocessing.text import Tokenizer samples = ['The cat sat on the mat.', 'The dog ate my homework.'] # Create a Tokenizer configured to only keep the 1,000 most frequent words. tokenizer = Tokenizer(num_words=1000) # Build the word index. tokenizer.fit_on_texts(samples) # Turn strings into lists of integer indices. sequences = tokenizer.texts_to_sequences(samples) # You can also get the one-hot binary representations directly. # Vectorization modes other than one-hot encoding are supported too! one_hot_results = tokenizer.texts_to_matrix(samples, mode='binary') # Recover the computed word index. word_index = tokenizer.word_index print('Found %s unique tokens.' % len(word_index))Found 9 unique tokens.A variant of one-hot encoding is the one-hot hashing trick, which is used when the number of unique tokens in the vocabulary is too large to handle explicitly. Instead of explicitly assigning an index to each word and keeping these indices in a dictionary, you hash words into vectors of fixed size, typically with a very simple hashing function. The main advantage is that it does away with maintaining an explicit word index, which saves memory and allows online encoding of the data (tokens can be generated before all of the data has been seen). The one drawback is hash collisions: if two words end up with the same hash, the machine learning model looking at these hashes cannot tell the words apart. The likelihood of hash collisions decreases when the dimensionality of the hashing space is much larger than the total number of unique tokens being hashed. Word-level one-hot encoding with the hashing trick (toy example):samples = ['The cat sat on the mat.', 'The dog ate my homework.'] # Store the words as vectors of size 1,000. # If you have close to 1,000 words (or more), hashing collisions will increase and the accuracy of this encoding will drop. dimensionality = 1000 max_length = 10 results = np.zeros((len(samples), max_length, dimensionality)) for i, sample in enumerate(samples): for j, word in list(enumerate(sample.split()))[:max_length]: # Hash the word into a random integer index between 0 and 1,000.
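# Note: Python's built-in hash() is salted per interpreter session (PYTHONHASHSEED), so the indices below are not reproducible across runs; a deterministic hash such as zlib.crc32(word.encode()) % dimensionality would be.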
index = abs(hash(word)) % dimensionality results[i, j, index] = 1.Surface Determination Workflowimport numpy as np import matplotlib.pyplot as plt import os import h5py import sys from tomo_encoders import Patches, DataFile import tensorflow as tf import time, glob import open3d as o3d sys.path.append('/data02/MyArchive/aisteer_3Dencoders/TomoEncoders/scratchpad/surface_determination/trainer') from tomo_encoders.neural_nets.surface_segmenter import SurfaceSegmenter from tomo_encoders.misc.feature_maps_vis import view_midplanes from tomo_encoders.misc.voxel_processing import normalize_volume_gpu from tomo_encoders.misc.viewer import view_midplanes from tqdm import tqdm gt_path = '/data02/MyArchive/tomo_datasets/ZEISS_try2/GT_VOLS/Sample2' ct_path = '/data02/MyArchive/tomo_datasets/ZEISS_try2/Sample2_CT' psize = (16,16,16) size_lab = '16x16' ds_gt = DataFile(gt_path, tiff = True, VERBOSITY = 0) vol_gt = ds_gt.read_full() # t00 = time.time() # p = Patches(vol_gt.shape, initialize_by = 'regular-grid', patch_size = psize) # vols = p.extract(vol_gt, psize) # ystd = np.std(vols, axis = (1,2,3)) # idxs = np.where(ystd > 0.0)[0] # vols = vols[idxs,...] # p = p.select_by_indices(idxs) # p.dump(f'/data02/MyArchive/tomo_datasets/ZEISS_try2/sample2_edge_patches_{size_lab}.hdf5') # print('time taken: {time.time() - t00}') p = Patches(None, initialize_by = 'file', fpath = f'/data02/MyArchive/tomo_datasets/ZEISS_try2/sample2_edge_patches_{size_lab}.hdf5') y_true = p.extract(vol_gt, psize) vol_out = np.zeros(p.vol_shape) vols = np.ones((len(p),) + psize) p.fill_patches_in_volume(vols, vol_out) fig, ax = plt.subplots(1,3, figsize = (16,8)) view_midplanes(vol_out, ax = ax)Measure IoU in datasetsfrom tomo_encoders.misc.img_stats import calc_SNR, calc_jac_acc, calc_dice_coeff from tomo_encoders import DataFile import pandas as pd flist = glob.glob('/data02/MyArchive/tomo_datasets/ZEISS_try2/Sample2_SEG/*') ct_path = '/data02/MyArchive/tomo_datasets/ZEISS_try2/Sample2_CT' cols = ['X', 'CT-algo', 'SURF-model', 'IoU', 'SNR', 'ftag'] df = pd.DataFrame(columns = cols) for ii, fpath in enumerate(flist): ftag = os.path.split(fpath)[-1] r1, r2, r3 = ftag.split('_') if 'FDK' in r1: ct_algo = 'FDK' else: ct_algo = 'DR' X = int(r1.split(ct_algo)[-1].split('X')[0]) surf_model = r3.split('a0')[-1] # IoU ds = DataFile(fpath, tiff = True, VERBOSITY = 0) vol_seg = ds.read_full() y_seg = p.extract(vol_seg, psize) iou = calc_jac_acc(y_true, y_seg) # SNR ct_fname = os.path.join(ct_path, f'{ct_algo}{X}X') ds_ct = DataFile(ct_fname, tiff = True, VERBOSITY = 0) vol_ct = ds_ct.read_full() snr = calc_SNR(vol_ct[150:-150,300:-300,300:-300], vol_gt[150:-150,300:-300,300:-300]) # Dice # calc_dice_coeff(y_true, y_seg) new_item = dict(zip(cols, [X,ct_algo,surf_model,iou,snr, ftag])) print(f'{new_item}') df = df.append(new_item,ignore_index = True) df.to_csv('/home/atekawade/Dropbox/Arg/transfers/ZEISS_v2/stats.csv', index = False) df # pc = np.asarray(np.where(vol_dr != vol_fdk)).T # pcd = o3d.geometry.PointCloud() # pcd.points = o3d.utility.Vector3dVector(pc[:,::-1]) # o3d.visualization.draw_geometries([pcd]) # from skimage import measure # verts, faces, normals, values = measure.marching_cubes(vol_fdk[::4,::4,::4], 0) # mesh = o3d.geometry.TriangleMesh() # mesh.vertices = o3d.utility.Vector3dVector(verts) # mesh.triangles = o3d.utility.Vector3iVector(faces) # mesh.compute_vertex_normals() # o3d.visualization.draw_geometries([mesh])Observations and Insights Dependencies and starter code# Dependencies and Setup import matplotlib.pyplot as 
plt import pandas as pd import scipy.stats as st import numpy as np from scipy.stats import linregress # Study data files mouse_metadata = "data/Mouse_metadata.csv" study_results = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata) study_results = pd.read_csv(study_results) mouse_metadata.head(5) study_results.head(5) # Combine the data into a single dataset trial_data = pd.merge(mouse_metadata, study_results, on='Mouse ID') trial_data.head(5)Summary statistics# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen tumor_volume_mean = trial_data.groupby('Drug Regimen')['Tumor Volume (mm3)'].mean() tumor_volume_median = trial_data.groupby('Drug Regimen')['Tumor Volume (mm3)'].median() tumor_volume_std = trial_data.groupby('Drug Regimen')['Tumor Volume (mm3)'].std() tumor_volume_sem = trial_data.groupby('Drug Regimen')['Tumor Volume (mm3)'].sem() tumor_study_summary = pd.DataFrame({ "Tumor Volume Mean" : tumor_volume_mean, "Tumor Volume Median" : tumor_volume_median, "Tumor Volume STD" : tumor_volume_std, "Tumor Volume SEM" : tumor_volume_sem}) tumor_study_summaryBar plots# Generate a bar plot showing number of data points for each treatment regimen using pandas treatment_count_trim = trial_data[["Mouse ID","Drug Regimen"]] treatment_count_rename = treatment_count_trim.rename(columns={"Mouse ID":"Treatment Count"}) treatment_count_df = treatment_count_rename.groupby("Drug Regimen").count() treatment_count_df treatment_chart = treatment_count_df.plot(kind="bar", figsize=(10,7)) plt.title("Drug Regimen Treatment Totals") treatment_chart.set_xlabel("Treatment Type") treatment_chart.set_ylabel("Number of Treatments") plt.show() # Generate a bar plot showing number of data points for each treatment regimen using pyplot x_axis = x_axis = np.arange(len(treatment_count_df)) tick_locations = [value for value in x_axis] y_axis = treatment_count_df["Treatment Count"] plt.figure(figsize=(10,7)) plt.bar(x_axis, y_axis, color='r', align='center') plt.xticks(tick_locations, ["Capomulin", "Ceftamin", "Infubinol", "Ketapril", "Naftisol", "Placebo", "Propriva", "Ramicane", "Stelasyn", "Zoniferol"],rotation='vertical') plt.title("Drug Regimen Treatment Totals") plt.xlabel("Treatment Type") plt.ylabel("Number of Treatments") plt.show()Pie plots# Generate a pie plot showing the distribution of female versus male mice using pandas gender_count_trim = trial_data[["Mouse ID","Sex"]] gender_count_index = gender_count_trim.set_index("Sex") gender_count_rename = gender_count_index.rename(columns={"Mouse ID":"Treatment Count"}) gender_count_df = gender_count_rename.groupby("Sex").count() gender_list = gender_count_df.keys() gender_count_df gender_count_chart = gender_count_df.plot(kind="pie", y=gender_list, title=("Treatment by Sex")) gender_count_chart.set_ylabel("") plt.show() # Generate a pie plot showing the distribution of female versus male mice using pyplot sex = list(gender_count_df.index.values) count = gender_count_df['Treatment Count'] colors = ['yellowgreen','red'] plt.title("Treatment by Sex") plt.pie(count, labels=sex, colors=colors, autopct="%1.1f%%") plt.axis("equal") plt.show()Quartiles, outliers and boxplotsgroup_by_mouse_max = trial_data.groupby("Mouse ID").max() group_by_mouse_max_index = group_by_mouse_max.reset_index() mouse_max = group_by_mouse_max_index[["Mouse ID", "Timepoint"]] merge_df = pd.merge(mouse_max, trial_data, on =["Mouse ID", "Timepoint"], 
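# keep, for each mouse, only the row at its final (maximum) timepoint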
how="left") top_performers = merge_df.loc[(merge_df["Drug Regimen"] == "Capomulin") | (merge_df["Drug Regimen"] == "Ramicane") | (merge_df["Drug Regimen"] == "Infubinol") | (merge_df["Drug Regimen"] == "Ceftamin")] top_performers.head(10) tumor_size = top_performers["Tumor Volume (mm3)"] quartiles = round(tumor_size.quantile([.25,.5,.75])) lowerq = round(quartiles[0.25]) upperq = round(quartiles[0.75]) iqr = round(upperq-lowerq) print(f"The lower quartile of Tumor Size (mm3) is: {lowerq}") print(f"The upper quartile of Tumor Size (mm3) is: {upperq}") print(f"The interquartile range of Tumor Size (mm3) is: {iqr}") print(f"The the median of Tumor Size (mm3) is: {quartiles[0.5]} ") lower_bound = lowerq - (1.5*iqr) upper_bound = upperq + (1.5*iqr) print(f"Values below {lower_bound} could be outliers.") print(f"Values above {upper_bound} could be outliers.") # Generate a box plot of the final tumor volume of each mouse across four regimens of interest plt.figure(figsize=(5,8)) capomulin = top_performers.loc[top_performers["Drug Regimen"] == "Capomulin"]["Tumor Volume (mm3)"] ramicane = top_performers.loc[top_performers["Drug Regimen"] == "Ramicane"] ["Tumor Volume (mm3)"] infubinol = top_performers.loc[top_performers["Drug Regimen"] == "Infubinol"]["Tumor Volume (mm3)"] ceftamin = top_performers.loc[top_performers["Drug Regimen"] == "Ceftamin"]["Tumor Volume (mm3)"] labels = ["Capomulin", "Ramicane", "Infubinol", "drugs"] plt.boxplot([capomulin,ramicane,infubinol,ceftamin], labels=labels) plt.title("Treatment Comparison") plt.ylabel("Final Tumor Volume (mm3)") plt.show()Line and scatter plots# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin line_plot_columns = trial_data[["Mouse ID", "Drug Regimen", "Timepoint","Tumor Volume (mm3)"]] capomulin_only = line_plot_columns.loc[(line_plot_columns["Drug Regimen"]=="Capomulin") & (line_plot_columns["Mouse ID"]=="s185")] line_graph_columns = capomulin_only[["Timepoint", "Tumor Volume (mm3)"]] line_graph_index = line_graph_columns.set_index("Timepoint") line_plot_graph = line_graph_index.plot.line(title="Capomulin Treatment of Mouse s185") plt.xlabel("Timepoint") plt.ylabel("Tumor Volume (mm3)") plt.show() # Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen scatter_plot_columns = trial_data[["Mouse ID","Drug Regimen", "Weight (g)", "Tumor Volume (mm3)"]] capomulin_reg_only = scatter_plot_columns.loc[line_plot_columns["Drug Regimen"]=="Capomulin",:] scatter_plot_avg = capomulin_reg_only.groupby(['Mouse ID']).mean() scatter_plot_avg.plot(kind="scatter", x='Weight (g)', y='Tumor Volume (mm3)') # Calculate the correlation coefficient and linear regression model for mouse weight and average tumor volume for the Capomulin regimen x_values = scatter_plot_avg["Weight (g)"] y_values = scatter_plot_avg["Tumor Volume (mm3)"] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.scatter(x_values,y_values) plt.plot(x_values,regress_values,"r-") plt.xlabel('Weight (g)') plt.ylabel('Tumor Volume') plt.show() corr_matrix_df = scatter_plot_avg[["Weight (g)","Tumor Volume (mm3)"]] corr_matrix_df.columns = corr_matrix_df.columns.str.replace("[_]", " ") corr_matrix_df.corr(method='pearson')Create templateN3_LICENSE_SYMBOL = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' N4_LICENSE_SYMBOL = '0123456789012345678901234567890123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' 
PLATE_WIDTH = 190 PLATE_HEIGHT = 140 PLATE_COLOR_MAX = [254, 254, 254] PLATE_COLOR_MIN = [245, 245, 235] PLATE_NUMBER_FONT = ImageFont.truetype('./Soxe2banh-v2.ttf', 62) PLATE_NUMBER_COLOR_MAX = [30, 30, 30] PLATE_NUMBER_COLOR_MIN = [0, 0, 0] IMAGE_SIZE = (512, 512) ROTATE_X_MAX = 10 ROTATE_Y_MAX = 10 ROTATE_Z_MAX = 10 # TRANSLATE_X_MAX = 50 # TRANSLATE_Y_MAX = 50 # TRANSLATE_Z_MAX = 50 TRANSLATE_X_MAX = 0 TRANSLATE_Y_MAX = 0 TRANSLATE_Z_MAX = 0 augmentor = iaa.Sometimes(0.8,[ iaa.Affine(scale=(0.8, 1.4), translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)}), iaa.OneOf([ iaa.FastSnowyLandscape(lightness_multiplier=2.0), iaa.Clouds(), iaa.Fog(), iaa.GammaContrast(gamma=3.0), iaa.MotionBlur(k=15), iaa.CoarseDropout(p=0.2, size_percent=1.0), iaa.CoarseDropout(p=0.2, size_percent=1.0, per_channel=True), iaa.JpegCompression(compression=99) ])]) def random_license_number(): n1 = np.random.randint(0, 10) n2 = np.random.randint(0, 10) n3 = random.choice(N3_LICENSE_SYMBOL) n4 = random.choice(N4_LICENSE_SYMBOL) n5 = np.random.randint(0, 10) n6 = np.random.randint(0, 10) n7 = np.random.randint(0, 10) n8 = np.random.randint(0, 10) n9 = np.random.randint(0, 10) return '{}{}_{}{}\n{}{}{}.{}{}'.format(n1, n2, n3, n4, n5, n6, n7, n8, n9) def create_plate(number): plate_color = (np.random.randint(PLATE_COLOR_MIN[0], PLATE_COLOR_MAX[0]), np.random.randint(PLATE_COLOR_MIN[1], PLATE_COLOR_MAX[1]), np.random.randint(PLATE_COLOR_MIN[2], PLATE_COLOR_MAX[2])) image = Image.new('RGB', (PLATE_WIDTH, PLATE_HEIGHT), plate_color) d = ImageDraw.Draw(image) d.line([(0, 0), (PLATE_WIDTH - 1, 0), (PLATE_WIDTH - 1, PLATE_HEIGHT - 1), (0, PLATE_HEIGHT - 1), (0, 0)], fill=(0, 0, 0), width=10, joint='curve') plate_number_color = (np.random.randint(PLATE_NUMBER_COLOR_MIN[0], PLATE_NUMBER_COLOR_MAX[0]), np.random.randint(PLATE_NUMBER_COLOR_MIN[1], PLATE_NUMBER_COLOR_MAX[1]), np.random.randint(PLATE_NUMBER_COLOR_MIN[2], PLATE_NUMBER_COLOR_MAX[2])) d.multiline_text([4, 13], number, font=PLATE_NUMBER_FONT, fill=plate_number_color, align='center') return image def rotate_along_axis(image, theta=0, phi=0, gamma=0, dx=0, dy=0, dz=0): width, height = np.shape(image)[:2] # Get radius of rotation along 3 axes theta, phi, gamma = np.deg2rad([theta, phi, gamma]) # Get ideal focal length on z axis # NOTE: Change this section to other axis if needed d = np.sqrt(height**2 + width**2) focal = d / (2 * np.sin(gamma) if np.sin(gamma) != 0 else 1) dz = focal # Projection 2D -> 3D matrix A1 = np.array([ [1, 0, -width/2], [0, 1, -height/2], [0, 0, 1], [0, 0, 1]]) # Rotation matrices around the X, Y, and Z axis RX = np.array([ [1, 0, 0, 0], [0, np.cos(theta), -np.sin(theta), 0], [0, np.sin(theta), np.cos(theta), 0], [0, 0, 0, 1]]) RY = np.array([ [np.cos(phi), 0, -np.sin(phi), 0], [0, 1, 0, 0], [np.sin(phi), 0, np.cos(phi), 0], [0, 0, 0, 1]]) RZ = np.array([ [np.cos(gamma), -np.sin(gamma), 0, 0], [np.sin(gamma), np.cos(gamma), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) # Composed rotation matrix with (RX, RY, RZ) R = np.dot(np.dot(RX, RY), RZ) # Translation matrix T = np.array([ [1, 0, 0, dx], [0, 1, 0, dy], [0, 0, 1, dz], [0, 0, 0, 1]]) # Projection 3D -> 2D matrix A2 = np.array([ [focal, 0, width/2, 0], [0, focal, height/2, 0], [0, 0, 1, 0]]) # Final transformation matrix M = np.dot(A2, np.dot(T, np.dot(R, A1))) return transform.warp(image, M, mode='edge', preserve_range=True) def convert_white_to_transparent(image): new_image = [] for pixel in image.getdata(): if pixel == (255, 255, 255, 255): new_image.append((255, 255, 255, 0)) else: 
new_image.append(pixel) image.putdata(new_image) return image def add_noise(image): image = np.array(image) image = augmentor.augment_image(image) return image def add_random_bg(image): image = ImageOps.expand(image, border=200, fill='white') image = image.convert('RGBA') image = convert_white_to_transparent(image) idx = np.random.randint(0, len(bg_images)) path = bg_images[idx] bg_image = Image.open(path) bg_image = bg_image.resize(IMAGE_SIZE, Image.ANTIALIAS) bg_image.paste(image, (0, 0), image) image = bg_image image = np.array(image) r_theta = np.random.randint(-ROTATE_X_MAX, ROTATE_X_MAX) r_phi = np.random.randint(-ROTATE_Y_MAX, ROTATE_Y_MAX) r_gamma = np.random.randint(-ROTATE_Z_MAX, ROTATE_Z_MAX) r_dx = 50 r_dy = 50 r_dz = 0 image = rotate_along_axis(image, theta=r_theta, phi=r_phi, gamma=r_gamma, dx=r_dx, dy=r_dy, dz=r_dz) image = image.astype(np.uint8) image = Image.fromarray(image) return image n = random_license_number() a = create_plate(n) b = add_random_bg(a) b = add_noise(b) print(b.shape) plt.imshow(b) plt.show() s = b x = iaa.OneOf([ iaa.Affine(scale=(0.8, 1.2)), # iaa.FastSnowyLandscape(lightness_multiplier=2.0), # iaa.Clouds(), # iaa.Fog(), # iaa.GammaContrast(gamma=3.0), # iaa.MotionBlur(k=20), # iaa.CoarseDropout(p=0.2, size_percent=1.0), ]) s = x.augment_image(s) plt.imshow(s) plt.show()Generate license platesNUM_OF_SAMPLE = 50000 SAVE_DIR_1 = 'E:/Datasets/Vietnamese Motorbike License Plate/license_plate' for i in tqdm(range(NUM_OF_SAMPLE)): license = random_license_number() image = create_plate(license) image.save('{}/{}.jpg'.format(SAVE_DIR_1, license.replace('_', '').replace('\n', '').replace('.', '')))100%|███████████████████████████████████████████████████████████████████████████| 50000/50000 [03:33<00:00, 233.92it/s]Generate license plates in scenelicense_plates = os.listdir(SAVE_DIR_1) license_plates = [f for f in license_plates if f.endswith('.jpg')] SAVE_DIR_2 = 'E:/Datasets/Vietnamese Motorbike License Plate/images' generated = os.listdir(SAVE_DIR_2) license_plates = [f for f in license_plates if f not in generated] for plate in tqdm(license_plates): image = Image.open('{}/{}'.format(SAVE_DIR_1, plate)) image = add_random_bg(image) while True: try: image = add_noise(image) break except: pass image = Image.fromarray(image) image.save('{}/{}'.format(SAVE_DIR_2, plate)) SAVE_DIR_2 = 'E:/Datasets/Vietnamese Motorbike License Plate/images' from tqdm import tqdm import os for file in tqdm(os.listdir(SAVE_DIR_1)): a = file.split('.jpg')[0] a = a.replace('-', '').replace('.', '') a = a + '.jpg' os.rename(SAVE_DIR_1 + '/' + file, SAVE_DIR_1 + '/' + a)100%|███████████████████████████████████████████████████████████████████████████| 10000/10000 [00:11<00:00, 860.10it/s]选择 布尔类型、数值和表达式![](../Photo/33.png)- 注意:比较运算符的相等是两个等号,一个等到代表赋值- 在Python中可以用整型0来代表False,其他数字来代表True- 后面还会讲到 is 在判断语句中的用发fs = eval(input("输入你的分数: ")) if fs >= 90: print("您的成绩优秀") elif 75 <= fs <90: print("您的成绩良好") elif 60 <= fs < 75: print("您的成绩及格") else : print("您的成绩不合格")输入你的分数: 90 您的成绩优秀字符串的比较使用ASCII值 Markdown - https://github.com/younghz/Markdown EP:- - 输入一个数字,判断其实奇数还是偶数num = eval (input("输入一个数字")) if num % 2 == 0: print("这是一个偶数") else: print("这是一个奇数")输入一个数字5 这是一个奇数产生随机数字- 函数random.randint(a,b) 可以用来产生一个a和b之间且包括a和b的随机整数import random randint = random.randint(0,100) while True: x = eval ( input("请输入一个数字")) if x == randint: print("恭喜你,猜对了") break elif x > randint: print("太大了,请猜小一点") else : print("太小了,请猜大一点")请输入一个数字50 太小了,请猜大一点 请输入一个数字75 太小了,请猜大一点 请输入一个数字87 太小了,请猜大一点 请输入一个数字94 太大了,请猜小一点 请输入一个数字90 太大了,请猜小一点 
请输入一个数字88 太小了,请猜大一点 请输入一个数字89 恭喜你,猜对了其他random方法- random.random 返回0.0到1.0之间前闭后开区间的随机浮点- random.randrange(a,b) 前闭后开 EP:- 产生两个随机整数number1和number2,然后显示给用户,使用户输入数字的和,并判定其是否正确- 进阶:写一个随机序号点名程序import random num1 = random.randint(0,100) num2 = random.randint(0,100) num_ = num1 + num2 print("第一个数是 " + str(num1)) print("第二个数是 " + str(num2)) while True: x = eval ( input("请输入他们的和")) if x == num_: print("你真棒") break else : print("太笨啦,再答一次") name = random.randint(1,50) print(name) import random jibie = input("输入你的级别") if jibie == "高级用户": randint = random.randint(0,1) if randint == 0: print("恭喜你获得一等奖") else : print("恭喜你获得二等奖") elif jibie == "中级用户": randint = random.randint(0,50) if randint == 0: print("恭喜你获得一等奖") elif randint == 1: print("恭喜你获得二等奖") else : print("很遗憾未中奖,提升你的用户等级可获得更高的抽奖几率和抽奖次数") else : randint = random.randint(0,100) if randint == 0: print("恭喜你获得一等奖") elif randint == 1: print("恭喜你获得二等奖") else : print("很遗憾未中奖,提升你的用户等级可获得更高的抽奖几率和抽奖次数")输入你的级别普通用户 很遗憾未中奖,提升你的用户等级可获得更高的抽奖几率和抽奖次数if语句- 如果条件正确就执行一个单向if语句,亦即当条件为真的时候才执行if内部的语句- Python有很多选择语句:> - 单向if - 双向if-else - 嵌套if - 多向if-elif-else - 注意:当语句含有子语句的时候,那么一定至少要有一个缩进,也就是说如果有儿子存在,那么一定要缩进- 切记不可tab键和space混用,单用tab 或者 space- 当你输出的结果是无论if是否为真时都需要显示时,语句应该与if对齐age = eval ( input("年龄 ")) if age <= 30: looks = eval ( input("长相 ")) if looks == True: income = eval ( input("收入 ")) if income == True: print("去见面") else : print("不见面") else : print("不见面") else : print("不见面")年龄 31 不见面EP:- 用户输入一个数字,判断其实奇数还是偶数- 进阶:可以查看下4.5实例研究猜生日month = eval (input("输入月")) day = eval (input("输入日")) num1 = random.randint(1,12) num2 = random.randint(1,30)双向if-else 语句- 如果条件为真,那么走if内部语句,否则走else内部语句 EP:- 产生两个随机整数number1和number2,然后显示给用户,使用户输入数字,并判定其是否正确,如果正确打印“you‘re correct”,否则打印正确错误 嵌套if 和多向if-elif-else![](../Photo/35.png) EP:- 提示用户输入一个年份,然后显示表示这一年的动物![](../Photo/36.png)- 计算身体质量指数的程序- BMI = 以千克为单位的体重除以以米为单位的身高![](../Photo/37.png)years = eval (input("输入一个年份")) if years % 12 == 0: print("今年是猴年") elif years % 12 == 1: print("今年是鸡年") elif years % 12 == 2: print("今年是狗年") elif years % 12 == 3: print("今年是猪年") elif years % 12 == 4: print("今年是鼠年") elif years % 12 == 5: print("今年是牛年") elif years % 12 == 6: print("今年是虎年") elif years % 12 == 7: print("今年是兔年") elif years % 12 == 8: print("今年是龙年") elif years % 12 == 9: print("今年是蛇年") elif years % 12 == 10: print("今年是马年") else: print("今年是羊年") height = eval (input ("请输入您的身高(单位为米)")) weight = eval (input ("请输入您的体重(单位为千克)")) BMI = weight / (height * height) if BMI < 18.5: print("您的体重超轻") elif 18.5 <= BMI <= 25.0: print("您的体重标准") elif 25.0 <= BMI <= 30.0: print("您的体重超重") else: print("您的体重痴肥")请输入您的身高(单位为米)1.87 请输入您的体重(单位为千克)90 您的体重超重逻辑运算符![](../Photo/38.png) ![](../Photo/39.png)![](../Photo/40.png) EP:- 判定闰年:一个年份如果能被4整除但不能被100整除,或者能被400整除,那么这个年份就是闰年- 提示用户输入一个年份,并返回是否是闰年- 提示用户输入一个数字,判断其是否为水仙花数years = eval (input ("输入一个年份")) if (years % 4 == 0) and (years % 100 != 0) or (years % 400 == 0): print(str(years) + "是闰年") else: print(str(years) + "不是闰年") shu = eval (input("输入一个3位数字")) ge = shu % 10 a = shu // 10 shi = a % 10 b = a // 10 bai = b % 10 if ge **3 + shi **3 + bai **3 == shu: print("这是一个水仙花数") else: print("这不是一个水仙花数")输入一个3位数字153 这是一个水仙花数实例研究:彩票![](../Photo/41.png)import random num = eval(input("输入一个两位数:")) randint = random.randint(10,99) a1 = randint % 10 #a1为随机数的各位数 b1 = randint // 10 #b1为随机数的十位数 a2 = num % 10 #a2为输入数的各位数 b2 = num // 10 #b2为输入数的十位数 print("中奖号码" + str(randint)) if (a1 == a2) and (b1 == b2): print("恭喜你,奖金为10000元") elif (a1 == b2) and (b1 == a2): print("恭喜你,奖金为3000元") elif (a1 == a2) and (b1 != b2) or (a1 == b2) and (b1 != a2) or (a1 != a2) 
and (b1 == b2) or (a1 != b2) and (b1 == a2): print("恭喜你,奖金为1000元") else: print("未中奖")输入一个两位数:20 中奖号码32 恭喜你,奖金为1000元Homework- 1![](../Photo/42.png)import math a,b,c = eval (input("输入3个数")) if b**2 - 4 * a * c > 0: r1 = (-b + math.sqrt(b**2 - 4 * a * c)) / 2 * a r2 = (-b - math.sqrt(b**2 - 4 * a * c)) / 2 * a print("这方程有两个根:" + str(r1) + " and " + str(r2)) elif b**2 - 4 * a * c == 0: r1 = (-b + math.sqrt(b**2 - 4 * a * c)) / 2 * a print("这方程有一个根:" + str(r1)) else : print("这个方程没有根")输入3个数1,2,3 这个方程没有根- 2![](../Photo/43.png)import random num1 = random.randint(0,99) num2 = random.randint(0,99) num_ = num1 + num2 x = eval ( input("请输入他们的和")) if x == num_: print("真") else : print("假")请输入他们的和50 假- 3![](../Photo/44.png)while True: day = eval(input("今天的编号是")) if day <= 6 : day2 = eval(input("今天之后到未来的多少天")) day_ = (day + day2) % 7 if day == 0: x = ("今天是星期日 ") elif day == 1: x = ("今天是星期一 ") elif day == 2: x = ("今天是星期二 ") elif day == 3: x = ("今天是星期三 ") elif day == 4: x = ("今天是星期四 ") elif day == 5: x = ("今天是星期五 ") else : x = ("今天是星期六 ") if day_ == 0: a = ("星期日") elif day_ == 1: a = ("星期一") elif day_ == 2: a = ("星期二") elif day_ == 3: a = ("星期三") elif day_ == 4: a = ("星期四") elif day_ == 5: a = ("星期五") else : a = ("星期六") print(str(x) + str(day2) + "天后是" + str(a)) break else : print("您输入的编号不正确,请重新输入,输入的范围为0-6")今天的编号是8 您输入的编号不正确,请重新输入,输入的范围为0-6 今天的编号是1 今天之后到未来的多少天3 今天是星期一 3天后是星期四- 4![](../Photo/45.png)a,b,c = eval (input("请输入三个整数")) if a >= b >= c: print(c,b,a) elif a >= c >= b: print(b,c,a) elif b >= c >= a: print(a,c,b) elif b >= a >= c: print(c,a,b) elif c >= a >= b: print(b,a,c) else : print(a,b,c)请输入三个整数5,5,3 3 5 5- 5![](../Photo/46.png)weight,price = eval (input("输入第一种大米的重量和价格:")) weight_,price_ = eval (input("输入第二种大米的重量和价格:")) a = price / weight b = price_ / weight_ if a < b: print("第一种大米更好") elif a > b: print("第二种大米更好") else : print("二种大米一样好")输入第一种大米的重量和价格:50,24.59 输入第二种大米的重量和价格:25,11.99 第二种大米更好- 6![](../Photo/47.png)month = eval (input("输入月")) years = eval (input("输入年")) if (years % 4 == 0) and (years % 100 != 0) or (years % 400 == 0): if month == 2: day = 29 elif month == 1 or 3 or 5 or 7 or 8 or 10 or 12: day = 31 else : day = 30 else: if month == 2: day = 28 elif month == 1 or 3 or 5 or 7 or 8 or 10 or 12: day = 31 else : day = 30 print(str(years) + "年" + str(month) + "月有" + str(day) + "天")输入月3 输入年2005 2005年3月有31天- 7![](../Photo/48.png)import random guess = eval (input("输入你的猜想(0代表正面,1代表反面)")) randint = random.randint(0,1) if guess == randint: print("猜对了,你真棒") else : print("猜错了")输入你的猜想(0代表正面,1代表反面)1 猜对了,你真棒- 8![](../Photo/49.png)import random randint = random.randint(0,2) guess = eval (input("输入你的猜想(0代表剪刀,1代表石头,2代表布)")) if (guess == 0) and (randint == 2) or (guess == 1) and (randint == 0) or (guess == 2) and (randint == 1): print("你赢了") elif (guess == 0) and (randint == 1) or (guess == 1) and (randint == 2) or (guess == 2) and (randint == 0): print("你输了") else : print("平局")输入你的猜想(0代表剪刀,1代表石头,2代表布)2 你赢了- 9![](../Photo/50.png)years = eval (input("输入一个年份:")) month = eval (input("输入一个月份:1-12:")) day = eval (input("输入一个日期:1-31:")) if month == 1: m = 13 y = years - 1 j = y / 100 // 1 k = y % 100 q = day h = (q + 26 * (m + 1) / 10 // 1 + k + k / 4 // 1 + j / 4 // 1 + 5 * j) % 7 elif month == 2: m = 14 y = years - 1 j = y / 100 // 1 k = y % 100 q = day h = (q + 26 * (m + 1) / 10 // 1 + k + k / 4 // 1 + j / 4 // 1 + 5 * j) % 7 else : m = month j = y / 100 // 1 k = y % 100 q = day h = (q + 26 * (m + 1) / 10 // 1 + k + k / 4 // 1 + j / 4 // 1 + 5 * j) % 7 if h == 0: a = "星期六" elif h == 1: a = "星期日" elif h == 
2: a = "星期一" elif h == 3: a = "星期二" elif h == 4: a = "星期三" elif h == 5: a = "星期四" elif h == 6: a = "星期五" print("它是一周中的" + str(a))输入一个年份:2012 输入一个月份:1-12:5 输入一个日期:1-31:12 它是一周中的星期六- 10![](../Photo/51.png)import random s = ["Ace","2","3","4","5","6","7","8","9","10","Jack","Queen","King"] m = ["梅花","红桃","方块","黑桃"] randint1 = random.randint(0,12) randint2 = random.randint(0,3) print("这张牌是" + str(m[randint2]) + str(s[randint1]))这张牌是黑桃Jack- 11![](../Photo/52.png)num = eval(input("输入一个三位数:")) ge = num % 10 a = num // 10 shi = a %10 b = a // 10 bai = b % 10 if ge == bai: print("这是一个回文数") else: print("这不是一个回文数")输入一个三位数:123 这不是一个回文数- 12![](../Photo/53.png)a,b,c = eval (input("输入三角形三个边的边长")) #a,b,c是三角形的三个边 if c < a + b: s = a + b + c #s是三角形的周长 print("三角形的周长是" + str(s)) else : print("这个输入是非法的")输入三角形三个边的边长1,1,1 三角形的周长是3Warning: Before running below cells please make sure you have API key. Please see README.md for more info on API key.import os os.environ["LS_API_KEY"] = "MY-API-KEY" # replace your API key here. from here_map_widget import Map, Overlay, Rectangle, Bbox, FullscreenControl from ipywidgets import Image import os m = Map(api_key=os.environ["LS_API_KEY"]) m.center = [53.1, 13.1] m.zoom = 3 bbox = Bbox( top=70.72849153520343, left=-24.085683364175395, bottom=29.569664922291, right=44.216452317817016, ) overlay = Overlay( boundingBox=bbox, bitmap="https://heremaps.github.io/maps-api-for-javascript-examples/image-overlay/data/0.png", volatility=True, ) m.add_object(overlay) m.add_control(FullscreenControl()) m import time i = 0 while True: overlay.set_bitmap( "https://heremaps.github.io/maps-api-for-javascript-examples/image-overlay/data/{}.png".format( i ) ) i = i + 1 if i == 11: i = 0 time.sleep(0.25)Train test splitdf_train, df_test = train_test_split( data_sample, train_size=0.8, random_state=RANDOM_SEED, shuffle=True, stratify=data_sample['target']) df_val, df_test = train_test_split( df_test, train_size=0.5, random_state=RANDOM_SEED, shuffle=True, stratify=df_test['target']) print(df_train.shape, df_val.shape, df_test.shape) print(df_train.target.value_counts(normalize = True))1 0.541162 0 0.458838 Name: target, dtype: float64Train modelmodel_dir = "D:/Users/Nicholas/Projects/BERT_pretrained/biobert-base-cased-v1.1" model_config = { 'n_classes':2, 'add_linear':[512,256], 'attn_bias':False, 'freeze_layer_count':8 } trainer = training.Trainer(model_dir, model_config, epochs=4) for name, param in trainer.model.named_parameters(): print(name, param.requires_grad) trainer.fit(df_train, df_val)Epoch 1 / 4 -------------------- Batch Train loss: 0.6929 Class 1 prop: 0.4375 ACC: 0.4375 F1: 0.5909 AUC: 0.3058 Batch Train loss: 0.6932 Class 1 prop: 0.5000 ACC: 0.5000 F1: 0.6522 AUC: 0.4896 Batch Train loss: 0.6938 Class 1 prop: 0.5312 ACC: 0.5625 F1: 0.6957 AUC: 0.7066 Batch Train loss: 0.6889 Class 1 prop: 0.4062 ACC: 0.6875 F1: 0.6875 AUC: 0.7274 Batch Train loss: 0.6934 Class 1 prop: 0.5312 ACC: 0.7500 F1: 0.8000 AUC: 0.8648 Batch Train loss: 0.6938 Class 1 prop: 0.5312 ACC: 0.6875 F1: 0.7619 AUC: 0.7754 Batch Train loss: 0.6932 Class 1 prop: 0.5312 ACC: 0.7812 F1: 0.7742 AUC: 0.7791 Batch Train loss: 0.6941 Class 1 prop: 0.5625 ACC: 0.7188 F1: 0.7907 AUC: 0.7931 Batch Train loss: 0.6937 Class 1 prop: 0.5625 ACC: 0.6250 F1: 0.7391 AUC: 0.7620 Batch Train loss: 0.6914 Class 1 prop: 0.5000 ACC: 0.7188 F1: 0.7273 AUC: 0.73[...]plot historyhistory = trainer.history epochs = len(history['train_loss']) plt.plot(list(range(epochs)), history['train_loss'], label = 'train loss') 
plt.plot(list(range(epochs)), history['val_loss'], label = 'val loss') plt.legend() plt.plot(list(range(epochs)), history['train_f1'], label = 'train f1') plt.plot(list(range(epochs)), history['val_f1'], label = 'val f1') plt.legend() plt.plot(list(range(epochs)), history['train_auc'], label = 'train auc') plt.plot(list(range(epochs)), history['val_auc'], label = 'val auc') plt.legend()make predictionspredictor = training.Predictor(model_dir='trained_model') pred_dict = predictor.predict(df_test.head()) pred_dict df_test_ = df_test.drop('target', axis=1) pred_dict_ = predictor.predict(df_test_.head()) pred_dict_Databricks Graphistry Tutorial: Notebooks & Dashboards on IoT dataThis tutorial visualizes a set of sensors by clustering them based on lattitude/longitude and overlaying summary statisticsWe show how to load the interactive plots both with Databricks notebook and dashboard modes. The general flow should work in other PySpark environments as well.Steps:* Install Graphistry* Prepare IoT data* Plot in a notebook* Plot in a dashboard* Plot as a shareable URL Install & connect# Uncomment and run first time ! pip install graphistry #! pip install git+https://github.com/graphistry/pygraphistry.git@dev/databricks # Can sometimes help: #dbutils.library.restartPython() #Optional: Uncomment - We find this speeds up calls 10%+ on some datasets #spark.conf.set("spark.sql.execution.arrow.enabled", "true") import graphistry # if not yet available, install and/or restart Python kernel using the above # To specify Graphistry account & server, use: # graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com') # For more options, see https://github.com/graphistry/pygraphistry#configure graphistry.__version__Prepare IoT dataSample data provided by DatabricksWe create tables for different plots:* Raw table of device sensor reads* Summarized table: - rounded latitude/longitude - summarize min/max/avg for battery_level, c02_level, humidity, timestamp# Load the data from its source. devices = spark.read \ .format('json') \ .load('/databricks-datasets/iot/iot_devices.json') # Show the results. print('type: ', str(type(devices))) display(devices.take(10)) from pyspark.sql import functions as F from pyspark.sql.functions import concat_ws, col, round devices_with_rounded_locations = ( devices .withColumn( 'location_rounded1', concat_ws( '_', round(col('latitude'), 0).cast('integer'), round(col('longitude'), 0).cast('integer'))) .withColumn( 'location_rounded2', concat_ws( '_', round(col('latitude'), -1).cast('integer'), round(col('longitude'), -1).cast('integer'))) ) cols = ['battery_level', 'c02_level', 'humidity', 'timestamp'] id_cols = ['cca2', 'cca3', 'cn', 'device_name', 'ip', 'location_rounded1', 'location_rounded2'] devices_summarized = ( devices_with_rounded_locations.groupby('device_id').agg( *[F.min(col) for col in cols], *[F.max(col) for col in cols], *[F.avg(col) for col in cols], *[F.first(col) for col in id_cols] ) ) # [(from1, to1), ...] 
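# The (old, new) pairs below map each aggregate column produced by groupby/agg,
# e.g. 'min(battery_level)' -> 'battery_level_min', and are consumed further down
# by F.col(old).alias(new) inside the select().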
renames = ( [('device_id', 'device_id')] + [(f'first({col})', f'{col}') for col in id_cols] + [(f'min({col})', f'{col}_min') for col in cols] + [(f'max({col})', f'{col}_max') for col in cols] + [(f'avg({col})', f'{col}_avg') for col in cols] ) devices_summarized = devices_summarized.select(list( map(lambda old,new:F.col(old).alias(new),*zip(*renames)) )) display(devices_summarized.take(10))Notebook plot* Simple: Graph connections between `device_name` and `cca3` (country code)* Advanced: Graph multiple connections, like `ip -> device_name` and `locaation_rounded1 -> ip`displayHTML( graphistry .edges(devices.sample(fraction=0.1), 'device_name', 'cca3') .settings(url_params={'strongGravity': 'true'}) .plot() ) hg = graphistry.hypergraph( devices_with_rounded_locations.sample(fraction=0.1).toPandas(), ['ip', 'device_name', 'location_rounded1', 'location_rounded2', 'cca3'], direct=True, opts={ 'EDGES': { 'ip': ['device_name'], 'location_rounded1': ['ip'], 'location_rounded2': ['ip'], 'cca3': ['location_rounded2'] } }) g = hg['graph'] g = g.settings(url_params={'strongGravity': 'true'}) # this setting is great! displayHTML(g.plot())Dashboard plot* Make a `graphistry` object as usual...* ... Then disable the splash screen and optionally set custom dimensionsThe visualization will now load without needing to interact in the dashboard (`view` -> `+ New Dashboard`)displayHTML( g .settings(url_params={'splashAfter': 'false'}) # extends existing setting .plot(override_html_style=""" border: 1px #DDD dotted; width: 50em; height: 50em; """) )Plot as a Shareable URLurl = g.plot(render=False) urlIntro tutorialThis document explains how to use the package ``twoaxistracking`` for calculating self-shading of two-axis tracking solar collectors.Import necessary packages:import pandas as pd # The following libraries are not standard and have to be installed seperately. # It is recommended to install shapely using conda. from shapely import geometry import pvlib import twoaxistrackingNow, the first step is to define the location/site for where shading is to be calculated. The location is used to determine the solar position in the next steps.location = pvlib.location.Location(latitude=54.9788, longitude=12.2666, altitude=100)The second step involves deciding on the discrete time-steps for which shading shall be calculated. Generally the time series should cover one year (preferably not a leap year).The most **important parameter is the frequency**, e.g., '1min', '15min', '1hr'.It is also important to set the timezone as this affects the calculation of the solar position. 
It is recommended to consistently use UTC to avoid mix-ups.times = pd.date_range( start='2019-1-1 00:00', end='2019-12-31 23:59', freq='15min', # Edit the frequency for a shorter or longer time step tz='UTC')Next, the solar position is calculated for all of the time steps using the [`pvlib-python`](https://pvlib-python.readthedocs.io/en/stable/) package:df = location.get_solarposition(times) df.head() # Show the first 5 linesDefine the collector aperture geometryIn this step, the solar collector geometry is defined:# Change these parameters to suit your particular collector aperture collector_width = 5.697 collector_height = 3.075 collector_geometry = geometry.box( -collector_width/2, # left x-coordinate -collector_height/2, # bottom y-coordinate collector_width/2, # right x-coordinate collector_height/2) # top y-coordinate collector_geometrySimilarly, a circular geometry can be defined as:radius = 2 circular_collector = geometry.Point(0, 0).buffer(radius) circular_collectorNote, the absolute dimensions do not matter, as the GCR parameter scales the distance between collectors according to the collector area.Derive properties from the collector geometry:total_collector_area = collector_geometry.area # Calculate the minimum distance between collectors # Note, L_min is also referred to as D_min by some authors L_min = 2 * collector_geometry.hausdorff_distance(geometry.Point(0, 0)) print("Collector area: %2.1f"% total_collector_area) print("Collector L_min: %1.2f"% L_min)Collector area: 17.5 Collector L_min: 6.47Field layout definitionOnce the collector aperture has been determined, the field layout can be defined. It is important to specify the ground cover ratio (GCR), which is the ratio of the collector area to the ground area. Neighbor orderThe neighbor order determines how many collectors to take into account - for a neighbor order of 1 the immediate 8 collectors are considered, whereas for a neighbor order of 2, 24 shading collectors are considered. It is recommended to use at least a neighbor order of 2. Standard vs. custom field layoutsIt is possible to choose from four different standard field layouts: `square`, `diagonal`, `hexagon_e_w`, and `hexagon_n_s`.It is also possible to specify a custom layout using the keywords: `aspect ratio`, `offset`, `rotation`, and `gcr`. For a description of the layout parameters, see the paper by [ (2014)](https://doi.org/10.1016/j.solener.2014.06.012) or check out the function documentation.X, Y, Z, tracker_distance, relative_azimuth, relative_slope = \ twoaxistracking.generate_field_layout( gcr=0.3, # Change this parameter according to your desired density of collectors neighbor_order=2, layout_type='square', L_min=L_min, # calculated from collector geometry - do not change total_collector_area=total_collector_area, # calculated from collector geometry - do not change plot=True)Calculate shading fractionNow that the collector geometry and field layout have been defined, it is time to do the actual shading calculations. This step is relatively computationally intensive and is mainly affected by the time step, neighbor order, and computational resources available. 
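As a rough illustration of why the neighbor order drives the cost, the counts quoted above (8 collectors for order 1, 24 for order 2) follow a simple square-ring pattern; the snippet below is only a sketch of that arithmetic and is not part of the twoaxistracking API.

def n_shading_collectors(neighbor_order):
    # a (2n+1) x (2n+1) grid of collectors minus the central, shaded one
    return (2 * neighbor_order + 1) ** 2 - 1

print([n_shading_collectors(n) for n in (1, 2, 3)])  # [8, 24, 48]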
Typical run times vary between 5 s and 3 min.%%time df['shaded_fraction'] = \ df.apply(lambda x: twoaxistracking.shaded_fraction( solar_azimuth=x['azimuth'], solar_elevation=x['elevation'], total_collector_geometry=collector_geometry, active_collector_geometry=collector_geometry, tracker_distance=tracker_distance, relative_azimuth=relative_azimuth, relative_slope=relative_slope, L_min=L_min, plot=False), axis=1)Wall time: 5.55 sVisualize the shading fractionPlot the shading fraction for one example day:axes = df.loc['2019-06-28':'2019-06-28', ['shaded_fraction','elevation']].plot(subplots=True, ylim=[0,None])Visualize the average daily shading fraction:df['shaded_fraction'].resample('1d').mean().plot()Peak Player Performance This notebook is based on the article [The Best Stats Measure](https://www.thecricketmonthly.com/story/1057899/the-best-stats-measure), written by who compares alternative ways of measuring the ability of cricketers. He defines here the Peak-33 metric:> Peak-33 is based on the 33 matches in which batsmen scored most runs and bowlers took most wickets, rather than the 33 in which they returned the best average. The search for great metrics for measuring cricket performance is continually evolving and the question about different eras is one that rages on. He does speak further about normalising the numbers for an era and for given matches, but that isn't something that I will explore here, at least initially. This acts as an implementation of taking averages and summaries for given time periods, matches and innings for a given cricketer to enable comparison of cricketers at their peaks using `BeautifulSoup` to scrape the stats online.import numpy as np import pandas as pd import requests from bs4 import BeautifulSoup import reI will begin with using as a working example.player_id = 311158 # ESPN CricInfo ID base_player_url = f'https://stats.espncricinfo.com/ci/engine/player/{player_id}.html' batting_innings_link = base_player_url + '?class=1;template=results;type=batting;view=innings' bowling_innings_link = base_player_url + '?class=1;template=results;type=bowling;view=innings' def get_innings_by_innings_table(url): soup = BeautifulSoup(requests.get(url).text, features="html.parser") for caption in soup.find_all('caption'): if caption.get_text() == 'Innings by innings list': main_table = caption.find_parent( 'table', {'class': 'engineTable'}) columns = [header.get_text() for header in main_table.find('thead').find_all('tr')[0].find_all('th')] rows = [] for innings in [ row for row in main_table.find('tbody').find_all('tr')]: rows.append([stat.get_text() for stat in innings.find_all('td')]) final_table = pd.DataFrame(rows, columns=columns).apply(pd.to_numeric, errors='ignore') # Remove blank columns return(final_table.loc[:,[i for i in final_table.columns if i != '']])We can get the list of test match batting and bowling innings.batting = get_innings_by_innings_table(batting_innings_link) bowling = get_innings_by_innings_table(bowling_innings_link) batting.head() bowling.head()Designing purR sequences (c) 2020 and . This work is licensed under a [Creative Commons Attribution License CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). 
All code contained herein is licensed under an [MIT license](https://opensource.org/licenses/MIT).import wgregseq %load_ext autoreload %autoreload 2 import pandas as pdBad key savefig.frameon in file /Users/sbeeler/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 421 ('savefig.frameon : True') You probably need to get an updated matplotlibrc file from https://github.com/matplotlib/matplotlib/blob/v3.3.2/matplotlibrc.template or from the matplotlib source distribution Bad key verbose.level in file /Users/sbeeler/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 472 ('verbose.level : silent # one of silent, helpful, debug, debug-annoying') You probably need to get an updated matplotlibrc file from https://github.com/matplotlib/matplotlib/blob/v3.3.2/matplotlibrc.template or from the matplotlib source distribution Bad key verbose.fileo in file /Users/sbeeler/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 473 ('verbose.fileo : sys.stdout # a log filename, sys.stdout or sys.stderr') You probably need to g[...]Define the sequences we want to work with:- `lacUV5_purR_overlap`: TCGAG**TTTACA**CACGCAAACGTTTTCGTG**TATAAT**GTGTGG - `lacUV5_purR_downstream`: TCGAG**TTTACA**CTTTATGCTTCCGGCTCG**TATAAT**GTGTGGACGCAAACGTTTTCGTWith the -10 and -35 sites in bold for reference, and the purR binding site underlined.lacUV5_purR_overlap = "TCGAGTTTACACACGCAAACGTTTTCGTGTATAATGTGTGG" lacUV5_purR_downstream = "TCGAGTTTACACTTTATGCTTCCGGCTCGTATAATGTGTGGACGCAAACGTTTTCGT"Let's make sure to figure out which region of each sequence we want to mutate (i.e. just the purR binding site):lacUV5_purR_overlap[12:28] lacUV5_purR_downstream[-16:]Constructs LacUV5 with operator downstreammutants_single = wgregseq.mutations_det(lacUV5_purR_downstream, mut_per_seq=1, site_start=-16, keep_wildtype=True) downstream_df_single = pd.DataFrame({"seq":mutants_single}) downstream_df_single["description"] = "lacUV5 purR downstream single mutant" downstream_df_single.tail() mutants_double = wgregseq.mutations_det(lacUV5_purR_downstream, mut_per_seq=2, site_start=-16) downstream_df_double = pd.DataFrame({"seq":mutants_double}) downstream_df_double["description"] = "lacUV5 purR downstream double mutant" downstream_df_double.tail()LacUV5 with operator overlappingmutants_single = wgregseq.mutations_det(lacUV5_purR_overlap, mut_per_seq=1, site_start=12, site_end=28, keep_wildtype=True) overlap_df_single = pd.DataFrame({"seq":mutants_single}) overlap_df_single["description"] = "lacUV5 purR overlap single mutant" overlap_df_single.tail() mutants_double = wgregseq.mutations_det(lacUV5_purR_overlap, mut_per_seq=2, site_start=12, site_end=28) overlap_df_double = pd.DataFrame({"seq":mutants_double}) overlap_df_double["description"] = "lacUV5 purR overlap double mutant" overlap_df_double.tail()Combine them allpurR_df = pd.concat([downstream_df_single, downstream_df_double, overlap_df_single, overlap_df_double], ignore_index=True) purR_df.tail() purR_df.to_csv("purR_twist_order.csv")Import DataRead in the csv containing the merged data from the weather API and Nest data.data = pd.read_csv('merged_data.csv') data.head(10)Clean the DataRemove unnecessary columns with lots of missing values and check for ways to fix missing values (set to zero, median, mean etc.# update NA values to zero for certain columns data['precipAccumulation'] = data['precipAccumulation'].fillna(0) # if precipitation accumulation is NAN assume 
0 data['precipIntensity'] = data['precipIntensity'].fillna(0) # if precipitation intensity is NAN assume 0 data['nearestStormBearing'] = data['nearestStormBearing'].fillna(0) # drop precipType since it is redundant to icon or summary column data.drop(['precipType'], axis = 1, inplace = True) data.drop(['precipIntensityError'], axis = 1, inplace = True) #data['precipType'] = data['precipType'].fillna('None') # if precipitation intensity is NAN assume 0 data.head(10)Convert Categorical Data to NumericalIn order to properly train a model we want to convert all of our dataframe columns that hold categorical data (ex. rainy, cloudy etc) into numerical data. There are several techniques to do so. The one I will be using is one hot encoding. This technique makes sure each different categorical value is weighted the same which the technique label encoding does not do.One hot encoding adds additional feature columns to the dataset.# summary column one hot encoding summary_encode = pd.get_dummies(data.summary, prefix='summary') # icon column one hot encoding icon_encode = pd.get_dummies(data.icon, prefix='icon') icon_encode.head() # concat the one hot encoded columns onto our data and drop the original column data = pd.concat([data, summary_encode], axis=1) data = pd.concat([data, icon_encode], axis=1) data.drop(['summary'], axis = 1, inplace=True) data.drop(['icon'], axis = 1, inplace=True) data.head()Data ExplorationLet's take a quick glance at the properties of our dataset in more detail.# let's replace all our empty string cells with NaN and then drop them data.replace('', np.nan, inplace=True) data = data.dropna(axis = 0) data.describe().transpose()Splitting the DataWill split the data into 70% training data, 15% for validation,and 15% for testing.data.drop(['time'], axis = 1, inplace = True) # was getting strange NAN values within these two columns (not sure why, need to look into it) data.drop(['summary_Rain'], axis = 1, inplace = True) data.drop(['summary_Light Snow'], axis = 1, inplace = True) column_indices = {name: i for i, name in enumerate(data.columns)} size = len(data) train_df = data[0:int(size * 0.7)] val_df = data[int(size * 0.7):int(size * 0.85)] test_df = data[int(size * 0.85):] num_features = data.shape[1]Normalizing the DataNormalizing the data is a useful step to bring all the features into similar ranges of value so that one doesn't have a greater impact on the data then the others.For normalization let's use the technique of subtracting the mean and dividing by the standard deviation of each feature. **This is commonly known as Z-score normalization.**The mean and standard deviation should only be computed using the training data so that the models have no access to the values in the validation and test sets.data.head() train_mean = train_df.mean() train_std = train_df.std() train_df = (train_df - train_mean) / train_std val_df = (val_df - train_mean) / train_std test_df = (test_df - train_mean) / train_std df_std = (data - train_mean) / train_std df_std = df_std.melt(var_name='Column', value_name='Normalized') plt.figure(figsize=(12, 6)) ax = sns.violinplot(x='Column', y='Normalized', data=df_std) _ = ax.set_xticklabels(data.keys(), rotation=90)Might want to look into removing the features with the long wicks. 
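Because the target column is normalized together with the inputs, any model output will be in z-score units. The sketch below is an assumption, not part of the original notebook: it shows how hypothetical predictions for the 'avg(temp)' target could be mapped back to the original scale with the train_mean and train_std computed above.

# Hypothetical example: undo the z-score normalization for the 'avg(temp)' target.
import numpy as np
preds = np.array([0.5, -1.2, 0.0])  # placeholder model outputs in normalized units
col = 'avg(temp)'
preds_original = preds * train_std[col] + train_mean[col]  # back to original units
print(preds_original)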
Window GenerationWe need to generate a window that we will make a prediction based off of and that will slide through our data.The models in this tutorial will make a set of predictions based on a window of consecutive samples from the data.class WindowGenerator(): def __init__(self, input_width, label_width, shift, train_df=train_df, val_df=val_df, test_df=test_df, label_columns = None): # store data self.train_df = train_df self.val_df = val_df self.test_df = test_df # Work out the label column indices. self.label_columns = label_columns if label_columns is not None: self.label_columns_indices = {name: i for i, name in enumerate(label_columns)} self.column_indices = {name: i for i, name in enumerate(train_df.columns)} # Work out the window parameters. self.input_width = input_width self.label_width = label_width self.shift = shift self.total_window_size = input_width + shift self.input_slice = slice(0, input_width) self.input_indices = np.arange(self.total_window_size)[self.input_slice] self.label_start = self.total_window_size - self.label_width self.labels_slice = slice(self.label_start, None) self.label_indices = np.arange(self.total_window_size)[self.labels_slice] def __repr__(self): return '\n'.join([ f'Total window size: {self.total_window_size}', f'Input indices: {self.input_indices}', f'Label indices: {self.label_indices}', f'Label column name(s): {self.label_columns}']) # split the features and labels for each sequence properly def split_window(self, features): inputs = features[:, self.input_slice, :] labels = features[:, self.labels_slice, :] if self.label_columns is not None: labels = tf.stack( [labels[:, :, self.column_indices[name]] for name in self.label_columns], axis=-1) # Slicing doesn't preserve static shape information, so set the shapes # manually. This way the `tf.data.Datasets` are easier to inspect. 
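# inputs keep the first `input_width` timesteps of every feature, while labels keep
# only the last `label_width` timesteps, restricted to `label_columns` by the stack above.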
inputs.set_shape([None, self.input_width, None]) labels.set_shape([None, self.label_width, None]) return inputs, labels def make_dataset(self, data): data = np.array(data, dtype=np.float32) ds = tf.keras.preprocessing.timeseries_dataset_from_array( data=data, targets=None, sequence_length=self.total_window_size, sequence_stride=1, shuffle=True, batch_size=32,) ds = ds.map(self.split_window) return ds @property def train(self): return self.make_dataset(self.train_df) @property def val(self): return self.make_dataset(self.val_df) @property def test(self): return self.make_dataset(self.test_df) # example window window = WindowGenerator(input_width = 8, label_width = 1, shift = 1, label_columns = ['avg(temp)']) window # ability to loop through data and windows for example_inputs, example_labels in window.train.take(1): print(f'Inputs shape (batch, time, features): {example_inputs.shape}') print(f'Labels shape (batch, time, features): {example_labels.shape}')Inputs shape (batch, time, features): (32, 8, 46) Labels shape (batch, time, features): (32, 1, 1)LSTM ModelBuilding my LSTM model.MAX_EPOCHS = 20 def compile_and_fit(model, window, patience=2): early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min') model.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(), metrics=[tf.metrics.MeanAbsoluteError()]) history = model.fit(window.train, epochs=MAX_EPOCHS, validation_data=window.val, callbacks=[early_stopping]) return history val_performance = {} performance = {} lstm_model = tf.keras.models.Sequential([ # Shape [batch, time, features] => [batch, time, lstm_units] tf.keras.layers.LSTM(32, return_sequences=True), # Shape => [batch, time, features] tf.keras.layers.Dense(units=1) ]) history = compile_and_fit(lstm_model, window) val_performance['LSTM'] = lstm_model.evaluate(window.val) performance['LSTM'] = lstm_model.evaluate(window.test, verbose=0) performance['LSTM'] val_performance['LSTM']Stacking Windows (maybe useful)# Stack three slices, the length of the total window: example_window = tf.stack([np.array(train_df[:window.total_window_size]), np.array(train_df[200:200+window.total_window_size]), np.array(train_df[400:400+window.total_window_size])]) example_inputs, example_labels = window.split_window(example_window) print('All shapes are: (batch, time, features)') print(f'Window shape: {example_window.shape}') print(f'Inputs shape: {example_inputs.shape}') print(f'labels shape: {example_labels.shape}')All shapes are: (batch, time, features) Window shape: (3, 9, 48) Inputs shape: (3, 8, 48) labels shape: (3, 1, 1)Investigation One Univariate State AssignmentThe dataset in this application is from a temperature sensor on an industrial cooling water flow. Readings are in degrees Celsius. The data is read on a two second interval, and indexed $0,.., n$ as required by __trcrpm__.The $3000$ samples show mostly normal/expected readings from the sensor. Between $1500$ and $2000$ the readings fluctuate rapidly - this fluctuation is due to aging/corroding contacts in the signal path.The investigation is to determine whether __trcrpm__ is a valuable tool for identifying signal anomalies without an advance description of the signal characteristics, or some model of its dynamics.This code is based on the trcrpm tutorials. 
It is included to demonstrate the basic functionality of the code base, as well as to highlight intuitively, the trcrpm algorithm.# the required python libraries imported import pandas as pd import matplotlib import matplotlib.pyplot as plt import numpy as np from trcrpm import TRCRP_Mixture # indicates to jupyer how the plots are to be displayed and sized %matplotlib inline plt.rcParams['figure.figsize'] = [15, 5] # Read in the data from a CSV data_clean = pd.read_csv("../data/data.csv", index_col=0) data = data_clean # Plot the data fig, axes = plt.subplots(nrows=len(data.columns)) fig.suptitle('Cooling Water Flow Temperature Sensor Readings with Anomaly', fontsize=16) index = list(data.columns).index(data.columns[0]) x_observed = data.index y_observed = data.loc[:,data.columns[0]] axes.plot(x_observed, y_observed, label=data.columns[0], color='k', linewidth=1) axes.legend(loc='upper left', handletextpad=0) # Provide entropy ~ number of time series rng = np.random.RandomState(1) # Create TRCRP_Mixture model object, with 8 MCMC chains, and a time dependency over the previous 200 samples model = TRCRP_Mixture(chains=8, lag=10, variables=data.columns, rng=rng) # Incorporate the data set into the model model.incorporate(data) # Run MCMC inference the latent state for #steps full Gibbs sweeps through all kernels model.resample_all(steps=10) # Run empirical Bayes on variable hyperparameters for full Gibbs sweeps through all kernels model.resample_hyperparameters(steps=10) # Set up an index/grid, and number of samples desire per index/grid point to run a simulation probes = model.dataset.index numsamples = 20 # Generate predictions from the posterior distribution. samples = model.simulate(probes, model.variables, numsamples) # Helper function to visualize state sequence changes in the model def plot_latent_state_sequence(timesteps, values, states, ax): # sanity check assert len(timesteps) == len(states) # get unique state labels unique = sorted(set(states)) # plot states colors = matplotlib.cm.Set1(np.linspace(0, 1, len(unique))) y_low, y_high = ax.get_ylim() y_mid = np.mean([y_low, y_high]) y_height = 0.05 * (y_high - y_low) for state, color in zip(unique, colors): xs = timesteps[states==state] for x in xs: ax.fill_between([x-1, x], [y_mid-y_height]*2, [y_mid+y_height]*2, alpha=0.3, color=color) # Helper function to plot model predictions from the posterior def plot_predictions(simulations, variable, ax, states_from_chain=None): index = model.variables.index(variable) # Plot the observed data. x_observed = model.dataset.index y_observed = model.dataset.loc[:,variable] ax.plot(x_observed, y_observed, label=variable, color='k', linewidth=1) # Plot 25--75 percentile bands around the simulated data. samples = simulations[:,:,index] ax.fill_between( probes, np.percentile(samples, 25, axis=0), np.percentile(samples, 75, axis=0), color='gray', alpha=0.5) # Optionally plot latent temporal state at each timepoint, # according to a given chain in the model. if states_from_chain is not None: assert 0 <= states_from_chain < model.chains # Get a list of states attributed to each sample states = model.get_temporal_regimes(variable)[states_from_chain] plot_latent_state_sequence(x_observed, y_observed, states, ax) # Add the legend. 
ax.legend(loc='upper left', handletextpad=0) # Plot posterior predictive draws fig, axes = plt.subplots(nrows=len(data.columns)) fig.suptitle('Posterior Predictive Draws', fontsize=16) plot_predictions(samples, data.columns[0], axes) axes.set_xlim([min(probes), max(probes)]) # Plot the model States as they change chain = 0 samples_chain = samples[numsamples*chain:numsamples*(chain+1)] fig, axes = plt.subplots(nrows=1) fig.suptitle('Model States', fontsize=16) plot_predictions(samples_chain,data.columns[0], axes, states_from_chain=chain) axes.set_xlim([min(probes), max(probes)])Word2Vec SAT Load the SAT analogies data and the pruned Word2Vec model (based on GoogleNews-vectors-negative300.bin.gz from https://code.google.com/archive/p/word2vec/), but with only the words that appear in the analogies saved.import numpy import json from scipy.spatial.distance import cosine # Load the model with open("./vectors.json", "r") as f: model = json.load(f) # Load the analogies with open("./analogies.json", "r") as f: analogies = json.load(f) # Convert the model to use numpy vectors for key in model: model[key] = numpy.array(model[key]) def make_vector(word1, word2): try: return model[word1] - model[word2] except KeyError: return None def cosine_dist(vec1, vec2, digits=3): return round(cosine(vec1, vec2), digits) def print_results(key_words, right_words, wrong_words_list): results = [] key_vector = make_vector(*key_words) right_vector = make_vector(*right_words) # Fail if we can't find the word in our model if key_vector is None or right_vector is None: return results.append((cosine_dist(key_vector, right_vector), right_words, True)) for wrong_words in wrong_words_list: wrong_vector = make_vector(*wrong_words) # Fail if we can't find the word if wrong_vector is None: return results.append((cosine_dist(key_vector, wrong_vector), wrong_words, False)) results.sort() table = [("{} : {}".format(*key_words), "Distance")] for dist, (w1,w2), is_answer in results: str_dist = str(dist) if is_answer: string = "**{} : {}**".format(w1,w2) str_dist = "**{}**".format(str_dist) else: string = "{} : {}".format(w1,w2) table.append((string, str_dist)) return table # Make the tables for the blog, and also figure out how well we did from tabulate import tabulate result_place = [] for analogy in analogies: key_words = analogy["key"][0] right_words = analogy["right"][0] wrong_words_list = analogy["wrong"] table = print_results(key_words, right_words, wrong_words_list) if table is not None: print tabulate(table, headers="firstrow", tablefmt="pipe") print "" # What spot in this list is the right answer? 
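# table[0] is the header row, so `place` works out to the 1-based rank of the correct
# answer; the row wrapped in '**' is the correct analogy pair from print_results above.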
for place,contents in enumerate(table): words = contents[0] if '**' in words: result_place.append(place) break| paltry : significance | Distance | |:------------------------|:-----------| | austere : landscape | 0.803 | | redundant : discussion | 0.829 | | **banal : originality** | **0.861** | | oblique : familiarity | 0.895 | | opulent : wealth | 0.984 | | runner : marathon | Distance | |:----------------------|:-----------| | referee : tournament | 0.939 | | **oarsman : regatta** | **0.945** | | martyr : massacre | 0.948 | | envoy : embassy | 0.979 | | horse : stable | 1.026 | | medicine : illness | Distance | |:------------------------|:-----------| | stimulant : sensitivity | 0.914 | | **law : anarchy** | **0.924** | | etiquette : discipline | 1.009 | | love : treason | 1.019 | | hunger : thirst | 1.157 | | paltry : significance | Distance | |:------------------------|:-----------| | austere : landscape | 0.803 | | redundant : discussion |[...]So how did Word2Vec do? Not so well...%matplotlib inline import matplotlib.pyplot as plt # Set the plot size WIDTH = 14 HEIGHT = 8 plt.figure(figsize=(WIDTH, HEIGHT)) # Plot the histogram plt.hist(result_place, bins=[1, 2, 3, 4, 5, 6], normed=True, histtype="stepfilled", align="left") # Set axis labels FONTSIZE="xx-large" plt.xlabel('Rank of Correct Answer', fontsize=FONTSIZE) plt.ylabel('Percent of Analogies', fontsize=FONTSIZE) plt.title("Word2Vec Results on SAT Analogies", fontsize=FONTSIZE) # Save and show the figure plt.savefig("/tmp/analogies_ranking.png", bbox_inches='tight') plt.savefig("/tmp/analogies_ranking.svg", bbox_inches='tight') plt.show()Conversational AIThink about how often you communicate with other people through instant messaging, social media, email, or other online technologies. For many of us, it's our go-to form of contact. When you have a question at work, you might reach out to a colleague using a chat message, which you can use on mobile devices, so you're always in touch.![A human and a robot having a conversation](./images/conversational_ai.jpg)Bots are AI agents that communicate using these kinds of channels, enabling a natural, conversational engagement with software services. Create a QnA Maker Knowledge BaseFor customer support scenarios, it's common to create a bot that can interpret and answer frequently asked questions through a website chat window, email, or voice interface. Underlying the bot interface is a knowledge base of questions and appropriate answers that the bot can search for suitable responses.The QnA Maker service is a cognitive service in Azure that enables you to quickly create a knowledge base, either by entering question and answer pairs or from an existing document or web page. It can then use some built-in natural language processing capabilities to interpret questions and find appropriate answers.1. Open another browser tab and go to the QnA Maker portal at https://qnamaker.ai. Sign in using the Microsoft account associated with your Azure subscription.2. In the QnA Maker portal, select **Create a knowledge base**.3. If you haven't previously created a QnA service resource, select **Create a QnA service**. The Azure portal will be opened in another tab so you can create a QnA Maker service in your subscription. Use the following settings: - **Managed (preview)**: Not selected. 
- **Subscription**: *Your Azure subscription* - **Resource group**: *Select an existing resource group or create a new one* - **Name**: *A unique name for your QnA resource* - **Pricing tier**: F0 - **Azure Search pricing tier**: F - **Azure Search location**: *Any available location* - **App name**: _Same as **Name** (".azurewebsites.net" will be appended automatically)_ - **Website location**: _Same as **Azure Search location**_ - **App insights**: Disable> **Note**: If you have already provisioned a free-tier **QnA Maker** or **Azure Search** resources, your quota may not allow you to create another one. In which case, select a tier other than **F0** / **F**.4. Wait for the deployment of the QnA Service and related resources to complete in the Azure portal.5. Return to the QnA Maker portal tab, and in the **Step 2** section, click **Refresh** to refresh the list of available QnA service resources.6. Connect your QnA service to your KB by selecting the following options: - **Microsoft Azure Directory ID**: *The Azure directory ID for your subscription* - **Azure subscription name**: *Your Azure subscription* - **Azure QnA service**: *The QnA service resource you created in the previous step* - **Language**: English (!) Check InIf a message stating that the role does not have permission to perform the action is displayed, refresh the browser page for the QnA Maker portal.7. In the **Step 3** section, enter the name **Margie's Travel KB**.8. In the **Step 4** section, in the **URL** box, type *https://github.com/MicrosoftDocs/ai-fundamentals/raw/master/data/qna_bot/margies_faq.docx* and click **+ Add URL**. Then under **Chit-chat**, select **Professional**.9. In the **Step 5** section, click **Create your KB**.10. Wait for a minute or so while your Knowledge base is created. Then review the questions and answers that have been imported from the FAQ document and the professional chit-chat pre-defined responses. Edit the Knowledge BaseYour knowledge base is based on the details in the FAQ document and some pre-defined responses. You can add custom question-and-answer pairs to supplement these.1. Click **+ Add QnA pair**.2. In the **Question** box, type `Hello`. Then click **+ Add alternative phrasing** and type `Hi`.3. In the **Answer** box, type `Hello`. Train and Test the Knowledge BaseNow that you have a knowledge base, you can test it in the QnA Maker portal.1. At the top right of the page, click **Save and train** to train your knowledge base. You may need to maximize your window to see the button.2. After training has completed, click **← Test** to open the test pane.3. In the test pane, at the bottom enter the message *Hi*. The response **Hello** should be returned.4. In the test pane, at the bottom enter the message *I want to book a flight*. An appropriate response from the FAQ should be returned.5. When you're done testing the knowledge base, click **→ Test** to close the test pane. Create a Bot for the Knowledge BaseThe knowledge base provides a back-end service that client applications can use to answer questions through some sort of user interface. Commonly, these client applications are bots. To make the knowledge base available to a bot, you must publish it as a service that can be accessed over HTTP. You can then use the Azure Bot Service to create and host a bot that uses the knowledge base to answer user questions.1. At the top of the QnA Make page, click **Publish**. Then in the **Margies Travel KB** page, click **Publish**.2. 
After the service has been deployed, click **Create Bot**. This opens the Azure portal in a new browser tab so you can create a Web App Bot in your Azure subscription.3. In the Azure portal, create a Web App Bot with the following settings (most of these will be pre-populated for you): - **Bot handle**: *A unique name for your bot* - **Subscription**: *Your Azure subscription* - **Resource group**: *The resource group containing your QnA Maker resource* - **Location**: *The same location as your QnA Maker service*. - **Pricing tier**: F0 - **App name**: *Same as the **Bot handle** with *.azurewebsites.net* appended automatically - **SDK language**: *Choose either C or Node.js* - **QnA Auth Key**: *This should automatically be set to the authentication key for your QnA knowledge base* - **App service plan/location**: *This should be set automatically to a suitable plan and location* - **Application Insights**: Off - **Microsoft App ID and password**: Auto create App ID and password.4. Wait for your bot to be created (the notification icon at the top right, which looks like a bell, will be animated while you wait). Then in the notification that deployment has completed, click **Go to resource** (or alternatively, on the home page, click **Resource groups*, open the resource group where you created the web app bot, and click it.)5. In the blade for your bot, view the **Test in Web Chat** page, and wait until the bot displays the message **Hello and welcome!** (it may take a few seconds to initialize).6. Use the test chat interface to ensure your bot answers questions from your knowledge base as expected. For example, try submitting *I need to cancel my hotel*. Access the Bot through a ChannelA bot can be used to provide an interface for users through one or more *channels*. For example, the same bot could support interactions through a web chat interface, email, and Microsoft Teams.1. In the Azure portal, in the blade for your bot, view the **Channels** page.2. Note that the **Web Chat** channel has been aded automatically, and that other channels for common communication platforms are available.3. Next to the **Web Chat** channel, click **Edit**. This opens a page with the settings you need to embed your bot in a web page. To embed your bot, you need the HTML embed code provided as well as one of the secret keys generated for your bot.4. Copy the **Embed code** and paste it into the cell below, replacing the comment ``.5. Click **Show** for one of your secret keys (it doesn't matter which one), and copy it. Then paste it in your HTML embed code below, replacing `YOUR_SECRET_HERE`.6. Change the **min-height** value in your HTML code to **200px** (instead of the default 500px). This will help ensure the HTML interface is visible without scrolling.7. Run the cell below by clicking the **Run cell** (&9655;) button on the left of the cell to render the HTML.8. 
In the HTML chat interface, test the bot by submitting a question, such as *Who is Margie?* or *What destinations can I go to?* (when the bot initializes, it may respond with the message *Hello and welcome* in addition to answering your question.).%%html pYPK1pYPK1 is a version of [pYPK0](pYPK0.ipynb) that has a CEN6/ARS yeast origin of replicationinstead of the 2µ origin of replication present in pYPK0.The URA3 marker is substituted for a bleomycin marker.from pydna.all import * pMEC1042 =read("pMEC1042.gb") gb =Genbank("") pCAPs = gb.nucleotide("AJ001614.1") from Bio.Restriction import PvuI, BamHI, EcoRI pCAPs_pvu = pCAPs.linearize(PvuI) stuffer, pMEC1042_EB = pMEC1042.cut(BamHI, EcoRI) pMEC1042_EB, stuffer asm=Assembly([pMEC1042_EB, pCAPs_pvu], limit=245) asm candidate = asm.assemble_circular()[0] candidate pYPK1 = candidate.synced(pCAPs) pYPK1.stamp() pYPK1.locus = "pYPK1" pYPK1.write("pYPK1.gb")[DOWNLOAD](pYPK1.gb)from pydna.all import * reloaded=read("pYPK1.gb") assert reloaded.cseguid() in reloaded.definitionWeek 2--- Basic SyntaxThis is where I tell you all hope is not lost if you don't understand this.# this is a comment, it will be ignored when this runs other_answer = "answer" answer = "other_answer" print(answer) other_answer = "answer" answer = "other_answer print(answer)Basic Functions# printing nothing print() # multiple arguments x = "Dave" y = "Thomas" print(x, y) # foreign characters work as well π = 3.14159 print(π) # keyword arguments print("This is the first group of text", end="") print("This is the second group of text") # putting quotes in strings using " inside of ' print(' is really "awesome"') # putting quotes in strings using special backslash escape character print(" is really \"awesome\"") 3.14159 This is the first group of textThis is the second group of text is really "awesome" Professor Thomas is really "awesome"Conditionsx = False y = False z = "Luck" if (x == True and y == True) or z == "Duck": print('Bingo!') else: print('Not bingo!')Not bingo!The workflow combines workflows in build_database_05.ipynb and prediction_03.ipynb workflows to test our latest model (CaCO3) on the core PS75-056-1. 
This core only has CaCO3 as the bulk chemistry.import numpy as np import pandas as pd import glob import matplotlib.pyplot as plt #plt.style.use('ggplot') plt.style.use('seaborn-colorblind') plt.style.use('dark_background') plt.rcParams['figure.dpi'] = 300 plt.rcParams['savefig.dpi'] = 300 plt.rcParams['savefig.bbox'] = 'tight' plt.rcParams['savefig.transparent'] = True %matplotlib inline import datetime date = datetime.datetime.now().strftime('%Y%m%d')Read and build spectral datasetThe core has the same data format as the usual one.file_name = [] spe_all = [] depth_all = [] cps_all = [] core_all = [] s_depth_all = [] # only read the 10 kV run, which has a better signal for the light elements spe_dir = glob.glob('data/PS75-056-1/Run 1 at 10kV/*.spe') # make sure the order follows the depths in the filenames spe_dir.sort() for spe in spe_dir: check_depth = spe.split()[3].split('_')[-1] # there are some inconsistencies in naming...as usual # 5 and 6 digits mean the depth is in mm if len(check_depth) >= 5: start_depth = int(check_depth) # 3 and 4 digits mean cm, which needs to be multiplied by 10 to get mm elif len(check_depth) >= 3: start_depth = int(check_depth) * 10 file_name.append(spe.split('/')[-1]) with open(spe, 'r') as f: content = [] lines = f.readlines() for line in lines[49:]: content = np.hstack((content, line.split())) section_depth = int(lines[13][:-3]) spe_all.append(content.astype(int)) cps_all.append(int(lines[28])) core_all.append('PS75-056-1') s_depth_all.append(section_depth) depth_all.append(section_depth + start_depth) spe_df = pd.DataFrame(spe_all, columns = [str(_) for _ in range(2048)]) spe_df['cps'] = cps_all spe_df['core'] = core_all spe_df['composite_depth_mm'] = depth_all spe_df['section_depth_mm'] = s_depth_all spe_df['filename'] = file_name spe_df spe_df[spe_df.isnull().any(axis=1)]Build composite_idspe_df.composite_depth_mm.max() composite_id = [] for core, depth in zip(spe_df.core, spe_df.composite_depth_mm): composite_id.append('{}_{:05}'.format(core, depth)) spe_df['composite_id'] = composite_id spe_df = spe_df.set_index('composite_id')Drop duplicateslen(spe_df.drop_duplicates('composite_id', keep = 'last'))There are no duplicates because the length is unchanged.
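A slightly more explicit version of this check (an added sketch, not part of the original workflow) asserts directly on the composite_id index instead of comparing lengths by eye:
# Added sketch: make the "no duplicates" check explicit.
n_dupes = spe_df.index.duplicated().sum()
assert n_dupes == 0, '{} duplicated composite_id values found'.format(n_dupes)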
Build sectionsection_all = [] # make sure the order follows the core and composite depth spe_df.sort_values(by = 'composite_id', axis = 0, inplace = True) # I assume every core scanned from section 0 so the first section in the core is marked as section 0 # the deeper the larger number section = 0 X = spe_df['section_depth_mm'] for i in range(len(X)): section_all.append(section) try: # when section changes, the section depth should be rest to smaller number if X[i] > X[i + 1]: section += 1 except IndexError: print('bottom of the core.') spe_df['section'] = section_all spe_dfRead bulk chemistrybulk_df = pd.read_table('data/Bulk chem/PS75-56_bulkCaCO3 (%)', skiprows=1) bulk_df['depth m'] = bulk_df['depth m'] * 1000 bulk_df.columns = ['mid_depth_mm', 'CaCO3%'] # modify headers to match previous bulk datasets bulk_df['core'] = ['PS75-056-1' for _ in range(len(bulk_df))] bulk_df = bulk_df[['CaCO3%', 'core', 'mid_depth_mm']]# modify orders to match previous bulk datasets bulk_df bulk_df.isna().any()Merge spe and bulk datasetsmask_c = spe_df.columns[:2048] # only the channels merge_df = pd.DataFrame() for index, row in bulk_df.iterrows(): mid = row['mid_depth_mm'] # get the spe in 10 mm interval mask_r = (spe_df.composite_depth_mm >= (mid-5)) & (spe_df.composite_depth_mm <= (mid+5)) merge_df = pd.concat( [merge_df, spe_df.loc[mask_r, mask_c].apply(np.mean, axis = 0).append(row)], axis = 1 ) merge_df = merge_df.T.reset_index(drop = True) merge_dfCheck rows having nan in any columnmerge_df[merge_df.isnull().any(axis = 1)]Good, no nan in the dataset. Export datasetThis dataset combines the merged datasets of core PS75-056-1 and only has CaCO3 measurements.merge_df.to_csv('data/spe+bulk_PS75-056-1_{}.csv'.format(date)) bulk_df.to_csv('data/bulk_PS75-056-1_{}.csv'.format(date)) spe_df.to_csv('data/spe_PS75-056-1_{}.csv'.format(date)) print(date)20210601Test the accuracy#merge_df = pd.read_csv('data/spe+bulk_PS75-056-1_20210601.csv', index_col=0) X = merge_df.iloc[:, :2048].values X = X / X.sum(axis = 1, keepdims = True) from joblib import load m_caco3 = load('models/caco3_nmf+svr_model_20201216.joblib')Predictfrom sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import max_error y_caco3 = np.exp(m_caco3.predict(X)) print('Scores in the test set:') print('R2 = {:.3f} .'.format(r2_score(merge_df['CaCO3%'], y_caco3))) print('The mean absolute error is {:.3f} (%, concetration).'.format(mean_absolute_error(merge_df['CaCO3%'], y_caco3))) print('The max. residual error is {:.3f} (%, concetration).'.format(max_error(merge_df['CaCO3%'], y_caco3))) plt.plot(range(len(y_caco3)), merge_df['CaCO3%'], alpha=0.6, label='Measurement') plt.plot(range(len(y_caco3)), y_caco3, label='Prediction (R$^2$={:.2f})'.format(r2_score(merge_df['CaCO3%'], y_caco3))) #plt.text(12, -7, r'R$^2$={:.2f}, mean ab. error={:.2f}, max. ab. 
error={:.2f}'.format(grid.best_score_, mean_absolute_error(y_ttest, y_predict), max_error(y_ttest, y_predict))) plt.ylabel('CaCO$_3$ concentration (%)') plt.xlabel('Sample no.') plt.legend(loc = 'upper right') plt.savefig('results/caco3_predictions_PS75-056-1_{}.png'.format(date)) plt.hist(merge_df['CaCO3%']) plt.xlabel('CaCO${_3}$ (wt%) ') plt.ylabel('Count') plt.savefig('results/caco3_hist_PS75-056-1_{}.png'.format(date)) plt.plot(range(520), X.max(axis=0)[:520], label='Max.') plt.plot(range(520), X.min(axis=0)[:520], label='Min.') fig, ax = plt.subplots(1, 1) ax.plot([0, 80], [0, 80], ls='--', c='gray') ax.scatter(merge_df['CaCO3%'], y_caco3, s=6) ax.set_aspect('equal') ax.set_xlabel('Measurement (wt%) ') ax.set_ylabel('Prediction (wt%) ') fig.savefig('results/caco3_predictions_PS75-056-1_2_{}.png'.format(date))Draw spectrumpre_df = pd.read_csv('data/spe+bulk_dataset_20201215.csv', index_col=0) pre_df.core.unique() from sklearn.model_selection import train_test_split X_p = pre_df.iloc[:, : -5].values X_p = X_p / X_p.sum(axis = 1, keepdims = True) SO264_core = pre_df.core.unique()[:-3] s1_X_train, _ = train_test_split(X_p[pre_df.core.isin(SO264_core)], test_size = 0.2, shuffle = True, random_state = 24) s2_ca_train, _ = train_test_split(X_p[pre_df.core.isin(['LV28-44-3', 'LV29-114-3'])], test_size = 0.2, shuffle = True, random_state = 24)The sum of carbonate and TOC training data amounts fit to the histogram, respectively. I don't have enough time to fine-tune a figure for all these spectra in different steps and analytes so I plot the figure of the 1st+2nd scale ups only for now.fig, axes = plt.subplots(1, 2, figsize=(7.2, 3), sharex='row', sharey='row') axes[0].plot(range(2048), np.vstack((s1_X_train, s2_ca_train)).max(axis=0), label='Max.') axes[0].plot(range(2048), np.vstack((s1_X_train, s2_ca_train)).min(axis=0), label='Min.') axes[0].set_xlim(0, 520) axes[0].set_ylabel('Normalized intensity') axes[0].set_xlabel('Channel, Training data') axes[1].plot(range(2048), X.max(axis=0), label='Max.') axes[1].plot(range(2048), X.min(axis=0), label='Min.') axes[1].set_xlabel('Channel, PS75-056-1') axes[1].legend() fig.subplots_adjust(wspace=.04) plt.savefig('results/spectra_comparison_1_{}.png'.format(date)) print(date) plt.plot(range(2048), np.vstack((s1_X_train, s2_ca_train)).mean(axis=0), label='Training data') plt.plot(range(2048), X.mean(axis=0), label='PS75-056-1') plt.xlim(0, 520) plt.ylabel('Normalized intensity') plt.xlabel('Channel') plt.legend() plt.suptitle('Averaged spectra') plt.subplots_adjust(top=.92) plt.savefig('results/spectra_comparison_2_{}.png'.format(date)) print(date)20210603Cluster same word structure based on POS and Entitiesbahdanau_entities = malaya.entity.deep_model('bahdanau') bahdanau_pos = malaya.pos.deep_model('bahdanau') result_entities = bahdanau_entities.predict(string) result_pos = bahdanau_pos.predict(string) from malaya.cluster import cluster_words, pos_entities_ngram generated_grams = pos_entities_ngram( result_pos, result_entities, ngram = (1, 3), accept_pos = ['NOUN', 'PROPN', 'VERB'], accept_entities = ['law', 'location', 'organization', 'person', 'time'], ) generated_grams cluster_words(generated_grams)Cluster POS and Entitiesfrom malaya.cluster import cluster_pos, cluster_entities cluster_pos(result_pos) cluster_entities(result_entities)Generate ngramsfrom malaya.cluster import sentence_ngram sentence_ngram(string, ngram = (3, 5))Module 5- Other Forms of Visualizationauthor: In this module, I will be desmonstrating a few other graphical capabilities 
in Python. As of July 2016, this module contains examples for a pie-chart and a table.I will be using the following link to create a table and pie chart:https://radwatch.berkeley.edu/sites/default/files/pictures/rooftop_tmp/weather.csvThis module is also the first time I will use numpy. Numpy provides functions that perform common numerical techniques such as mean, median, and mode. It also enables multi-dimensional data arrays, allowing Python to deal with vector computation, matrix arithmetic, and a variety of other numerical manipulations.%matplotlib inline import csv import io import urllib.request import matplotlib.pyplot as plt from datetime import datetime import numpy as np url = 'https://radwatch.berkeley.edu/sites/default/files/pictures/rooftop_tmp/weather.csv' response = urllib.request.urlopen(url) reader = csv.reader(io.TextIOWrapper(response)) timedata = [] Bi214 = [] K40 = [] Cs134 = [] Cs137 = [] line = 0 for row in reader: if line != 0: timedata.append(datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S')) Bi214.append(float(row[1])) K40.append(float(row[2])) Cs134.append(float(row[3])) Cs137.append(float(row[4])) line += 1The following code creates a pie chart that illustrates the breakdown of which isotope is measured to have the highest concentration during the given interval of our data.In order to do this, we start with a for-loop script that tallies which isotope has the highest concentration measurement at each index until it reaches the end of the data:def pie_chart(bi214, k40, cs134, cs137, labels): tally = [0,0,0,0] for i in range(0,len(bi214)-1): comparing_list = [bi214[i],k40[i], cs134[i], cs137[i]] if max(comparing_list) == bi214[i]: tally[0] += 1 elif max(comparing_list) == k40[i]: tally[1] += 1 elif max(comparing_list) == cs134[i]: tally[2] += 1 else: tally[3] += 1 # plt.pie places the items counter-clockwise total_counts = sum(tally) fracs = [tally[0]/total_counts, tally[1]/total_counts, tally[2]/total_counts, tally[3]/total_counts] explode = (0,0,0,0) # explode refers to "pulling out" certain items for exaggerated effect plt.pie(fracs, explode=explode, labels=labels, autopct='%1.1f%%') # autopct='%1.1f%%' displays the percent value for each slice plt.axis('equal') # set the pie chart against a square axis, otherwise it won't be circular. plt.title('Air Monitor Data:\nBreakdown of Highest Measured Count Rate in Isotopes\nof Interest from %s to %s' %(timedata[0],timedata[-1])) # this title string demonstrates a line break (signified by \n after # isotopes) and string interpolation (represented by %s). plt.show() # a more interesting example pie chart would break down total activity in each isotope: activity = [sum(bi214), sum(k40), sum(cs134), sum(cs137)] total_activity = sum(activity) fracs = [activity[0]/total_activity, activity[1]/total_activity, activity[2]/total_activity, activity[3]/total_activity] explode = (0,0,0,0) plt.pie(fracs, explode=explode, labels=labels, autopct='%1.1f%%') plt.axis('equal') plt.title('Air Monitor Data:\nBreakdown of Total Activity by Isotope of Interest\nfrom %s to %s' %(timedata[0],timedata[-1])) plt.show() labels = ['Bi214','K40','Cs134','Cs137'] pie_chart(Bi214, K40, Cs134, Cs137, labels)The following commands will create a table with 4 rows, each containing an isotope (Bi-214, K-40, Cs-134, and Cs-137).
There will be 5 columns with the following information: isotope name, mean concentration, median concentration, max concentration measured, and time of occurrence:def table(bi214, k40, cs134, cs137): RowLabel = ("Bi-214", "K-40", "Cs-134", "Cs-137") ColLabel = ("isotope", "mean CPS", "median CPS", "max CPS", "time of occurrence") # The statistical meaning of mean and a computational method to obtain # it are explored in a different module. # For this module, we will use the numpy function 'np.mean' mean_data = (np.mean(bi214), np.mean(k40), np.mean(cs134), np.mean(cs137)) mean_data = np.around(mean_data, decimals=4) # np.around rounds elements in a list to the chosen decimal point. # For median, we will use the numpy function 'np.median' median_data = np.around((np.median(bi214), np.median(k40), np.median(cs134), np.median(cs137)), decimals=4) # Numpy also has a function to scan a list for the max value contained in that list! max_data = np.around((np.max(bi214), np.max(k40), np.max(cs134), np.max(cs137)), decimals=4) # Each max CPS has a corresponding datetime component with the same index. # I will use LIST.index(max(LIST)) to find these corresponding indices. # Note: this method's weakness is that it only identifies the first # occurrence of a maximum; if there the max occurs multiple times it will # not acknowledge them. Can you think/find a way to do this in a better way? time_data = (timedata[Bi214.index(np.max(bi214))], timedata[k40.index(np.max(k40))], timedata[Cs134.index(np.max(cs134))], timedata[cs137.index(np.max(cs137))]) # if you have trouble understanding the previous commands, make # sure you understand each line piece their purpose together! data_array = np.vstack((RowLabel,mean_data,median_data, max_data,time_data)).T # vstack places the lists atop eachother and '.T' transposes the # information so it'll appear as it should on the table. fig, ax = plt.subplots() ax.axis('off') # By default, matplotlib will always plot against axes. # Therefore, if we want a table only, we turn the axis 'off' ax.table(cellText=data_array, colLabels=ColLabel, loc='center') fig.set_size_inches(18, 10) plt.show() table(Bi214, K40, Cs134, Cs137)ResourcesThis depth model architechture is from: https://arxiv.org/abs/1809.04766We will make changes in the model but the APIs will remain same. All model related files will be in src folder.For preprocessing, we all can use this notebook or we can add a python file additionally.img_path = 'examples/ExpKITTI_joint/278_org.png' img_org = np.array(Image.open(img_path)) depth_y_path = 'examples/ExpKITTI_joint/278_depth.png' depth_y = np.array(Image.open(depth_y_path)) # processing orignal depth images assert(np.max(depth_y) > 255) depth_y = depth_y.astype(np.float) / 256.PCApca_img = img_org.reshape(375, -1 ) # Increasing n components will increance explained variance but will decrease our accuracy benefits. 
pca = PCA(n_components = 64, svd_solver='randomized').fit(pca_img) pca_img = pca.transform(pca_img) print(pca_img.shape ) print("Retained variance", np.sum(pca.explained_variance_ratio_)) img = pca.inverse_transform(pca_img) img = img.reshape(375, 1242, 3)(375, 64) Retained variance 0.9622597384383695Evaluationdef RMSE(target, prediction): return np.sqrt(np.mean((target - prediction)**2)) def measure_duration(img, times = 10): durations = [] for _ in range(times): start = time.time() depth = run(img) end = time.time() durations.append(end - start) return depth, (sum(durations) / times) depth_pca, duration_pca = measure_duration(img, 10) depth_no_pca, duration_no_pca = measure_duration(img_org, 10) diff = duration_no_pca - duration_pca print("Run time diff ", round(diff, 4))Run time diff -0.0016Imporved accuracy but no benefit in processing time :(rmse = RMSE(depth_y, depth_no_pca) pca_rmse = RMSE(depth_y, depth_pca) print('accuracy change %: ',(rmse - pca_rmse)*100/rmse )accuracy change %: 19.899622033804242RMSE and time taken with frame skippingdef runDepthEstimator(start_frame=275, end_frame=300, num_iter=100, isFrameSkippingEnabled=True): prevPredDepth = depth_no_pca prevDepthExists = False runModel = True totalRmse = 0 start = time.time() for img in range(start_frame,end_frame): imgPath = 'examples/kitti_car/0000000'+str(img)+'.png' depthPath = 'examples/kitti_car/depth/0000000'+str(img)+'.png' depth_y = np.array(Image.open(depthPath)) # processing orignal depth images assert(np.max(depth_y) > 255) depth_y = depth_y.astype(np.float) / 256. #Run model everytime if frameSkipping is disabled if(not isFrameSkippingEnabled): runModel = True if(runModel): #Debug statement to identify progress of function print("Running DL model for frame: ", imgPath) imgMap = np.array(Image.open(imgPath)) #Run model and obtain depth of image currDepth, _ = measure_duration(imgMap, num_iter) #Calculate difference in depth maps #If difference is below a threshold, skip the next frame and set flag accordingly if(prevDepthExists and isFrameSkippingEnabled): depthDiff = RMSE(prevPredDepth, currDepth) if(depthDiff <= 1.5): runModel = False else: prevDepthExists = True modelRmse = RMSE(currDepth, depth_y) prevPredDepth = currDepth else: print("Skipping frame ", imgPath) #Calculate rmse with assumed depth(previous frame depth) instead of calculated depth #since frame was skipped modelRmse = RMSE(prevPredDepth, depth_y) runModel = True totalRmse += modelRmse end = time.time() totalTime = end - start totalRmse /= (end_frame - start_frame + 1) return totalTime, totalRmse # Test model time and accuracy with frame skipping enabled timeWithSkip, rmseWithSkip = runDepthEstimator(275, 300, 3, True)Running DL model for frame: examples/kitti_car/0000000275.png Running DL model for frame: examples/kitti_car/0000000276.png Skipping frame examples/kitti_car/0000000277.png Running DL model for frame: examples/kitti_car/0000000278.png Skipping frame examples/kitti_car/0000000279.png Running DL model for frame: examples/kitti_car/0000000280.png Skipping frame examples/kitti_car/0000000281.png Running DL model for frame: examples/kitti_car/0000000282.png Skipping frame examples/kitti_car/0000000283.png Running DL model for frame: examples/kitti_car/0000000284.png Skipping frame examples/kitti_car/0000000285.png Running DL model for frame: examples/kitti_car/0000000286.png Skipping frame examples/kitti_car/0000000287.png Running DL model for frame: examples/kitti_car/0000000288.png Skipping frame examples/kitti_car/0000000289.png 
Running DL model for frame: examples/kitti_car/0000000290.png Skipping frame examples/kitti_car/0000000291.png Running DL model for frame: exam[...]RMSE and time taken without frame skippingtimeWithoutSkip, rmseWithoutSkip = runDepthEstimator(275, 300, 3, False)Running DL model for frame: examples/kitti_car/0000000275.png Running DL model for frame: examples/kitti_car/0000000276.png Running DL model for frame: examples/kitti_car/0000000277.png Running DL model for frame: examples/kitti_car/0000000278.png Running DL model for frame: examples/kitti_car/0000000279.png Running DL model for frame: examples/kitti_car/0000000280.png Running DL model for frame: examples/kitti_car/0000000281.png Running DL model for frame: examples/kitti_car/0000000282.png Running DL model for frame: examples/kitti_car/0000000283.png Running DL model for frame: examples/kitti_car/0000000284.png Running DL model for frame: examples/kitti_car/0000000285.png Running DL model for frame: examples/kitti_car/0000000286.png Running DL model for frame: examples/kitti_car/0000000287.png Running DL model for frame: examples/kitti_car/0000000288.png Running DL model for frame: examples/kitti_car/0000000289.png Running DL model for frame: examples/kitti_car/0000000[...]Comparing performance of Depth Estimator with and without skippingprint("Time and RMSE values for depth estimator with frame skipping:\n") print("Total time taken in seconds: ", timeWithSkip) print("Root mean squared error value: ", rmseWithSkip) print("\n") print("Time and RMSE values for depth estimator without frame skipping:\n") print("Total time taken in seconds: ", timeWithoutSkip) print("Root mean squared error value: ", rmseWithoutSkip) print("\n") timeSaved = (float(timeWithoutSkip - timeWithSkip)/timeWithoutSkip) * 100 print("Improvement in time with frame skipping (in percentage):", timeSaved) rmseChange = (float(rmseWithSkip - rmseWithoutSkip)/rmseWithSkip) * 100 print("RMSE change between with and without frame skipping (in percentage):", rmseChange)Time and RMSE values for depth estimator with frame skipping: Total time taken in seconds: 6.588036775588989 Root mean squared error value: 25.119838235318262 Time and RMSE values for depth estimator without frame skipping: Total time taken in seconds: 10.0713210105896 Root mean squared error value: 25.115423202954666 Improvement in time with frame skipping (in percentage): 34.58617028826778 RMSE change between with and without frame skipping (in percentage): 0.01757587896162718Plotplt.figure(figsize=(36, 24)) plt.subplot(131) plt.imshow(img_org) plt.title('orig img') plt.axis('off') plt.subplot(132) plt.imshow(depth_y, cmap='plasma', vmin=MIN_DEPTH, vmax=MAX_DEPTH) plt.title('KITTI Orignal depth map') plt.axis('off') plt.subplot(133) plt.imshow(depth_pca, cmap='plasma', vmin=MIN_DEPTH, vmax=MAX_DEPTH) plt.title('Predicted depth') plt.axis('off');Data preparation HyperparametersLEARNING_RATE = 0.001 BATCH_SIZE = 32data loadingl = np.load('./env_set/dataset.npz') train_input = l['train_input'] train_label = l['train_label'] test_input = l['test_input'] test_label = l['test_label'] MAXS = l['MAXS'] MINS = l['MINS'] TIME_STEPS = l['TIME_STEPS'] OUTPUT_SIZE = l['OUTPUT_SIZE'] NUM_FEATURES = train_input.shape[-1] print(train_input.shape) print(train_label.shape) print() print(test_input.shape) print(test_label.shape) trainset = np.concatenate([train_input, train_label], axis=0).astype(np.float32) testset = np.concatenate([test_input, test_label], axis=0).astype(np.float32) train_dataset = 
tf.data.Dataset.from_tensor_slices((trainset, trainset)) train_dataset = train_dataset.cache().shuffle(BATCH_SIZE*100).batch(BATCH_SIZE).repeat() test_dataset = tf.data.Dataset.from_tensor_slices((testset, testset)) test_dataset = test_dataset.batch(BATCH_SIZE) class RetrainLayer(tf.keras.layers.Layer): def __init__(self, num_hidden, activation=tf.nn.relu): super(RetrainLayer, self).__init__() self.num_hidden = num_hidden self.dense = tf.keras.layers.Dense(self.num_hidden, activation=activation, kernel_initializer='he_uniform') def call(self, inp): return self.dense(inp) class Encoder(tf.keras.layers.Layer): def __init__(self, num_hiddens, encoding_size): super(Encoder, self).__init__() self.num_hiddens = num_hiddens self.encoding_size = encoding_size self.denses = [tf.keras.layers.Dense(self.num_hiddens[_], activation=tf.nn.relu, kernel_initializer='he_uniform') for _ in range(len(self.num_hiddens))] self.output_layer = tf.keras.layers.Dense(self.encoding_size, activation=tf.nn.sigmoid) def call(self, inp): for _ in range(len(self.num_hiddens)): inp = self.denses[_](inp) return self.output_layer(inp) class Decoder(tf.keras.layers.Layer): def __init__(self, num_hiddens, original_size): super(Decoder, self).__init__() self.num_hiddens = num_hiddens[::-1] self.original_size = original_size self.denses = [tf.keras.layers.Dense(self.num_hiddens[_], activation=tf.nn.relu, kernel_initializer='he_uniform') for _ in range(len(self.num_hiddens))] def call(self, inp): for _ in range(len(self.num_hiddens)): inp = self.denses[_](inp) return inp class Autoencoder(tf.keras.Model): def __init__(self, num_hiddens, encoding_size, original_size): super(Autoencoder, self).__init__() self.num_hiddens = num_hiddens self.encoding_size = encoding_size self.original_size = original_size self.in_retrain_layer = RetrainLayer(self.num_hiddens[0]) self.encoder = Encoder(self.num_hiddens, self.encoding_size) self.decoder = Decoder(self.num_hiddens, self.original_size) self.out_retrain_layer = RetrainLayer(self.original_size, activation = tf.nn.sigmoid) def call(self, inp, need_code=False, decoding=None): inp = self.in_retrain_layer(inp) encoded_values = self.encoder(inp) if decoding is not None: decoding = self.decoder(decoding) return self.out_retrain_layer(decoding) if not need_code: encoded_values = self.decoder(encoded_values) return self.out_retrain_layer(encoded_values) else: return encoded_values def loss(model, original): reconstruction_error = tf.reduce_mean(tf.square(tf.subtract(model(original), original))) return reconstruction_error @tf.function def train(loss, model, opt, original): with tf.GradientTape() as tape: gradients = tape.gradient(loss(model, original), model.trainable_variables) gradient_variables = zip(gradients, model.trainable_variables) opt.apply_gradients(gradient_variables) num_hiddens = [32, 16] encoding_size = 8 original_size = 5 autoencoder = Autoencoder(num_hiddens, encoding_size, original_size) opt = tf.optimizers.Adam(learning_rate=0.001) checkpoint_path = "./checkpoints/trained_AE" ckpt = tf.train.Checkpoint(autoencoder=autoencoder, opt=opt) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=10) writer = tf.summary.create_file_writer('tmp') EPOCHS = 50 prev_test_loss = 100.0 with writer.as_default(): with tf.summary.record_if(True): for epoch in range(EPOCHS): for step, (inp, tar) in enumerate(train_dataset): train(loss, autoencoder, opt, inp) loss_values = loss(autoencoder, inp) tf.summary.scalar('loss', loss_values, step=step) if step % 500 == 0: 
test_loss = 0 for step_, (inp_, tar_) in enumerate(test_dataset): test_loss += loss(autoencoder, inp_) if step_ > 500: test_loss /= 100 break if test_loss.numpy() < prev_test_loss: ckpt_save_path = ckpt_manager.save() prev_test_loss = test_loss.numpy() print('Saving checkpoint at {}'.format(ckpt_save_path)) print('Epoch {} batch {} train loss: {:.4f} test loss: {:.4f}' .format(epoch, step, loss_values.numpy(), test_loss.numpy()))Saving checkpoint at ./checkpoints/trained_AE/ckpt-59 Epoch 0 batch 0 train loss: 0.0000 test loss: 0.0001 Saving checkpoint at ./checkpoints/trained_AE/ckpt-60 Epoch 0 batch 500 train loss: 0.0000 test loss: 0.0001 Saving checkpoint at ./checkpoints/trained_AE/ckpt-61 Epoch 0 batch 1000 train loss: 0.0000 test loss: 0.0001 Saving checkpoint at ./checkpoints/trained_AE/ckpt-62 Epoch 0 batch 1500 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 2000 train loss: 0.0000 test loss: 0.0001 Saving checkpoint at ./checkpoints/trained_AE/ckpt-63 Epoch 0 batch 2500 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 3000 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 3500 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 4000 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 4500 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 5000 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 5500 train loss: 0.0000 test loss: 0.0001 Epoch 0 batch 6000 train loss: 0.0000 test loss: 0.0001 Epoch [...]Model savingi = -1 if ckpt_manager.checkpoints: ckpt.restore(ckpt_manager.checkpoints[i]) print ('Checkpoint ' + ckpt_manager.checkpoints[i][-6:] +' restored!!') test_loss = 0 for step_, (inp_, tar_) in enumerate(test_dataset): test_loss += loss(autoencoder, inp_) autoencoder.save_weights('./checkpoints/trained_AE')While the package seems to handle the synthetic data fairly well right now - I'm beginning to realise that the real data is of course likely to be much more noisy and messy -- and I need to start considering it as soon as possible. This notebook is basically me checking out how well real data can be handled in the current package, and what can be done to improve the CF-FM segmentation performance.While exploring how well real-data is CF-FM segmented, I also ended up figuring out how to segment the call from the background - and this too is now part of the package. 
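Before diving into the package code below, the underlying idea of the call-background segmentation can be written as a tiny generic sketch (my own illustration, not the package's implementation): build a smoothed dB profile of the audio, threshold it relative to its peak, and keep the longest contiguous region above the threshold.
import numpy as np

def longest_region_above_threshold(profile_db, threshold_db=-20):
    # Return (start, stop) sample indices of the longest contiguous run lying
    # within `threshold_db` of the profile's peak level. The -20 dB default is
    # an assumption for illustration only.
    mask = profile_db >= (np.max(profile_db) + threshold_db)
    best_start, best_stop, run_start = 0, 0, None
    for i, flag in enumerate(np.append(mask, False)):  # trailing False closes an open run
        if flag and run_start is None:
            run_start = i
        elif not flag and run_start is not None:
            if i - run_start > best_stop - best_start:
                best_start, best_stop = run_start, i
            run_start = None
    return best_start, best_stop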
2020-03-10-- %matplotlib notebook import glob import os import sys sys.path.append('..\\measure_horseshoe_bat_calls\\') import pywt from segment_horseshoebat_call import * from measure_a_horseshoe_bat_call import * real_data = glob.glob('real_data\\'+'*.WAV')Loading, upsampling and storing the audio clips into a dictionaryMy previous notebooks have shown that upsampling has the effect of improving FM and CF segmentation - especially for call with short FM durations.all_calls = {} for each in real_data: call, fs = sf.read(each) call_ups = signal.resample(call, call.size*2) file_name = os.path.split(each)[-1] all_calls[file_name] = call_ups all_file_names = list(all_calls.keys()) make_x_time = lambda X, fs: np.linspace(0, X.size/float(fs), X.size) def visualise_call(audio, fs): plt.figure() a0 = plt.subplot(211) plt.plot(make_x_time(audio,fs), audio) a1 = plt.subplot(212, sharex=a0) plt.specgram(audio, Fs=fs, NFFT=128,noverlap=127, vmin=-100) return a0, a1 example_call = all_calls[all_file_names[57]] pk_pctage = 0.985 win_size = 25 a,b = visualise_call(example_call, fs*2) cf, fm, info = segment_call_into_cf_fm(example_call, fs*2, peak_percentage=pk_pctage, window_size=win_size) b.plot(make_x_time(cf,fs*2), cf*90000,'r') b.plot(make_x_time(cf,fs*2), cf*120000,'r') b.plot(make_x_time(fm,fs*2), fm*150000) # get rid of the silence before and after the call start, stop = calc_sound_borders(example_call, 90) main_call = example_call[start:stop] a1,b1 = visualise_call(main_call, fs*2) cf, fm, info = segment_call_into_cf_fm(main_call, fs*2, peak_percentage=pk_pctage, window_size=win_size) b1.plot(make_x_time(cf,fs*2), cf*90000,'r') b1.plot(make_x_time(cf,fs*2), cf*120000,'r') b1.plot(make_x_time(fm,fs*2), fm*150000)The real data have a few milliseconds of silence on the left and right of the main call - and this might also affect how the segmentation happens. Does the situation improve if we pre-select the main energy window? Things which make proper call-background + CF-FM segmentation tricky:* Non-uniform received levels across the call duration (start of call is faint, but end of call is intense)* Banding/interference in the CF and FM. The interference creates weird false CF/FM bands. This is typically a sign of a signal with some reflections in it -- though, if necessary, signals could still be recovered from it by some kind of interpolation.def segment_call_from_background(audio, fs,**kwargs): '''Performs a wavelet transform to track the signal within the relevant portion of the bandwidth. Parameters ---------- audio : np.array fs : float>0 Frequency of sampling in Hertz. lowest_relevant_freq : float>0, optional The lowest frequency band in Hz whose coefficients will be tracked. The coefficients of all frequencies in the signal >= the lowest relevant frequency are tracked. This is the lowest possible frequency the signal can take. It is best to give ~10-20 kHz of berth. Defaults to 35kHz. wavelet_type : str, optional The type of wavelet which will be used for the continuous wavelet transform. See pywt.wavelist(kind='continuous') for all possible types in case the default doesn't seem to work. 
Defaults to mexican hat, 'mexh' scales : array-like, optional ''' lowest_relevant_freq = kwargs.get('lowest_relevant_freq', 35000.0) wavelet_type = kwargs.get('wavelet_type', 'mexh') background_threshold = kwargs.get('background_threshold', -20) scales = kwargs.get('scales',np.arange(1,10)) coefs, freqs = pywt.cwt(audio, scales, wavelet_type, sampling_period=1.0/(fs)) relevant_freqs = freqs[freqs>=lowest_relevant_freq] if np.sum(relevant_freqs) == 0: raise ValueError('The lowest relevant frequency is too high. Please re-check the value') within_centre_freqs = np.logical_and(np.min(freqs)<=lowest_relevant_freq, np.max(freqs)>=lowest_relevant_freq) if not within_centre_freqs: raise ValueError('The lowest relevant frequency %.2f is not included in the centre frequencies of the wavelet scales.\ Increase the scale range.'%np.round(lowest_relevant_freq,2)) relevant_rows = int(np.argwhere(np.min(relevant_freqs)==freqs)) summed_profile = np.sum(abs(coefs[:relevant_rows]), 0) dbrms_profile = dB(moving_rms_edge_robust(summed_profile, **kwargs)) dbrms_profile -= np.max(dbrms_profile) if np.min(dbrms_profile) >= background_threshold: raise IncorrectThreshold('The dynamic range of the signal is lower than the background threshold.\ Please decrease the background threshold') potential_region = identify_valid_regions(dbrms_profile>=background_threshold, 1) return potential_region class IncorrectThreshold(ValueError): pass coefs, freqs = pywt.cwt(example_call, np.arange(1,100), 'mexh',sampling_period=1/(fs*2)) #print(freqs, np.argmax(np.sum(coefs,1))) #plt.figure() #plt.imshow(dB(abs(coefs)), aspect='auto', origin='upper') profile_highf = np.sum(abs(coefs[:4,:]),0) profile_lowf = np.sum(abs(coefs[-4:,:]),0) dbrms_highf = dB(moving_rms_edge_robust(profile_highf, window_size=75)) dbrms_highf -= np.max(dbrms_highf) thres = -20 plt.figure() w0 = plt.subplot(212) plt.plot(make_x_time(dbrms_highf,fs*2), dbrms_highf) plt.hlines(0,0,dbrms_highf.size/(fs*2)) plt.hlines(thres,0,dbrms_highf.size/(fs*2)) plt.yticks(np.arange(0,thres-6,-6),) plt.subplot(211,sharex=w0) plt.specgram(example_call, Fs=fs*2, NFFT=128,noverlap=127, vmin=-100); batcall = dbrms_highf.flatten()>=thres plt.plot(make_x_time(example_call, fs*2), batcall*130000) plt.figure() plt.specgram(example_call, Fs=fs*2, NFFT=128,noverlap=127) call_region = segment_call_from_background(example_call, fs*2, scales=np.arange(1,5), background_threshold=-24) plt.plot(make_x_time(call_region, fs*2), call_region*100000)Notes for the wavelet based call-background segmentation:> if dynamic range of whole wavelet dbrms >= threshold dB --> throw error.> output largest continuous chunk above threshold.import datetime as dt print('Notebook run to the end last on:', dt.datetime.now())Notebook run to the end last on: 2020-03-10 18:45:26.443627Problem 3 Question a)Let us write the likelihood function in this setting : $$L(\alpha, \lambda) = f_{\alpha, \lambda}(T_{1}) \times f_{\alpha, \lambda}(T_{2}) \times \mathbb{P}(T > 100K)\times \mathbb{P}(T > T_4)\times \mathbb{P}(T > T_5)$$After calculations, the negative log-likelihood is equal to : $$-l(\alpha, \lambda) = -[2\log(\alpha \lambda) + (\alpha - 1)\log(\lambda^{2}T_{1}T_{2}) - (\lambda)^{\alpha}(T_{1}^{\alpha} + T_{2}^{\alpha})] + (\lambda)^{\alpha}(T_{3}^{\alpha}+T_{4}^{\alpha} + T_{5}^{\alpha})$$where $T_{1} = 44K, T_{2} = 26K, T_{3} = 100 K, T_{4} = 19K, T_{5} = 45K$. 
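Before minimizing, the closed-form expression can be cross-checked against a likelihood built directly from `scipy.stats` (an added sanity check, not part of the original solution; it assumes $T \sim \mathrm{Weibull}(\alpha, \mathrm{scale}=1/\lambda)$, matching the parameterization above):
import numpy as np
from scipy.stats import weibull_min

def neg_log_L_check(alpha, lam, exact=(44e3, 26e3), censored=(100e3, 19e3, 45e3)):
    # Exact failure times contribute log-pdf terms, right-censored ones log-survival terms.
    dist = weibull_min(c=alpha, scale=1.0 / lam)
    return -(np.sum(dist.logpdf(exact)) + np.sum(dist.logsf(censored)))

print(neg_log_L_check(alpha=1.5, lam=1e-5))  # evaluate at an arbitrary test point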
We can solve the MLE numerically with `scipy` module `optimize`.import numpy as np from scipy import optimize from scipy.stats import weibull_min from math import gamma # Problem 3 T1 = 44 * 10 ** 3 T2 = 26 * 10 ** 3 T3 = 100 * 10 ** 3 T4 = 19 * 10 ** 3 T5 = 45 * 10 ** 3 def neg_log_L(gamma, alpha) -> float: A = 2 * np.log(alpha / gamma) + (alpha - 1) * np.log(T1 * T2 / (gamma ** 2)) - \ ((1 / gamma) ** alpha) * ((T1 ** alpha) + (T2 ** alpha)) B = -((1 / gamma) ** alpha) * (T3 ** alpha + T4 ** alpha + T5 ** alpha) return -A - B ## Numerically solve MLE equations. epsilon = 1 * 10 ** -12 x0 = [10, 10] bnds = ((epsilon, np.inf), (epsilon, np.inf)) fun = lambda x: neg_log_L(x[0], x[1]) solver = optimize.minimize(fun, x0=x0, bounds=bnds) print(" {} iterations \n".format(solver.nit), "lambda = {} and alpha = {}".format(1 / solver.x[0], solver.x[1]))171 iterations lambda = 1.0823481787828909e-05 and alpha = 1.535248714292919Question b) We want to compute a $(1-\delta)\%$ confidence interval for $\mathbb{E}(T)$. Theoretically : $$ \mathbb{E}(T) = \frac{1}{\lambda} \Gamma(1+ \frac{1}{\alpha})$$So we have the plug-in estimate of $\mathbb{E}(T)$ by using $\hat \lambda_{MLE}$ and $\hat \alpha_{MLE}$. Let us compute a parametric bootstrap confidence interval. - 1) Generate $(\mu_{n}^{(1)}, \ldots, \mu_{n}^{(m)} )$ where $\mu_{n}^{(i)} = \frac{1}{n} \displaystyle \sum_{k=1}^{n}T_{k}^{(i)}$- 2) Find $x$ and $y$ such that $\mathbb{P}(\mu_{n} - \mu_{*} < x ) = 1 - \delta/2$ and $\mathbb{P}(\mu_{n} - \mu_{*} < y ) = \delta/2$ where $\mu_{*}$ is the plug-in estimate obtained with the MLE.## Parametric Boostrap (1-delta) confidence interval lmda = 1 / solver.x[0] alpha = solver.x[1] # reference parameter mu_star = (1 / lmda) * gamma(1 + 1 / alpha) print(" In average, {} kms before a reliability problem occurs (plug-in estimation with MLE)".format(mu_star)) m = 100 # number of iterations to aggregate bootstrap_estimates = [] for i in range(m): T_bootstrap = weibull_min.rvs(c=alpha, scale=1 / lmda, size=100) bootstrap_estimates.append(np.mean(T_bootstrap)) delta = 0.1 print(" delta = {}".format(delta), "\n", "m = {} ; ".format(m), "n = 100") # Upper bound : we find x s.t prob(estimate - mu_star < x) = 1-delta/2 x = 2000 count = 0 while (count / m != 1 - delta / 2): count = 0 for i in range(m): if bootstrap_estimates[i] - mu_star < x: count += 1 # print(count / m) x += 10 print(" Prob(mu_n - mu_estimate < {}) = {}".format(x, count / m)) # Lower bound : we find y s.t prob(estimate - mu_star < y ) = delta/2 y = -10000 count = 0 while (count / m != delta / 2): count = 0 for i in range(m): if bootstrap_estimates[i] - mu_star < y: count += 1 # print(count / m) y += 10 print(" Prob(mu_n - mu_estimate < {}) = {}".format(y, count / m)) print(" Parametric Boostrap {}% confidence interval : [{},{}]".format((1 - delta)*100, -x + np.mean(bootstrap_estimates), - y + np.mean(bootstrap_estimates)))In average, 83182.41571001986 kms before a reliability problem occurs (plug-in estimation with MLE) delta = 0.1 m = 100 ; n = 100 Prob(mu_n - mu_estimate < 8170) = 0.95 Prob(mu_n - mu_estimate < -9780) = 0.05 Parametric Boostrap 90.0% confidence interval : [74749.82839853111,92699.82839853111]Problem 6Written part is in the written section of the report.# Problem 4 ## Posterior distribution X = np.array([6.00, 4.82, 3.35, 2.38, 3.59, 4.12, 4.98, 2.69, 6.24, 6.77, 6.22, 5.42, 5.42, 3.10, 4.65, 4.24, 4.53, 4.62, 5.36, 2.57]) Sigma = (1 + (0.5) * np.sum(X[0:19]**2))**-1 X_i_1 = X[0:19] X_i = X[1:20] Mu = Sigma * ( (0.5) * 
np.sum(X_i_1*(X_i-2)) + 0.5) print("rho*|X follows a normal distribution with mean = {} and variance = {}".format(Mu, Sigma)) ## Prob(X_23 > 4) n = 100000 X23_estimates = [] count = 0 for i in range(n) : rho = np.random.normal(Mu, Sigma, 1) epsilon = np.random.normal(0,2,3) X23 = (rho**3) * X[19] + 2 * (1 + rho + rho**2) + epsilon[0] + epsilon[1] * rho + epsilon[2] * rho**2 X23_estimates.append(X23) if X23 >= 4 : count += 1 print("X23 estimate = {}".format(np.mean(X23_estimates))) print("Prob(X_23 > 4) = {}".format(count/n))rho*|X follows a normal distribution with mean = 0.5160781386089336 and variance = 0.0045245286006764165 X23 estimate = 3.9179892166389694 Prob(X_23 > 4) = 0.4861Revisão Cônicas https://mmas.github.io/conics-matplotlibimport numpy as np import matplotlib as mpl import matplotlib.pyplot as plt mpl.rcParams['lines.color'] = 'k' mpl.rcParams['axes.prop_cycle'] = mpl.cycler('color', ['k']) from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets x = np.linspace(-9, 9, 400) y = np.linspace(-5, 5, 400) x, y = np.meshgrid(x, y) def axes(): plt.axhline(0, alpha=.3) plt.axvline(0, alpha=.3)Plotar o grafico de $(y^2 - 4ax = 0)$a = .5 axes() plt.contour(x, y, (y**2 - 4*a*x), [0], colors='k') plt.show() axes() plt.contour(x, y, (y**2 - 4*a*x), [0], colors='k', alpha=.3) # Focus. plt.plot(a, 0, 'r*') # Directrix. plt.axvline(-a) plt.show() def f(m, b): plt.figure(2) x = np.linspace(-10, 10, num=1000) plt.plot(x, m**x + b*x - 1) plt.ylim(-5, 5) plt.show() interactive_plot = interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5)) output = interactive_plot.children[-1] output.layout.height = '350px' interactive_plot def g(a, b): axes() plt.contour(x, y, (a*y**2 - b*x), [0], colors='k') plt.show() interactive_plot = interactive(g, a=(-2, 2), b=(-3, 4)) output = interactive_plot.children[-1] output.layout.height = '500px' interactive_plot$$\frac{x^2}{a^2} + \frac{y^2}{b^2} = 0$$ para: a=2 e b=2$$\frac{x^2}{2^2} + \frac{x^2}{2^2} = 0$$a = 2. b = 2. axes() plt.contour(x, y,(x**2/a**2 + y**2/b**2), [1], colors='k') plt.show() a = 4. b = 2. axes() plt.contour(x, y,(x**2/a**2 + y**2/b**2), [1], colors='k') plt.show() axes() plt.contour(x, y,(x**2/a**2 + y**2/b**2), [1], colors='k', alpha=.1) # Eccentricity. e = np.sqrt(1 - b**2/a**2) # Foci. plt.plot(a*e, 0, '.', -a*e, 0, '.') # Directrices. plt.axvline(a/e) plt.axvline(-a/e) plt.show() a, b, c, d, e, f = 4, -5, 2, 4, -3, -3 assert b**2 - 4*a*c < 0 axes() plt.contour(x, y,(a*x**2 + b*x*y + c*y**2 + d*x + e*y + f), [0], colors='k') plt.show()Prepare Tidy CVC Datasets (with SWMM model hydrology) Setup the basic working environment%matplotlib inline import os import sys import datetime import warnings import csv import numpy as np import matplotlib.pyplot as plt import pandas import seaborn seaborn.set(style='ticks', context='paper') import wqio import pybmpdb import pynsqd import pycvc min_precip = 1.9999 big_storm_date = datetime.date(2013, 7, 8) palette = seaborn.color_palette('deep', n_colors=6) pybmpdb.setMPLStyle() POCs = [ p['cvcname'] for p in filter( lambda p: p['include'], pycvc.info.POC_dicts ) ] if wqio.testing.checkdep_tex() is None: tex_msg = ("LaTeX not found on system path. 
You will " "not be able to compile ISRs to PDF files") warnings.warn(tex_msg, UserWarning) warning_filter = "ignore" warnings.simplefilter(warning_filter)Load External Data (this takes a while)bmpdb = pycvc.external.bmpdb(palette[3], 'D') nsqdata = pycvc.external.nsqd(palette[2], 'd')Load CVC Databasecvcdbfile = "C:/users/phobson/Desktop/scratch/cvc/cvc.accdb" cvcdb = pycvc.Database(cvcdbfile, nsqdata, bmpdb)Hydrologic Relationships $V_{\mathrm{runoff, \ LV1}} = \max\left(0,\: -12.05 + 2.873\, D_{\mathrm{precip}} + 0.863 \, \Delta t \right)$def LV1_runoff(row): return max(0, -12.0 + 2.87 * row['total_precip_depth'] + 0.863 * row['duration_hours'])ED-1$\log \left(V_{\mathrm{runoff, \ ED1}}\right) = 1.58 + 0.000667 \, I_{\mathrm{max}} + 0.0169 \, D_{\mathrm{precip}} $$V_{\mathrm{bypass, \ ED1}} = \max \left(0,\: -26.4 + 0.184 \, I_{\mathrm{max}} + 1.22 \, D_{\mathrm{precip}} \right)$$V_{\mathrm{inflow, \ ED1}} = \max \left(0,\: V_{\mathrm{runoff, \ ED1}} - V_{\mathrm{bypass, \ ED1}} \right)$def ED1_runoff(row): return 10**(1.58 + 0.000667 * row['peak_precip_intensity'] + 0.0169 * row['total_precip_depth'] ) def ED1_bypass(row): return max(0, -26.4 + 0.184 * row['peak_precip_intensity'] + 1.22 * row['total_precip_depth']) def ED1_inflow(row): return max(0, ED1_runoff(row) - ED1_bypass(row))LV-2$\log \left(V_{\mathrm{runoff, \ LV2}}\right) = 1.217 + 0.00622 \, I_{\mathrm{max}} + 0.0244 \, D_{\mathrm{precip}} $$V_{\mathrm{bypass, \ LV2}} = 0$$V_{\mathrm{inflow, \ LV2}} = \max \left(0,\: V_{\mathrm{runoff, \ LV2}} - V_{\mathrm{bypass, \ LV2}} \right)$def LV2_runoff(row): return 10**(1.22 + 0.00622 * row['peak_precip_intensity'] + 0.0244 * row['total_precip_depth'] ) def LV2_bypass(row): return 0 def LV2_inflow(row): return max(0, LV2_runoff(row) - LV2_bypass(row))LV-4$\log \left(V_{\mathrm{runoff, \ LV4}}\right) = 1.35 + 0.00650 \, I_{\mathrm{max}} + 0.00940 \, D_{\mathrm{precip}} $$V_{\mathrm{bypass, \ LV4}} = \max \left(0,\: 7.37 + 0.0370 \, I_{\mathrm{max}} + 0.112 \, D_{\mathrm{precip}} \right)$$V_{\mathrm{inflow, \ LV4}} = \max \left(0,\: V_{\mathrm{runoff, \ LV4}} - V_{\mathrm{bypass, \ LV4}} \right)$def LV4_runoff(row): return 10**(1.35 + 0.00650 * row['peak_precip_intensity'] + 0.00940 * row['total_precip_depth'] ) def LV4_bypass(row): return max(0, 7.36 + 0.0370 * row['peak_precip_intensity'] + 0.112 * row['total_precip_depth']) def LV4_inflow(row): return max(0, LV4_runoff(row) - LV4_bypass(row))Water quality loading relationship$ M_{\mathrm{runoff}} = V_{\mathrm{runoff}} \times \hat{\mathbb{C}}_{\mathrm{inflow}}\left(\mathrm{landuse,\ season}\right) $$ M_{\mathrm{bypass}} = V_{\mathrm{bypass}} \times \hat{\mathbb{C}}_{\mathrm{inflow}}\left(\mathrm{landuse,\ season}\right) $$ M_{\mathrm{inflow}} = M_{\mathrm{runoff}} - M_{\mathrm{bypass}} $$ M_{\mathrm{outflow}} = V_{\mathrm{outflow}} \times \mathbb{C}_{\mathrm{outflow}} $ Define the site object for the reference site and compute its median values ("influent" to other sites)LV1 = pycvc.Site(db=cvcdb, siteid='LV-1', raingauge='LV-1', tocentry='Lakeview Control', isreference=True, minprecip=min_precip, color=palette[1], marker='s') LV1.runoff_fxn = LV1_runoffLakeview BMP sites get their "influent" data from LV-1def rename_influent_cols(col): if col.lower() in ['parameter', 'units', 'season']: newcol = col.lower() else: newcol = 'influent {}'.format(col.lower()) return newcol.replace(' nsqd ', ' ').replace(' effluent ', ' ') LV_Influent = ( LV1.medians("concentration", groupby_col='season') .rename(columns={'effluent stat': 'median'}) 
.rename(columns=rename_influent_cols) ) LV1.influentmedians = LV_Influent LV_Influent.head()Elm Drive's "influent" data come from NSQDED_Influent = ( cvcdb.nsqdata .seasonal_medians .rename(columns=rename_influent_cols) ) ED_Influent.head()Remaining site objectsED1 = pycvc.Site(db=cvcdb, siteid='ED-1', raingauge='ED-1', tocentry='Elm Drive', influentmedians=ED_Influent, minprecip=min_precip, isreference=False, color=palette[0], marker='o') ED1.runoff_fxn = ED1_runoff ED1.inflow_fxn = ED1_inflow LV2 = pycvc.Site(db=cvcdb, siteid='LV-2', raingauge='LV-1', tocentry='Lakeview Grass Swale', influentmedians=LV_Influent, minprecip=min_precip, isreference=False, color=palette[4], marker='^') LV2.runoff_fxn = LV2_runoff LV2.inflow_fxn = LV2_inflow LV4 = pycvc.Site(db=cvcdb, siteid='LV-4', raingauge='LV-1', tocentry=r'Lakeview Bioswale 1$^{\mathrm{st}}$ South Side', influentmedians=LV_Influent, minprecip=min_precip, isreference=False, color=palette[5], marker='v') LV4.runoff_fxn = LV4_runoff LV4.inflow_fxn = LV4_inflowFix ED-1 storm that had two composite samplesED1.hydrodata.data.loc['2012-08-10 23:50:00':'2012-08-11 05:20', 'storm'] = 0 ED1.hydrodata.data.loc['2012-08-11 05:30':, 'storm'] += 1Replace total inflow volume with estimate from simple method for 2013-07-08 stormstorm_date = datetime.date(2013, 7, 8) for site in [ED1, LV1, LV2, LV4]: bigstorm = site.storm_info.loc[site.storm_info.start_date.dt.date == storm_date].index[0] inflow = site.drainagearea.simple_method(site.storm_info.loc[bigstorm, 'total_precip_depth']) site.storm_info.loc[bigstorm, 'inflow_m3'] = inflow site.storm_info.loc[bigstorm, 'runoff_m3'] = np.nan site.storm_info.loc[bigstorm, 'bypass_m3'] = np.nanExport project-wide tidy datasets Hydrologic (storm) dataThe big event from July 8, 2013 is *retained* in this stephydro = pycvc.summary.collect_tidy_data( [ED1, LV1, LV2, LV4], lambda s: s.tidy_hydro ).pipe(pycvc.summary.classify_storms, 'total_precip_depth') hydro.to_csv('output/tidy/hydro_swmm.csv', index=False)Water quality dataThe loads from the big event on July 8, 2013 are *removed* in this stepwq = ( pycvc.summary .collect_tidy_data([ED1, LV1, LV2, LV4], lambda s: s.tidy_wq) .pipe(pycvc.summary.classify_storms, 'total_precip_depth') .pipe(pycvc.summary.remove_load_data_from_storms, [big_storm_date], 'start_date') ) wq.to_csv('output/tidy/wq_swmm.csv', index=False)Individual Storm Reports(requires $\LaTeX$)for site in [ED1, LV1, LV2, LV4]: print('\n----Compiling ISR for {0}----'.format(site.siteid)) site.allISRs('composite', version='draft')Extracting tests from notebooks> The functions that grab the cells containing tests (filtering with potential flags) and execute them# export _re_all_flag = re.compile(""" # Matches any line with #all_something and catches that something in a group: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace \#\s* # # then any number of whitespace all_(\S+) # all_ followed by a group with any non-whitespace chars \s* # any number of whitespace $ # end of line (since re.MULTILINE is passed) """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export def check_all_flag(cells): for cell in cells: if check_re(cell, _re_all_flag): return check_re(cell, _re_all_flag).groups()[0] nb = read_nb("35_tutorial_wikitext.ipynb") test_eq(check_all_flag(nb['cells']), 'slow') nb = read_nb("91_notebook_export.ipynb") assert check_all_flag(nb['cells']) is None # export _re_flags = re.compile(""" # Matches any line with a test flad and catches it in a group: ^ # beginning of line 
(since re.MULTILINE is passed) \s* # any number of whitespace \#\s* # # then any number of whitespace (slow|cuda|cpp) # all test flags \s* # any number of whitespace $ # end of line (since re.MULTILINE is passed) """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export def get_cell_flags(cell): if cell['cell_type'] != 'code': return [] return _re_flags.findall(cell['source']) test_eq(get_cell_flags({'cell_type': 'code', 'source': "#hide\n# slow\n"}), ['slow']) test_eq(get_cell_flags({'cell_type': 'code', 'source': "#hide\n# slow\n # cuda"}), ['slow', 'cuda']) test_eq(get_cell_flags({'cell_type': 'markdown', 'source': "#hide\n# slow\n # cuda"}), []) test_eq(get_cell_flags({'cell_type': 'code', 'source': "#hide\n"}), []) # export def _add_import_cell(mod): "Return an import cell for `mod`" return {'cell_type': 'code', 'execution_count': None, 'metadata': {'hide_input': True}, 'outputs': [], 'source': f"\nfrom local.{mod} import *"} # export _re_is_export = re.compile(r""" # Matches any text with #export or #exports flag: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace \#\s* # # then any number of whitespace exports? # export or exports \s* # any number of whitespace """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export _re_has_import = re.compile(r""" # Matches any text with import statement: ^ # beginning of line (since re.MULTILINE is passed) \s* # any number of whitespace import # # then any number of whitespace \s+ | \s* from \s+\S+\s+ import \s+ """, re.IGNORECASE | re.MULTILINE | re.VERBOSE) # export class NoExportPreprocessor(ExecutePreprocessor): "An `ExecutePreprocessor` that executes not exported cells" @delegates(ExecutePreprocessor.__init__) def __init__(self, flags, **kwargs): self.flags = flags super().__init__(**kwargs) def preprocess_cell(self, cell, resources, index): if 'source' not in cell or cell['cell_type'] != "code": return cell, resources #if _re_is_export.search(cell['source']) and not _re_has_import.search(cell['source']): # return cell, resources for f in get_cell_flags(cell): if f not in self.flags: return cell, resources res = super().preprocess_cell(cell, resources, index) return res # export def test_nb(fn, flags=None): "Execute `nb` (or only the `show_doc` cells) with `metadata`" os.environ["IN_TEST"] = '1' try: nb = read_nb(fn) all_flag = check_all_flag(nb['cells']) if all_flag is not None and all_flag not in L(flags): return mod = find_default_export(nb['cells']) #if mod is not None: nb['cells'].insert(0, _add_import_cell(mod)) ep = NoExportPreprocessor(L(flags), timeout=600, kernel_name='python3') pnb = nbformat.from_dict(nb) ep.preprocess(pnb) except Exception as e: print(f"Error in {fn}") raise e finally: os.environ.pop("IN_TEST") test_nb("07_vision_core.ipynb")Export-#hide notebook2script(all_fs=True)Converted 00_test.ipynb. Converted 01_core.ipynb. Converted 01a_dispatch.ipynb. Converted 01b_torch_core.ipynb. Converted 02_script.ipynb. Converted 03_dataloader.ipynb. Converted 04_transform.ipynb. Converted 05_data_core.ipynb. Converted 06_data_transforms.ipynb. Converted 07_vision_core.ipynb. Converted 08_pets_tutorial.ipynb. Converted 09_vision_augment.ipynb. Converted 10_data_block.ipynb. Converted 11_layers.ipynb. Converted 11a_vision_models_xresnet.ipynb. Converted 12_optimizer.ipynb. Converted 13_learner.ipynb. Converted 14_callback_schedule.ipynb. Converted 14a_callback_data.ipynb. Converted 15_callback_hook.ipynb. Converted 16_callback_progress.ipynb. Converted 17_callback_tracker.ipynb. 
Converted 18_callback_fp16.ipynb. Converted 19_callback_mixup.ipynb. Converted 20_metrics.ipynb. Converted 21_tutorial_imagenette.ipynb. Converted 22_vision_learner.ipynb. Converted 23_tutorial_transfer_learning.ipynb. Converted 30_text_core.ipynb. Converted 31_text_data.ipynb. Converted 32_[...]RGI18 (New Zealand)F. Maussion December 2021New inventory by Baumann et al.import pandas as pd import geopandas as gpd import subprocess import matplotlib.pyplot as plt import matplotlib.patches as mpatches import seaborn as sns import numpy as np from utils import mkdir, submission_summary, needs_size_filter, size_filter, plot_map, plot_date_hist import osFiles and storage paths# Region of interest reg = 18 # go down from rgi7_scripts/workflow data_dir = '../../rgi7_data/' # Level 2 GLIMS files l2_dir = os.path.join(data_dir, 'l2_sel_reg_tars') # Output directories output_dir = mkdir(os.path.join(data_dir, 'l3_rgi7a')) output_dir_tar = mkdir(os.path.join(data_dir, 'l3_rgi7a_tar')) # RGI v6 file for comparison later rgi6_reg_file = os.path.join(data_dir, 'l0_RGIv6', '18_rgi60_NewZealand.zip')Load the input data# Read L2 files shp = gpd.read_file('tar://' + l2_dir + f'/RGI{reg:02d}.tar.gz/RGI{reg:02d}/RGI{reg:02d}.shp')List of submissionssdf, _ = submission_summary(shp) sdf # # Optional: write out selection in intermediate shape files for manual GIS review # tmp_output_dir = mkdir(os.path.join(data_dir, 'l0_tmp_data', f'rgi{reg:02d}_inventories')) # tmp_output_dir_tar = mkdir(os.path.join(data_dir, 'l0_tmp_data')) # for subid in shp.subm_id.unique(): # s_loc = shp.loc[shp.subm_id == subid] # s_loc.to_file(tmp_output_dir + f'/subm_{int(subid):03d}.shp') # print('Taring...') # print(subprocess.run(['tar', '-zcvf', f'{tmp_output_dir_tar}/rgi{reg:02d}_inventories.tar.gz', '-C', # os.path.join(data_dir, 'l0_tmp_data'), f'rgi{reg:02d}_inventories']))Outline selection Apply selection criteria to create the RGI-07 data subset# try to get the data relevant for RGI07 and select by attributes rgi7 = shp.loc[shp['subm_id'] == 749].copy() rgi7['is_rgi6'] = False # Size filter? needs_size_filter(rgi7)Some sanity checkssdf, df_class = submission_summary(rgi7) df_class # Check the orphaned rock outcrops orphan_f = os.path.join(data_dir, 'l1_orphan_interiors', f'RGI{reg:02d}', f'RGI{reg:02d}.shp') if os.path.exists(orphan_f): orphan_f = gpd.read_file(orphan_f) if np.any(np.isin(rgi7.subm_id.unique(), orphan_f.subm_id.unique())): print('Orphan rock outcrops detected.')Plotsplot_map(rgi7, reg, linewidth=2) plot_map(rgi7, reg, linewidth=2, is_rgi6=True) plot_date_hist(rgi7, reg)Text for githubfgh = sdf.loc[rgi7.subm_id.unique().astype(int)].T fgh print(fgh.to_markdown(headers=np.append(['subm_id'], fgh.columns)))| subm_id | 749 | |:--------------|:-----------------------| | N | 3018 | | A | 886.4 | | analysts | Baumann, Paul, Rastner | | submitters | Paul | | release_date | 2021 | | geog_area | Various (GlobGlacier) | | src_date_mode | 2000 | | src_date_min | 2000 | | src_date_max | 2002 |Write out and tardd = mkdir(f'{output_dir}/RGI{reg:02d}/', reset=True) print('Writing...') rgi7.to_file(dd + f'RGI{reg:02d}.shp') print('Taring...') print(subprocess.run(['tar', '-zcvf', f'{output_dir_tar}/RGI{reg:02d}.tar.gz', '-C', output_dir, f'RGI{reg:02d}']))Writing... Taring... CompletedProcess(args=['tar', '-zcvf', '../../rgi7_data/l3_rgi7a_tar/RGI18.tar.gz', '-C', '../../rgi7_data/l3_rgi7a', 'RGI18'], returncode=0)New RGI-file created - Check result! 
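As an optional extra check (a sketch reusing the variables defined above), the freshly written archive can be re-opened with the same `tar://` pattern used for the L2 files at the top of this notebook and its outline count compared with `rgi7`:
# Optional added check: re-read the archive we just wrote and confirm the outline count
check = gpd.read_file('tar://' + output_dir_tar + f'/RGI{reg:02d}.tar.gz/RGI{reg:02d}/RGI{reg:02d}.shp')
assert len(check) == len(rgi7)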
load reference data comparisonsupport_dir = os.path.join(data_dir, 'l0_support_data') # RGI6 from utils import open_zip_shapefile ref_rgi6 = open_zip_shapefile(rgi6_reg_file) # FP df_ref = open_zip_shapefile(os.path.join(support_dir, 'new_zealand2000.zip'))Number of elementsprint('Number of glaciers in new RGI subset:', len(rgi7)) print('Number of glaciers in reference data:', len(df_ref)) print('Difference:', len(rgi7)-len(df_ref)) print('Number of glaciers in RGI6:', len(ref_rgi6))Number of glaciers in new RGI subset: 3018 Number of glaciers in reference data: 3018 Difference: 0 Number of glaciers in RGI6: 3537Total area# add an area field to the selected GAMDAM table df_ref['area'] = df_ref.to_crs({'proj':'cea'}).area ref_rgi6['area'] = ref_rgi6.to_crs({'proj':'cea'}).area # print and compare area values Area_RGI = rgi7['area'].sum() * 1e-6 print('Area RGI [km²]:', Area_RGI) Area_ref = df_ref['area'].sum() * 1e-6 print('Area Ref [km²]:', Area_ref) d = (Area_RGI - Area_ref) print('Area difference [km²]:',d) # print and compare area values Area_RGI = rgi7['area'].sum() * 1e-6 print('Area RGI7 [km²]:', Area_RGI) Area_ref = ref_rgi6['area'].sum() * 1e-6 print('Area RGI6 [km²]:', Area_ref) d = (Area_RGI - Area_ref) print('Area difference [km²]:',d)Area RGI7 [km²]: 886.4041457633251 Area RGI6 [km²]: 1161.7753563763804 Area difference [km²]: -275.37121061305527Kobe Bryant Tribute Word Cloud @Author: import pandas as pd import numpy as np from os import path, getcwd import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS from PIL import Image from twitterscraper import query_tweets import datetime as dt begin_date = dt.date(2020,1,26) end_date = dt.date(2020,1,27) lang = 'english' limit = 10000 tweets = query_tweets("#Kobe", begindate = begin_date, enddate=end_date, limit=limit, lang=lang) df = pd.DataFrame(i.__dict__ for i in tweets) df.head() df.shape df.to_csv('tweets.csv') user_tweets = pd.read_csv('tweets.csv') user_tweets.shape tweet_words = " ".join(user_tweets.text.drop_duplicates()) #removing punchuation from nltk.tokenize import RegexpTokenizer tokenizer = RegexpTokenizer(r'\w+') words = tokenizer.tokenize(tweet_words) import nltk nltk.download('stopwords') nltk.download('punkt') words_1 = ' '.join(words) #removing stop words from nltk.corpus import stopwords from nltk import word_tokenize stop_words = set(stopwords.words('english')) text = word_tokenize(words_1) text=[x.lower() for x in text] unwanted = ['kobe','bryant','kobebryant','https','helicopter', 'crash','today','rip','accident','daughter','death','black','pic','com'] text = " ".join([w for w in text if w not in unwanted]) no_digits=[] for i in text: if not i.isdigit(): no_digits.append(i) result = ''.join(no_digits) import nltk nltk.download('words') words = set(nltk.corpus.words.words()) sent = result final =" ".join(w for w in nltk.wordpunct_tokenize(sent) \ if w.lower() in words or not w.isalpha()) final import os p = os.getcwd() mask_logo = np.array(Image.open(path.join(p,'kobe.png'))) stopwords = set(STOPWORDS) stopwords.add('www') stopwords.add('instagram') stopwords.add('twitter') stopwords.add('igshid') stopwords.add('status') wc = WordCloud(background_color="white", max_words=2000, mask=mask_logo,stopwords=stopwords) # generate word cloud wc.generate(text) # store to file # wc.to_file(path.join(p, "kobe.png")) # show plt.imshow(wc, interpolation='bilinear') plt.axis("off") plt.figure() image_colors = ImageColorGenerator(mask_logo) 
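# ImageColorGenerator samples colors from the mask image, so the recolor step below makes each word take the color of the corresponding region of the logo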
wc.recolor(color_func=image_colors).to_file('kobe.png') plt.figure(figsize=[10,10]) plt.imshow(wc.recolor(color_func=image_colors), interpolation="bilinear") plt.axis("off") plt.show()Load Colab utilities and TA-Lib# !git clone https://github.com/b0tm4r/RNN---Divisas.git from google.colab import drive # To mount the drive from google.colab import files # To manage files and, for example, export them to your browser import glob # To manage files and, for example, export them to your browser drive.mount('/content/drive') !ls '/content/drive/My Drive'Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True). 'Colab Notebooks'TaLibimport os # download TA-Lib !wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz !tar xvzf ta-lib-0.4.0-src.tar.gz os.chdir('ta-lib') # Colab does not support cd !./configure --prefix=/usr !make !make install # wait ~ 30s os.chdir('../') !pip install TA-Lib--2021-01-13 17:05:22-- http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz Resolving prdownloads.sourceforge.net (prdownloads.sourceforge.net)... 172.16.31.10 Connecting to prdownloads.sourceforge.net (prdownloads.sourceforge.net)|172.16.31.10|:80... connected. HTTP request sent, awaiting response... 301 Moved Permanently Location: http://downloads.sourceforge.net/project/ta-lib/ta-lib/0.4.0/ta-lib-0.4.0-src.tar.gz [following] --2021-01-13 17:05:23-- http://downloads.sourceforge.net/project/ta-lib/ta-lib/0.4.0/ta-lib-0.4.0-src.tar.gz Resolving downloads.sourceforge.net (downloads.sourceforge.net)... 172.16.31.10 Reusing existing connection to prdownloads.sourceforge.net:80. HTTP request sent, awaiting response... 302 Found Location: https://netactuate.dl.sourceforge.net/project/ta-lib/ta-lib/0.4.0/ta-lib-0.4.0-src.tar.gz [following] --2021-01-13 17:05:23-- https://netactuate.dl.sourceforge.net/project/ta-lib/ta-lib/0.4.0/ta-lib-0.4.0-src.tar.gz Resolving netactuate[...]Librariesimport os import numpy as np import pandas as pd import matplotlib.pyplot as plt import datetime import talib import pickle from sklearn.preprocessing import MinMaxScaler from keras.models import Sequential from keras.layers import Dense, LSTM, Dropout from keras.models import load_modelImport the training datasetdef get_dataframe(dir_path, valor): df1 = pd.read_csv(os.path.join(dir_path,'datasets', "{}.csv".format(valor))) df2 = pd.read_csv(os.path.join(dir_path,'datasets', "{}-total.csv".format(valor))) df = pd.concat([df1, df2], ignore_index=True) df = df.drop_duplicates(['Fecha']) # Reverse the dataframe because the data come from newest to oldest, then reindex df = df.iloc[::-1] df = df.reset_index(drop=True) # Close df['Close'] = df.iloc[:,1:2] df['Close'] = df['Close'].apply(lambda x: x.replace(',','.')).astype(float) # Open df['Open'] = df.iloc[:,2:3] df['Open'] = df['Open'].apply(lambda x: x.replace(',','.')).astype(float) # High df['High'] = df.iloc[:,3:4] df['High'] = df['High'].apply(lambda x: x.replace(',','.')).astype(float) # Low df['Low'] = df.iloc[:,4:5] df['Low'] = df['Low'].apply(lambda x: x.replace(',','.')).astype(float) # RSI 14 rsi = talib.RSI(df['Close']) df['Rsi'] = rsi # Column selection df = df.iloc[:,[0,6,7,8,9,10]] return dfInput variables periodos = number of training periodsdir_path = '/content/drive/My Drive/Colab Notebooks/RNN-Divisas' periodos = 60 valor = 'EUR-USD'Model informationpickle_file = os.path.join(dir_path,'models', "{}-precio-rsi.pk".format(valor)) info = 
pickle.load(open(pickle_file,'rb')) info['summary']Test The network was created with two layers of depth. The first corresponds to the price, at position 1:2 of the dataset. The second corresponds to the RSI, at position 5:6 of the dataset.data_position = [[1,2],[5,6]] modelo = load_model(os.path.join(dir_path,'models', "{}-precio-rsi.h5".format(valor))) dataset_train = get_dataframe(dir_path,valor) dataset_train = dataset_train.dropna() dataset_train.head(3) testing_set = dataset_train.iloc[-periodos:, data_position[0][0]:data_position[0][1] ].values real_stock_price = testing_set sc = [] X_values = [] for i in range(0,len(data_position)): sc.append(MinMaxScaler(feature_range = (0, 1))) values = dataset_train.iloc[ len(dataset_train) - len(testing_set) - periodos:, data_position[i][0]:data_position[i][1] ].values values = values.reshape(-1,1) values = sc[i].fit_transform(values) X_values.append(values) X_test_list = [] for i in range(0,len(data_position)): X_test_data = [] for x in range(periodos, X_values[i].shape[0]): X_test_data.append(X_values[i][x-periodos:x, 0]) X_test_list.append(X_test_data) X_test_np = [] for i in range(0,len(data_position)): X_test_np.append( np.array(X_test_list[i]) ) ## axis = dimensions X_test = np.stack(X_test_np, axis=len(data_position)) predicted_stock_price = modelo.predict(X_test) predicted_stock_price = sc[0].inverse_transform(predicted_stock_price)WARNING:tensorflow:6 out of the last 11 calls to .predict_function at 0x7f3ef90a0730> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. 
For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.Visualizar los Resultadosplt.plot(real_stock_price, color = 'red', label = 'Precio Real del {}'.format(valor)) plt.plot(predicted_stock_price, color = 'blue', label = 'Precio Predicho del {}'.format(valor)) plt.title("Testing del valor {}".format(valor)) plt.xlabel("Periodos") plt.ylabel("Precio") plt.legend() plt.savefig(os.path.join(dir_path,'models','{}-test.jpg'.format(valor))) plt.show()Calculate the attachment coefficient via filtration theory Our model:\begin{equation}\begin{array}{rr} \dfrac{\partial C}{\partial t} + \dfrac{1}{\theta}\:\vec{q} \cdot \nabla C - \nabla \cdot (D_T\nabla C) =& - k_{\rm att}C + \dfrac{1}{\theta}k_{\rm det}S -\lambda_{\rm aq} C\\ \\ \dfrac{1}{\theta}\dfrac{\partial S}{\partial t} =& k_{\rm att}C - \dfrac{1}{\theta}k_{\rm det}S -\lambda_{\rm im} S\\\end{array}\end{equation}**Concentrations**- $C$ : concentration of infective viruses in the water phase- $S$ : concentration of infective viruses attached to the solid phase**Reaction rates**- $k_{\rm att}$ : attachment rate (C -> S)- $k_{\rm det}$ : detachment rate (S -> C)- $\lambda_{\rm aq}$ : decay rate while in aqueous phase (C -> X)- $\lambda_{\rm im}$ : decay rate while attached to solid phase (S -> X)**Advection/diffusion parameters**- $\vec{q}$ : darcy Flow- $t$ : time- $\theta$ : porosity- $D_T$ : Dispersion coefficient Dispersion coefficient $D_T$$ D_T = D_m + \alpha_LU^n $- $D_m$ : molecular diffusion coefficient- $\alpha_L$ : longitudinal dispersion coefficient- $U$ : intersticial flow velocity- $n$ : empirical fitting exponent*Neglecting transversal dispersion*bdef dispCoef(D_m,alpha_L,U,n=1.0): return D_m + alpha_L*(U**n)Intersticial flow velocityPore-water velocity from the darcy velocity:$U = \dfrac{q}{\theta}$def poreVel(q,theta): return q/thetaMolecular diffusion coefficient $D_m$From the Stokes-Einstein equation:$D_m = \dfrac{k_BT}{3\pi\eta d_p}$- $T$ : Temperature- $k_B$ : Boltzmann constant- $\eta$ : water dynamic viscosity- $d_p$ : (virus) particle diameter_____def molecularDiff(visco,dp): return (BOLTZMANN*TEMP)/(3*PI*visco*dp)Attachment rate coefficient: $k_{\rm att}$\begin{equation} k_{\rm att} = \dfrac{3 }{2d}(1-\theta)\alpha||\vec{q}||\eta_0\end{equation}- $d$ : collector diameter (soil grain size)- $\alpha$ : collision/attachment efficiency- $\eta_0$ : collector efficiencydef attachmentRate(dc,theta,alpha,q,eta0): return (3*(1-theta)*alpha*q*eta0)/(2*dc)Collision/attachment efficiency $\alpha$\begin{equation} \alpha = \dfrac{\text{Rate at which particles attach to the collector}} {\text{Rate at which particles collide with the collector}}\end{equation}$\alpha = 1.0$ for **favorable attachment conditions**, e.g., opposed electrostatic charges between the collector (soil grains) and the particles.For **infavorable conditions** such as the movement of viruses in soils, $\alpha < 1.0$. In these cases, it will be dependant of the isoelectric point of the particles and the soil matrix. The collector efficiency: $\eta_0$ $ \eta_0 = \overbrace{\eta_{\rm D}}^\text{Diffusion} + \underbrace{\eta_{\rm I}}_\text{Interception} + \overbrace{\eta_{\rm G}}^\text{Grav. 
deposition}$def collectorEff(etaD,etaI,etaG): return etaD + etaI + etaGEach term is approximated by Tufenkji & Elimelech (2004) [\[10.1021/es034049r\]](https://pubs.acs.org/doi/10.1021/es034049r):$\begin{array}{rl} \eta_{\rm D} =& 2.4 A_s^{1/3}N_{\rm R}^{-0.081}N_{\rm Pe}^{-0.175}N_{\rm vdW}^{0.052}\\ \\ \eta_{\rm I} =& 0.55 A_sN_{\rm R}^{1.55}N_{\rm Pe}^{-0.125}N_{\rm vdW}^{0.125}\\ \\ \eta_{\rm G} =& 0.475 N_{\rm gr}^{1.11} N_{\rm R}^{-1.35}N_{\rm Pe}^{-1.11}N_{\rm vdW}^{0.053}\\\end{array}$def collectorEfficiency_Diffusion(A_s,N_R,N_Pe,N_vdW): return 2.40 * (A_s**(1./3.)) * (N_R**-0.081) * (N_Pe**-0.175) * (N_vdW**0.052) def collectorEfficiency_Interception(A_s,N_R,N_Pe,N_vdW): return 0.55 * A_s * (N_R**1.55) * (N_Pe**-0.125) * (N_vdW**0.125) def collectorEfficiency_GDeposition(N_gr,N_R,N_Pe,N_vdW): return 0.475 * (N_gr**1.11) * (N_R**-1.35) * (N_Pe**-1.11) * (N_vdW**0.053)All these collector efficiencies come from **non-dimensional numbers** that defined as follows:- $\begin{array}{lcl} A_s = \dfrac{2(1-s^{5/3})}{2-3s^{1/3}+3s^{5/3}-2s^2} &\quad& s = 1-\theta\end{array}$def happelParameter(theta): s = 1-theta s53 = s**(5./3.) s13 = s**(1./3.) s21 = s**2 return (2*(1-s53))/(2 - (3*s13) + (3*s53) - (2*s21))**Size ratio:**- $N_{\rm R} = \dfrac{d_p}{d}$def noDim_SizeRatio(dp,dc): return dp/dc**Péclet number:**- $N_{\rm Pe} = \dfrac{Ud}{D_m}$def noDim_Péclet(U,dc,Dm): return U*dc/Dm**van der Waals number:**- $N_{\rm vdW} = \dfrac{A}{k_BT}$def noDim_vanderWaals(A): return A/(BOLTZMANN*TEMP)**Gravitational number:**- $N_{\rm gr} = \dfrac{4\pi r_p^4 (\rho_p - \rho_f)g}{3k_BT} = \dfrac{\pi d_p^4 (\rho_p - \rho_f)g}{12k_BT}$def noDim_Gravitational(dp,rhof,rhop): return (PI*(dp**4)*(rhop-rhof)*g)/(12.*BOLTZMANN*TEMP)_________ Attachment rate $k_{\rm att}$ as a function of particle size'''CASE CONSTANTS''' #Porosity theta = 0.35 # adim #Collector diameter dc = 2.0E-3 # 2mm << sand #Hamaker constant A = 5.0E-21 # J = N·m #Water dynamic viscosity viscosity = 0.0008891 # N·s/m² #Densities water_density = 997.05 # kg/m³ particle_density = 1050.0 # kg/m³ #Collision/attachment efficiency alpha = 1.00 # adim << favorable conditions #Darcy flow velocity q = 0.0001 # m/s def kattFromdp(dp): #Molecular diffusion Dm = molecularDiff(viscosity,dp) #Pore water velocity U = poreVel(q,theta) #Non-dimensional numbers As = happelParameter(theta) NR = noDim_SizeRatio(dp,dc) NPe = noDim_Péclet(U,dc,Dm) NvW = noDim_vanderWaals(A) NGr = noDim_Gravitational(dp,water_density,particle_density) #Collector efficiency etaD = collectorEfficiency_Diffusion(As,NR,NPe,NvW) etaI = collectorEfficiency_Interception(As,NR,NPe,NvW) etaG = collectorEfficiency_GDeposition(NGr,NR,NPe,NvW) eta0 = collectorEff(etaD,etaI,etaG) #Attachment rate kAtt = attachmentRate(dc,theta,alpha,q,eta0) return kAttCalculator for a single particle size valuedpSlider = wd.FloatLogSlider(value=100.0E-9,base=10.,min=-9,max=-2,description="Part. size [m]") interact_manual(kattFromdp,dp=dpSlider);Plot particle size v. attachment ratedp = np.power(10.,np.arange(-9,-2,0.2)) dc = 2.0E-3 # 2mm << sand dpVirus = [60.0E-9, 140.0E-9] #Measured sarscov2 dpFloc = 10.0E-6 #A floc? 
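# Added worked example (values depend on the constants assumed above): evaluate the
# helper-function chain once for a single 100 nm particle before sweeping dp below.
dp_single = 100.0E-9                                           # assumed particle diameter [m]
print('D_m [m2/s]  :', molecularDiff(viscosity, dp_single))    # Stokes-Einstein diffusion
print('U   [m/s]   :', poreVel(q, theta))                      # pore-water velocity
print('k_att [1/s] :', kattFromdp(dp_single))                  # full filtration-theory chain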
plt.figure(figsize=(6,5),facecolor="white") ax1 = plt.subplot(1,1,1) ## Efficiency 100% alpha = 1.00 K = kattFromdp(dp) ax1.plot(dp,K,lw=3,c="gray",\ label="$\\alpha = $"+"{:.1f}%".format(alpha*100)) ## Different efficiencies alpha = 0.15 K = kattFromdp(dp) ax1.plot(dp,K,lw=3,c="purple",\ label="$\\alpha = $"+"{:.1f}%".format(alpha*100)) alpha = 0.015 K = kattFromdp(dp) ax1.plot(dp,K,lw=3,c="blue",\ label="$\\alpha = $"+"{:.1f}%".format(alpha*100)) ## Vertical lines ax1.axvline(x=dpFloc,\ label="A 10μm floc",\ ls="dashed",lw=1,c="purple") ## Vertical spans ax1.axvspan(dpVirus[0],dpVirus[1],\ label="SARS-CoV-2 size",\ color="orange",alpha=0.10) ## Miscelaneous ax1.set_yscale("log") ax1.set_xscale("log") ax1.set_ylabel("Attachment rate [s$^{-1}$]") ax1.set_xlabel("Particle size [m]") ax1.set_xlim(1.0E-9,5.0E-3) ax1.set_ylim(2.0E-4,2.0E+2) ## Legend ax1.legend(loc="best",\ title="$d_c = $" + "{:.1E} m".format(dc),\ title_fontsize="large") plt.tight_layout() plt.show()Plot collector size v. attachment ratedc = np.power(10.,np.arange(-5,0,0.2)) dp = 100.0E-9 #A sarscov2? plt.figure(figsize=(6,5),facecolor="white") ax1 = plt.subplot(1,1,1) ## Efficiency 100% alpha = 1.00 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="gray",\ label="$\\alpha = $"+"{:.1f}%".format(alpha*100)) ## Different efficiencies alpha = 0.15 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="purple",\ label="$\\alpha = $"+"{:.1f}%".format(alpha*100)) alpha = 0.015 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="blue",\ label="$\\alpha = $"+"{:.1f}%".format(alpha*100)) ## Miscelaneous ax1.set_yscale("log") ax1.set_xscale("log") ax1.set_ylabel("Attachment rate [s$^{-1}$]") ax1.set_xlabel("Collector size [m]") ax1.set_xlim(1.0E-5,1.0E-2) ax1.set_ylim(2.0E-4,2.0E+2) ## Legend ax1.legend(loc="best",\ title="$d_p = $" + "{:.1E} m".format(dp),\ title_fontsize="large") plt.tight_layout() plt.show()Plot collector size v. 
attachment ratedc = np.power(10.,np.arange(-5,0,0.2)) plt.figure(figsize=(6,5),facecolor="white") ax1 = plt.subplot(1,1,1) ## Individual virus alpha = 1.00 dp = 100.0E-9 #A sarscov2 particle_density=1400 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="gray",\ label="Single virus\n $d_p = $" + "{:.1E} m".format(dp)\ + "\n$\\rho_p = $" + " {:.2f} g/cm³".format(particle_density/1000)) dp = 10.0E-6 #A biomass floc particle_density=1050 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="purple",\ label="Floc\n $d_p = $" + "{:.1E} m".format(dp)\ + "\n$\\rho_p = $" + " {:.2f} g/cm³".format(particle_density/1000)) ## Individual virus alpha = 0.15 dp = 100.0E-9 #A sarscov2 particle_density=1400 K = kattFromdp(dp) ax1.plot(dc,K,lw=2,c="gray",ls='dashed') dp = 10.0E-6 #A biomass floc particle_density=1050 K = kattFromdp(dp) ax1.plot(dc,K,lw=2,c="purple",ls='dashed') ## Miscelaneous ax1.set_yscale("log") ax1.set_xscale("log") ax1.set_ylabel("Attachment rate [s$^{-1}$]") ax1.set_xlabel("Collector size [m]") ax1.set_xlim(1.0E-5,1.0E-2) ax1.set_ylim(2.0E-4,2.0E+2) ## Legend ax1.legend(loc="best",\ title="Different particle types",\ title_fontsize="large",\ ncol=2) plt.tight_layout() plt.show() dc = np.power(10.,np.arange(-5,0,0.2)) plt.figure(figsize=(6,5),facecolor="white") ax1 = plt.subplot(1,1,1) ## Individual virus alpha = 0.01 #Over SiO2 dp = 100.0E-9 #A sarscov2 particle_density=1400 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="darkgreen",\ label="Single virus\n $d_p = $" + "{:.1E} m".format(dp)\ + "\n$\\rho_p = $" + " {:.2f} g/cm³".format(particle_density/1000) + "\n$\\alpha = $" + " {:.1f} %".format(alpha*100)) ## A sand colloid alpha = 0.32 #Check ren & packman - 2000 dp = 137.0E-6 #A #100 sand colloid particle_density=1050 K = kattFromdp(dp) ax1.plot(dc,K,lw=3,c="purple",\ label="Sand colloid\n $d_p = $" + "{:.1E} m".format(dp)\ + "\n$\\rho_p = $" + " {:.2f} g/cm³".format(particle_density/1000) + "\n$\\alpha = $" + " {:.1f} %".format(alpha*100)) ## Miscelaneous ax1.set_yscale("log") ax1.set_xscale("log") ax1.set_ylabel("Attachment rate [s$^{-1}$]") ax1.set_xlabel("Collector size [m]") ax1.set_xlim(1.0E-5,1.0E-2) ax1.set_ylim(2.0E-4,2.0E+2) ## Legend ax1.legend(loc="best",\ title="Different particle types",\ title_fontsize="large",\ ncol=2) plt.tight_layout() plt.show()自然语言处理实战 —— 文本分类文本分类是自然语言处理(NLP)领域的重要研究领域。文本分类指将文本按照一定的分类体系或标准进行分类标记,包括二分类和多分类等。在人工智能浪潮席卷全球的今天,文本分类技术已经被广泛地应用在情感分析、文本审核、广告过滤和反黄识别等 NLP 领域。现阶段的文本分类模型种类繁多,既有机器学习中的朴素贝叶斯模型、SVM 等,也有深度学习中的各种模型,比如经典的 CNN、RNN,以及它们的变形,如 CNN-LSTM 等。本实践首先介绍 ModelArts 的文本分类功能,之后使用 BERT 模型进行文本分类任务——中文文本情感分析。 ModelArts 文本分类功能本部分将介绍通过 ModelArts 的文本分类标注功能:对文本的内容按照标签进行分类处理。登录 ModelArts 管理控制台,在左侧菜单栏中选择`数据标注`,进入`数据集`管理页面。点击`创建数据集`,准备用于数据标注的文本数据。![](./img/data_tagging.png) 准备未标注数据集首先需要在 OBS 中创建一个数据集,后续的操作如标注数据、数据集发布等,都是基于创建和管理的数据集。OBS 链接在这里:https://www.huaweicloud.com/product/obs0.html数据标注功能需要获取访问 OBS 权限,在未进行委托授权之前,无法使用此功能。需要可以在`数据标注`页面,单击`服务授权`,由具备授权的账号`同意授权`后,即可使用。创建用于存储数据的 OBS 桶及文件夹。本实践中桶名设定为`classification-tagging`,**请用户建立新桶并自定义命名,OBS桶名全局唯一,若创建时桶名冲突,请选择其他不冲突桶名**。桶创建成功后,在桶中创建标注输入和标注输出的文件夹,并将用于标注是文本文件上传到输入文件夹中。文本标注文件的要求为:**文件格式要求 txt 或者 csv,文件大小不超过 8M,以换行符作为分隔符,每行数据代表一个标注对象。**在本实践中使用的示例标注文件`text.txt`可以[点此下载](https://modelarts-labs.obs.cn-north-1.myhuaweicloud.com/notebook/DL_nlp_text_classification/text.tar.gz),解压后可上传到输入文件夹中按照本案例步骤使用。在本实践中创建文件夹结构示例如下:```tagging │ ├─input │ └─text.txt └─output```其中- `input` 为文本分类输入文件夹- `text.txt` 为文本分类输入文本文件- `output` 为文本分类输出文件夹创建文本分类任务数据集,如下图所示![](./img/tagging_classification_1.png)注意创建参数- 名称:可自定义数据集名称,本案例中设定为`classification-tagging`- 
数据集输入位置:本案例中设定为`/classification-tagging/tagging/input/`- 数据集输出位置:本案例中设定为`/classification-tagging/tagging/output/`- 标注场景:选择`文本`- 标注类型:选择`文本分类`- 添加标签集:可自定义标签名称、个数、颜色。本案例中设定两个分类标签:`正面`标签为红色;`负面`标签为绿色。![](./img/label_color.png)完成以上设定后,点击右下角`创建`。文本分类数据集创建完成后,系统自动跳转至数据集管理页面。![](./img/tagging_classification_2.png)点击数据集名称,进入标注界面。选择未标注对象,点击标签进行标注,如图所示![](./img/tagging_classification_3.png)选择标注对象:`那场比赛易建联打得真好!`,从标签集选择`正面`标签,然后点击下方`保存当前页`进行保存。继续选择其他标注对象,按上述方法进行标注。数据全部标注完成后(本样例中仅提供五条分类文本),点击`已标注`可查看标注结果。![](./img/tagging_classification_4.png)点击`返回数据集`,可以看到数据集已全部标注成功。![](./img/tagging_classification_5.png)针对刚创建的数据集(未发布前),无数据集版本信息,必须执行发布操作后,才能应用于模型开发或训练。点击`发布`,可以编辑版本名称,本案例中为默认`V001`。![](./img/tagging_classification_6.png)发布成功如图所示。![](./img/tagging_classification_7.png)可以查看数据集版本的 “名称”、 “状态”、 “文件总数”、 “已标注文件个数”,并在左侧的 “演进过程”中查看版本的发布时间。随后可以使用标注成功的数据集,标注结果储存在`output`文件夹中。后续 ModelArts 将会上线智能标注功能,相信大家已经体验过第二期实战的图像智能标注,能够快速完成数据标注,节省70%以上的标注时间。智能标注是指基于当前标注阶段的标签及学习训练,选中系统中已有的模型进行智能标注,快速完成剩余数据的标注操作。请持续关注数据标注功能。 进入ModelArts点击如下链接:https://www.huaweicloud.com/product/modelarts.html , 进入ModelArts主页。点击“立即使用”按钮,输入用户名和密码登录,进入ModelArts使用页面。 创建ModelArts notebook下面,我们在ModelArts中创建一个notebook开发环境,ModelArts notebook提供网页版的Python开发环境,可以方便的编写、运行代码,并查看运行结果。第一步:在ModelArts服务主界面依次点击“开发环境”、“创建”![create_nb_create_button](./img/create_nb_create_button.png)第二步:填写notebook所需的参数:| 参数 | 说明 || - - - - - | - - - - - || 计费方式 | 按需计费 || 名称 | Notebook实例名称,如 text_sentiment_analysis || 工作环境 | Python3 || 资源池 | 选择"公共资源池"即可 || 类型 | 本案例使用较为复杂的深度神经网络模型,需要较高算力,选择"GPU" || 规格 | 选择"8核 &124; 64GiB &124; 1*p100" || 存储配置 | 选择EVS,磁盘规格5GB |第三步:配置好notebook参数后,点击下一步,进入notebook信息预览。确认无误后,点击“立即创建”![create_nb_creation_summary](./img/create_nb_creation_summary.png)第四步:创建完成后,返回开发环境主界面,等待Notebook创建完毕后,打开Notebook,进行下一步操作。![modelarts_notebook_index](./img/modelarts_notebook_index.png) 在ModelArts中创建开发环境接下来,我们创建一个实际的开发环境,用于后续的实验步骤。第一步:点击下图所示的“打开”按钮,进入刚刚创建的Notebook![inter_dev_env](img/enter_dev_env.png)第二步:创建一个Python3环境的的Notebook。点击右上角的"New",然后创建TensorFlow 1.13.1开发环境。第三步:点击左上方的文件名"Untitled",并输入一个与本实验相关的名称![notebook_untitled_filename](./img/notebook_untitled_filename.png)![notebook_name_the_ipynb](./img/notebook_name_the_ipynb.png) 在Notebook中编写并执行代码在Notebook中,我们输入一个简单的打印语句,然后点击上方的运行按钮,可以查看语句执行的结果:![run_helloworld](./img/run_helloworld.png) 文本分类——中文文本情感分析文本情感分析是指对带有主观性的观点、喜好、情感等文本进行分析和挖掘。最初的文本情感分析来自对带有情感色彩的词语的分析,例如,“美好”是带有褒义色彩的词语,而“丑陋”是带有贬义色彩的词语。随着互联网上大量的带有情感色彩的主观性文本的出现,研究者们逐渐从简单的情感词语的分析研究扩展到更为复杂的完整情感文本的研究。为了定量表示情感偏向,一般使用0到1之间的一个浮点数给文本打上情感标签,越接近1表示文本的情感越正向,越接近0表示情感越负向。 数据集在本实战中,使用的中文文本分类的数据集来自谭松波老师从某酒店网站上整理的酒店评论数据。数据集共7000多条评论数据,5000多条正向评论,2000多条负向评论。数据格式:| 字段 | label | review | | ---- | ------- | ---------- | | 含义 | 情感标签 | 评论文本 | BERT 模型本实践使用 NLP 领域最新最强大的 **BERT** 模型。中文**BERT-Base,Chinese**预训练模型,可以从链接[BERT-Base, Chinese](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)下载使用。 准备源代码和数据准备案例所需的源代码和数据,相关资源已经保存在 OBS 中,我们通过 ModelArts SDK 将资源下载到本地。from modelarts.session import Session session = Session() if session.region_name == 'cn-north-1': bucket_path = 'modelarts-labs/notebook/DL_nlp_text_classification/text_classification.tar.gz' elif session.region_name == 'cn-north-4': bucket_path = 'modelarts-labs-bj4/notebook/DL_nlp_text_classification/text_classification.tar.gz' else: print("请更换地区到北京一或北京四") session.download_data(bucket_path=bucket_path, path='./text_classification.tar.gz') !ls -laSuccessfully download file modelarts-labs/notebook/DL_nlp_text_classification/text_classification.tar.gz from OBS to local ./text_classification.tar.gz total 374440 
drwxrwxrwx 4 ma-user ma-group 4096 Sep 12 10:29 . drwsrwsr-x 22 ma-user ma-group 4096 Sep 12 09:42 .. drwxr-x--- 2 ma-user ma-group 4096 Sep 12 09:39 .ipynb_checkpoints -rw-r----- 1 ma-user ma-group 33828 Sep 12 10:23 text_classification.ipynb -rw-r----- 1 ma-user ma-group 383370868 Sep 12 10:29 text_classification.tar.gz drwx------ 2 ma-user ma-group 4096 Sep 12 10:04 .Trash-1000解压从obs下载的压缩包,解压后删除压缩包。!tar xf ./text_classification.tar.gz !rm ./text_classification.tar.gz !ls -latotal 56 drwxrwxrwx 5 ma-user ma-group 4096 Sep 12 10:29 . drwsrwsr-x 22 ma-user ma-group 4096 Sep 12 09:42 .. drwxr-x--- 2 ma-user ma-group 4096 Sep 12 09:39 .ipynb_checkpoints drwxr-x--- 6 ma-user ma-group 4096 Sep 11 11:28 text_classification -rw-r----- 1 ma-user ma-group 33828 Sep 12 10:23 text_classification.ipynb drwx------ 2 ma-user ma-group 4096 Sep 12 10:04 .Trash-1000导入依赖包import os import re import pandas as pd import tensorflow as tf from tensorflow import keras from sklearn.model_selection import train_test_split from text_classification.bert import modeling, optimization, tokenization tf.logging.set_verbosity(tf.logging.INFO)定义数据和模型路径data_dir = './text_classification/data/' output_dir = './text_classification/output/' vocab_file = './text_classification/chinese_L-12_H-768_A-12/vocab.txt' bert_config_file = './text_classification/chinese_L-12_H-768_A-12/bert_config.json' init_checkpoint = './text_classification/chinese_L-12_H-768_A-12/bert_model.ckpt'设置模型参数batch_size = 64 learning_rate = 2e-5 num_train_epochs = 10 warmup_proportion = 0.1 save_checkpoints_steps = 500 save_summary_steps = 100 max_seq_length = 128 label_list = [0, 1]读取数据集需要获取非倾斜的数据集,使标签的比例基本相等。随机展示训练集样本20条。def get_balance_corpus(corpus_size, corpus_pos, corpus_neg): sample_size = corpus_size // 2 pd_corpus_balance = pd.concat([corpus_pos.sample(sample_size, replace=corpus_pos.shape[0]总评论数:4000 正向评论:2000 负向评论:2000 训练样本示例读取BERT预训练模型中文字典tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=False) tokenizer.tokenize("今天的天气真好!")创建数据输入类class InputExample(object): def __init__(self, guid, text_a, text_b=None, label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class PaddingInputExample(object): pass DATA_COLUMN = 'review' LABEL_COLUMN = 'label' train_InputExamples = train.apply(lambda x: InputExample(guid=None, text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) test_InputExamples = test.apply(lambda x: InputExample(guid=None, text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1)转换为 BERT 输入向量打印前5个样例文本及其字向量、文本向量、位置向量和标签。def truncate_seq_pair(tokens_a, tokens_b, max_length): while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = 
tokenizer.tokenize(example.text_b) if tokens_b: truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] tokens = [] segment_ids = [] tokens.append("[CLS]") # 句头添加 [CLS] 标志 segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") # 句尾添加[SEP] 标志 segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** 示例 ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join([tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features train_features = convert_examples_to_features(train_InputExamples, label_list, max_seq_length, tokenizer) test_features = convert_examples_to_features(test_InputExamples, label_list, max_seq_length, tokenizer)INFO:tensorflow:Writing example 0 of 3200 INFO:tensorflow:*** 示例 *** INFO:tensorflow:guid: None INFO:tensorflow:tokens: [CLS] 7 月 16 日 入 住 , 是 在 携 程 买 的 半 自 助 自 由 行 , 所 以 携 程 已 经 处 理 好 所 有 的 文 件 , 只 提 供 了 通 行 证 就 顺 利 入 住 , 非 常 快 , 前 台 服 务 生 也 非 常 客 气 , 很 专 业 。 酒 店 大 堂 有 很 贴 心 的 安 排 , 大 人 在 办 理 入 住 的 时 候 , 孩 子 可 以 在 一 旁 看 迪 士 尼 的 动 画 片 。 酒 店 的 房 间 住 2 个 大 人 2 个 孩 子 都 很 宽 敞 , 到 [SEP] INFO:tensorflow:input_ids: 101 128 3299 8121 3189 1057 857 8024 3221 1762 3025 4923 743 4638 1288 5632 1221 5632 4507 6121 8024 2792 809 3025 4923 2347 5307 1905 4415 1962 2792 3300 4638 3152 816 8024 1372 2990 897 749 6858 6121 6395 2218 7556 1164 1057 857 8024 7478 2382 2571 8024 1184 1378 3302 1218 4495 738 7478 2382 2145 3698 8024 2523 683 689 511 6983 2421 1920 1828 3300 2523 6585 2552 4638 2128 2961 8024 1920 782 1762 1215 4415 1057 857 4638 3198 952 8024 2111 2094 1377 809 1762 671 3178 4692 6832 1894 2225 4638 1220 4514 4275 511 6983 2421 4638 2791 7313 857 123 702 1920 782 123 702 2111 2094 6963 252[...]加载模型参数,构造模型结构bert_config = modeling.BertConfig.from_json_file(bert_config_file) def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) output_layer = model.get_pooled_output() hidden_size = 
output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps): def model_fn(features, labels, mode, params): input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) use_one_hot_embeddings = False (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = metric_fn(per_example_loss, label_ids, logits, is_real_example) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=eval_metrics) else: output_spec = tf.estimator.EstimatorSpec( mode=mode, predictions={"probabilities": probabilities}) return output_spec return model_fn num_train_steps = int(len(train_features) / batch_size * num_train_epochs) num_warmup_steps = int(num_train_steps * warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), learning_rate=learning_rate, init_checkpoint=init_checkpoint, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps)模型训练def input_fn_builder(features, seq_length, is_training, drop_remainder): all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def 
input_fn(params): batch_size = params["batch_size"] num_examples = len(features) d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn run_config = tf.estimator.RunConfig( model_dir=output_dir, save_summary_steps=save_summary_steps, save_checkpoints_steps=save_checkpoints_steps) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={"batch_size": batch_size}) train_input_fn = input_fn_builder( features=train_features, seq_length=max_seq_length, is_training=True, drop_remainder=False) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)INFO:tensorflow:Using config: {'_model_dir': './text_classification/output/', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 500, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Create CheckpointSaverHook. INFO:tensorflow:Graph was finalized. INFO:tensorf[...]在测试集上测试,评估测试结果eval_input_fn = input_fn_builder( features=test_features, seq_length=max_seq_length, is_training=False, drop_remainder=False) evaluate_info = estimator.evaluate(input_fn=eval_input_fn, steps=None) print("\n打印测试评估指标") for key in evaluate_info: print(key+' : '+str(evaluate_info[key]))INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2019-09-12-02:37:48 INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from ./text_classification/output/model.ckpt-500 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
INFO:tensorflow:Finished evaluation at 2019-09-12-02:37:53 INFO:tensorflow:Saving dict for global step 500: eval_accuracy = 0.90625, eval_loss = 0.6071, global_step = 500, loss = 0.60426533 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 500: ./text_classification/output/model.ckpt-500在线测试由以上训练得到模型进行在线测试,可以任意输入句子,进行文本情感分析。输入“再见”,结束在线文本情感分析。def getPrediction(in_sentences): labels = ["负面评价", "正面评价"] input_examples = [InputExample(guid="", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, "" is just a dummy label input_features = convert_examples_to_features(input_examples, label_list, max_seq_length, tokenizer) predict_input_fn = input_fn_builder(features=input_features, seq_length=max_seq_length, is_training=False, drop_remainder=False) predictions = estimator.predict(predict_input_fn) for sentence, prediction in zip(in_sentences, predictions): print("\n评论:", sentence) print("得分:", prediction['probabilities']) print("评论情感分析:", labels[int(round(prediction['probabilities'][1]))]) return def sentiment_analysis(): while True: pred_sentences = [input()] if pred_sentences == ["再见"]: print("\n再见") return else: predictions = getPrediction(pred_sentences) print("在线文本情感分析:\n") sentiment_analysis()在线文本情感分析: 前台服务态度很热情Mount#마운트 from google.colab import drive drive.mount('/content/drive')Mounted at /content/driveReading Data# Import the pandas package, then use the "read_csv" function to read # the labeled training data import pandas as pd train = pd.read_csv('/content/drive/MyDrive/word2vec-nlp-tutorial/labeledTrainData.tsv', header=0, delimiter="\t", quoting=3) train.shape #check data shape train.columns.values #colum values train.columns #Now that you've read the training set, take a look at a few reviews: print(train['review'][0])"With all this stuff going down at the moment with MJ i've started listening to his music, watching the odd documentary here and there, watched The Wiz and watched Moonwalker again. Maybe i just want to get a certain insight into this guy who i thought was really cool in the eighties just to maybe make up my mind whether he is guilty or innocent. Moonwalker is part biography, part feature film which i remember going to see at the cinema when it was originally released. Some of it has subtle messages about MJ's feeling towards the press and also the obvious message of drugs are bad m'kay.

Visually impressive but of course this is all about Michael Jackson so unless you remotely like MJ in anyway then you are going to hate this and find it boring. Some may call MJ an egotist for consenting to the making of this movie BUT MJ and most of his fans would say that he made it for the fans which if true is really nice of him.

The actual feature film bit when it finally sta[...]Data Cleaning and Text Preprocessing#Removing HTML Markup: The BeautifulSoup Package !pip install BeautifulSoup4 #$ sudo pip install BeautifulSoup4 # Import BeautifulSoup into your workspace from bs4 import BeautifulSoup # Initialize the BeautifulSoup object on a single movie review example1 = BeautifulSoup(train["review"][0]) # Print the raw review and then the output of get_text(), for # comparison print(train["review"][0]) print(example1.get_text()) #Calling get_text() gives you the text of the review, without tags or markup. #To remove punctuation and numbers, we will use a package for dealing with regular expressions, called re. import re # Use regular expressions to do a find-and-replace letters_only = re.sub("[^a-zA-Z]", # The pattern to search for " ", # The pattern to replace it with example1.get_text() ) # The text to search print(letters_only) #[] indicates group membership and ^ means "not" #the re.sub() statement above says, "Find anything that is NOT a lowercase letter (a-z) or an upper case letter (A-Z), and replace it with a space. #We'll also convert our reviews to lower case and split them into individual words (called "tokenization" in NLP lingo) lower_case = letters_only.lower() # Convert to lower case words = lower_case.split() # Split into words words #Such words are called "stop words"; in English they include words such as "a", "and", "is", and "the". import nltk nltk.download() # Download text data sets, including stop words from nltk.corpus import stopwords # Import the stop word list print(stopwords.words("english")) #This will allow you to view the list of English-language stop words. To remove stop words from our movie review, do: # Remove stop words from "words" words = [w for w in words if not w in stopwords.words("english")] print(words) #Porter Stemming and Lemmatizing (both available in NLTK) would allow us to treat "messages", "message", and "messaging" as the same word, which could certainly be useful. #Now we have code to clean one review - but we need to clean 25,000 training reviews! To make our code reusable, let's create a function that can be called many times. def review_to_words( raw_review ): # Function to convert a raw review to a string of words # The input is a single string (a raw movie review), and # the output is a single string (a preprocessed movie review) # # 1. Remove HTML review_text = BeautifulSoup(raw_review).get_text() # # 2. Remove non-letters letters_only = re.sub("[^a-zA-Z]", " ", review_text) # # 3. Convert to lower case, split into individual words words = letters_only.lower().split() # # 4. In Python, searching a set is much faster than searching # a list, so convert the stop words to a set stops = set(stopwords.words("english")) # # 5. Remove stop words meaningful_words = [w for w in words if not w in stops] # # 6. Join the words back into one string separated by space, # and return the result. 
return( " ".join( meaningful_words )) #if you call the function for a single review clean_review = review_to_words( train["review"][0] ) print(clean_review) # Now let's loop through and clean all of the training set at once # Get the number of reviews based on the dataframe column size num_reviews = train["review"].size # Initialize an empty list to hold the clean reviews clean_train_reviews = [] # Loop over each review; create an index i that goes from 0 to the length # of the movie review list for i in range( 0, num_reviews ): # Call our function for each one, and add the result to the list of # clean reviews clean_train_reviews.append( review_to_words( train["review"][i] ) ) train["review"].size #Sometimes it can be annoying to wait for a lengthy piece of code to run. #It can be helpful to write code so that it gives status updates. print("Cleaning and parsing the training set movie reviews...\n") clean_train_reviews = [] for i in range( 0, num_reviews ): # If the index is evenly divisible by 1000, print a message if( (i+1)%1000 == 0 ): print("Review %d of %d\n" % ( i+1, num_reviews )) clean_train_reviews.append( review_to_words( train["review"][i] ))Cleaning and parsing the training set movie reviews... Review 1000 of 25000 Review 2000 of 25000 Review 3000 of 25000 Review 4000 of 25000 Review 5000 of 25000 Review 6000 of 25000 Review 7000 of 25000 Review 8000 of 25000 Review 9000 of 25000 Review 10000 of 25000 Review 11000 of 25000 Review 12000 of 25000 Review 13000 of 25000 Review 14000 of 25000 Review 15000 of 25000 Review 16000 of 25000 Review 17000 of 25000 Review 18000 of 25000 Review 19000 of 25000 Review 20000 of 25000 Review 21000 of 25000 Review 22000 of 25000 Review 23000 of 25000 Review 24000 of 25000 Review 25000 of 25000Creating Features from a Bag of Words (Using scikit-learn)#We'll be using the feature_extraction module from scikit-learn to create bag-of-words features. !pip install scikit-learn import sklearn print("Creating the bag of words...\n") from sklearn.feature_extraction.text import CountVectorizer # Initialize the "CountVectorizer" object, which is scikit-learn's # bag of words tool. vectorizer = CountVectorizer(analyzer = "word", \ tokenizer = None, \ preprocessor = None, \ stop_words = None, \ max_features = 5000) # fit_transform() does two functions: First, it fits the model # and learns the vocabulary; second, it transforms our training data # into feature vectors. The input to fit_transform should be a list of # strings. train_data_features = vectorizer.fit_transform(clean_train_reviews) # Numpy arrays are easy to work with, so convert the result to an # array train_data_features = train_data_features.toarray() #To see what the training data array now looks like, do print(train_data_features.shape) #However, we wanted to write our own function for data cleaning in this tutorial to show you how it's done step by step. #Now that the Bag of Words model is trained, let's look at the vocabulary # Take a look at the words in the vocabulary vocab = vectorizer.get_feature_names() print(vocab) #If you're interested, you can also print the counts of each word in the vocabulary import numpy as np # Sum up the counts of each vocabulary word dist = np.sum(train_data_features, axis=0) # For each, print the vocabulary word and the number of times it # appears in the training set for tag, count in zip(vocab, dist): print(count, tag)스트리밍 출력 내용이 길어서 마지막 5000줄이 삭제되었습니다. 
187 abandoned 125 abc 108 abilities 454 ability 1259 able 85 abraham 116 absence 83 absent 352 absolute 1485 absolutely 306 absurd 192 abuse 91 abusive 98 abysmal 297 academy 485 accent 203 accents 300 accept 130 acceptable 144 accepted 92 access 318 accident 200 accidentally 88 accompanied 124 accomplished 296 according 186 account 81 accuracy 284 accurate 123 accused 179 achieve 139 achieved 124 achievement 90 acid 971 across 1251 act 658 acted 6490 acting 3354 action 311 actions 83 activities 2389 actor 4486 actors 1219 actress 369 actresses 394 acts 793 actual 4237 actually 148 ad 302 adam 98 adams 453 adaptation 80 adaptations 154 adapted 810 add 439 added 166 adding 347 addition 337 adds 113 adequate 124 admire 621 admit 134 admittedly 101 adorable 510 adult 376 adults 100 advance 90 advanced 153 advantage 510 adventure 204 adventures 91 advertising 259 advice 90 advise 346 affair 93 affect 113 affected 104 afford 126 aforemention[...]Random Forestprint("Training the random forest...") from sklearn.ensemble import RandomForestClassifier # Initialize a Random Forest classifier with 100 trees forest = RandomForestClassifier(n_estimators = 100) # Fit the forest to the training set, using the bag of words as # features and the sentiment labels as the response variable # # This may take a few minutes to run forest = forest.fit( train_data_features, train["sentiment"] )Training the random forest...Creating a Submission# Read the test data test = pd.read_csv("/content/drive/MyDrive/word2vec-nlp-tutorial/testData.tsv", header=0, delimiter="\t", \ quoting=3 ) # Verify that there are 25,000 rows and 2 columns print(test.shape) # Create an empty list and append the clean reviews one by one num_reviews = len(test["review"]) clean_test_reviews = [] print("Cleaning and parsing the test set movie reviews...\n") for i in range(0,num_reviews): if((i+1) % 1000 == 0 ): print("Review %d of %d\n" % (i+1, num_reviews)) clean_review = review_to_words( test["review"][i] ) clean_test_reviews.append( clean_review ) # Get a bag of words for the test set, and convert to a numpy array test_data_features = vectorizer.transform(clean_test_reviews) test_data_features = test_data_features.toarray() # Use the random forest to make sentiment label predictions result = forest.predict(test_data_features) # Copy the results to a pandas dataframe with an "id" column and # a "sentiment" column output = pd.DataFrame( data={"id":test["id"], "sentiment":result} ) # Use pandas to write the comma-separated output file output.to_csv( "Bag_of_Words_model.csv", index=False, quoting=3 )Exercise: GPU performance for fashion mnist dataset This notebook is derived from a tensorflow tutorial here: https://www.tensorflow.org/tutorials/keras/classificationSo please refer to it before starting work on this exercise You need to write code wherever you see `your code goes here` comment. You are going to do image classification for fashion mnist dataset and then you will benchmark the performance of GPU vs CPU for 1 hidden layer and then for 5 hidden layers. You will eventually fill out this table with your performance benchmark numbers| Hidden Layer | CPU | GPU ||:------|:------|:------|| 1 | ? | ? || 5 | ? | ? 
|# TensorFlow and tf.keras import tensorflow as tf from tensorflow import keras # Helper libraries import numpy as np import matplotlib.pyplot as plt print(tf.__version__) fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] train_images.shape plt.imshow(train_images[0]) train_labels[0] class_names[train_labels[0]] plt.figure(figsize=(3,3)) for i in range(5): plt.imshow(train_images[i]) plt.xlabel(class_names[train_labels[i]]) plt.show() train_images_scaled = train_images / 255.0 test_images_scaled = test_images / 255.0 def get_model(hidden_layers=1): layers = [] # Your code goes here-----------START # Create Flatten input layers # Create hidden layers that are equal to hidden_layers argument in this function # Create output # Your code goes here-----------END model = keras.Sequential(layers) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) return model model = get_model(1) model.fit(train_images_scaled, train_labels, epochs=5) model.predict(test_images_scaled)[2] test_labels[2] tf.config.experimental.list_physical_devices()5 Epochs performance comparison for 1 hidden layer%%timeit -n1 -r1 with tf.device('/CPU:0'): # your code goes here %%timeit -n1 -r1 with tf.device('/GPU:0'): # your code goes here5 Epocs performance comparison with 5 hidden layers%%timeit -n1 -r1 with tf.device('/CPU:0'): # your code here %%timeit -n1 -r1 with tf.device('/GPU:0'): # your code hereLab 1 Before We BeginRun the following cell of code and type in your cse account credentials. It will download the essential data for our uses. Please do it in Google Collab since they have better internet connection compared to us.Don't worry about your password being revealed as it will be hidden.**Remove the following cell before you submit!! Otherwise, it will not be graded correctly**""" Download neccesary files for sanity check """ username = input("Please enter your username: ") import getpass password = getpass.getpass("Please enter your password: ") url = f'https://{username}:{password}@course.cse.ust.hk/comp2211/labs/Lab1/checking.zip' !wget $url -O checking.zip !unzip checking.zip """ Download CIFAR10 dataset from https://www.cs.toronto.edu/~kriz/cifar.html and extract the tarball """ !wget 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' -O cifar-10-python.tar.gz !tar xf cifar-10-python.tar.gz !rm cifar-10-python.tar.gz """ Download pictures of a present """ !wget 'https://cdn.pixabay.com/photo/2017/11/07/19/23/santa-claus-2927962_960_720.png' -O present.pngArray Transpose and ReshapingIn machine learning, basically all the calculation are done using multi-dimensional arrays. (Just remember this as a fact and you'll see very soon in later labs.) And since each multi-dimension array contains many different values, operations on these array is also a bit more complicated.One of the most common error we would encountered is the mismatching of shapes. We're not talking about the kind of mismatch happening when we trying to broadcast the array, the kind of mismatching usuaslly happens because of the order of the array.To solve this issues, we have mainly two different functions to help us to achieve this goal. ```numpy.ndarray.transpose``` and ```numpy.ndarray.reshape```. While sometimes they might produce similar result, they're actually very different. 
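For example (a quick illustrative sketch, not one of the lab tasks), an axis-order mismatch appears as soon as we combine two arrays that hold the same number of elements but have their axes in a different order, and transposing one of them resolves it:
import numpy as np
a = np.zeros((2, 3))           # shape (2, 3)
b = np.zeros((3, 2))           # same 6 elements, axes in the other order
# a + b would raise "operands could not be broadcast together"
c = a + b.transpose((1, 0))    # transposing b to shape (2, 3) makes the shapes match
print(c.shape)                 # (2, 3)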
Let's consider the following two arrays,import numpy as np x = np.arange(6).reshape((2,3)) y = np.arange(6).reshape((3,2)) z = np.arange(6).reshape((3,2)) print(x) print(y.transpose((1,0))) print(z.reshape(2,3))[[0 1 2] [3 4 5]] [[0 2 4] [1 3 5]] [[0 1 2] [3 4 5]]As you can see, although y and z start out identical, they end up as two very different arrays even though they share the same shape. This is because, in a mathematical sense, a reshaped matrix is a completely different matrix; there is in fact no standard linear-algebra operation that corresponds to ```reshape```. Transpose, however, is a genuine concept in linear algebra: it simply flips the matrix diagonally (draw a diagonal line from the top left to the bottom right and flip along that axis).print(y) print(y.transpose(1, 0))[[0 1] [2 3] [4 5]] [[0 2 4] [1 3 5]]You can also think of transpose as permuting the order of the dimensions of the numpy array, as demonstrated here:x = np.arange(8).reshape(2,2,2) print(x) x_1 = x.transpose((1, 0, 2)) # x_1 is x with its first and second axes swapped print(x_1)[[[0 1] [2 3]] [[4 5] [6 7]]] [[[0 1] [4 5]] [[2 3] [6 7]]]More on `reshape`So you might ask: why do we need to reshape an array at all if most of the time it makes no mathematical sense? One answer is that sometimes you do not have enough axes. For example, in our review we mentioned that we can add a new axis using reshape. A reshape is always valid as long as the product of the axis sizes equals the total number of elements in the array. For example:x = np.arange(6) x.reshape((2, 3)) # valid x.reshape((6, 1)) # valid x.reshape((2, 3, 1, 1, 1, 1, 1, 1)) # valid, and probably too many axes x.reshape((2, 3, -1)) # valid; -1 means let numpy work out the size of that axis # x.reshape((3, 3)) # invalid: 3x3=9, but only 6 elements are present.Part 1: Extracting Images from CIFAR10Let's do some exercises to get familiar with these functions.CIFAR10 is a famous small curated dataset in CV-related machine learning research. Thanks to its size and label quality, it is a very good toy dataset for testing and prototyping models. It is therefore worth knowing how to extract images from it, since the images are stored in a somewhat unusual layout.Inside ```cifar-10-batches-py``` there are a few files, and in this lab we are interested in ```data_batch_1```. This file is $\frac{1}{5}$ of the whole dataset and contains 10k images. In the following cell, I have already written some code to load the data. ```images``` contains the raw pixel data of the images, while ```filenames``` contains the filename of each raw image. Both arrays are aligned along the first axis (same order and same length).import pickle import matplotlib.pyplot as plt with open('./cifar-10-batches-py/data_batch_1', 'rb') as f: data = pickle.load(f, encoding='bytes') images = data[b'data'] filenames = np.array(data[b'filenames']) data[b'data'].shape from PIL import Image def check_all_pixels(x, y): x = np.array(Image.open(x).convert('RGB')) y = np.array(Image.open(y).convert('RGB')) return np.all(x == y) # Return True if all pixels are the sameTask 1.1: Finding Images with FilenamesNow, using Boolean indexing, please extract the images with the names ```b'coupe_s_002178.png'``` and ```b'capreolus_capreolus_s_000073.png'```.
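Before the graded cell below (which lists the three filenames it actually expects), here is a hedged sketch of the kind of Boolean indexing this task is after. The names `wanted`, `mask` and `subset` are illustrative; `images` and `filenames` are the arrays loaded in the cell above.

```python
import numpy as np

# Illustrative subset of filenames; the graded cell specifies the exact list to use.
wanted = [b'coupe_s_002178.png', b'capreolus_capreolus_s_000073.png']

# Boolean mask: True wherever the filename is one of the wanted names.
mask = np.isin(filenames, wanted)
# Equivalent, using the operators hinted at in the task:
# mask = (filenames == wanted[0]) | (filenames == wanted[1])

subset = images[mask]   # keep only the rows of `images` whose filename matched
print(subset.shape)     # (number of matches, 3072)
```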
**The 'b' in front of the string cannot be ignored** (For details please search ```Byte string``` in Google).""" TODO: Extract the three images from ```images``` where their file name is in [b'coupe_s_002178.png', b'capreolus_capreolus_s_000073.png', b'leptodactylus_pentadactylus_s_000004.png'] and save to the variable ```extracted_images```. Hint: You might use ```np.logical_or``` or ```|``` which are the same. (Please review the review section if you forgot) You might also want to place appropriate parathesis ```()``` to avoid ambiguity if you're using ```|```. """ ### START CODE HERE ### (≈ 1 lines of code) extracted_images = None ### END CODE HERE ### ### Sanity Check ### extracted_images.shape == (3, 3072) # It should have shape (3, 3072)Task 1.2: Reshape and TransposeNow you have extracted raw data of the three images, and we now want to plot it using libraries. However, we can't plot it directly as our libraries expect our image to have a shape of ```(H, W, C)``` where H, W is the height and width of the images respectively and C represent the number of channels. Since we're using RGB to represent our image, ```C=3```.# plt.imshow(extracted_images[0]) # We can't plot it directly! """ TODO: Reshape the array such that it is a 4D array with shape (3, 32, 32, 3) and the image looks the same as the provided samples. (look at the png in the folder) Save to the variable ```reshaped_images```. """ ### YOUR CODE HERE ### (≈ 1 lines of code) reshaped_images = None ### END CODE HERE ### for i, le_image in enumerate(reshaped_images): fig = plt.figure(frameon=False) plt.axis('off') plt.imshow(le_image) plt.savefig(f'./cifar_{i}_submit.png', bbox_inches='tight', pad_inches=0)Task 1.3 Visualization of Transpose""" TODO: Now, try swapping the first and second axis of the image (or second or third axis of the 4d array), how are these images compared to the original images? Save to the variable ```transposed_images```. """ ### START CODE HERE ### (≈ 1 lines of code) transposed_images = None ### END CODE HERE ### for i, le_image in enumerate(transposed_images): fig = plt.figure(frameon=False) plt.axis('off') plt.imshow(le_image) plt.savefig(f'./cifar_{i}_t_submit.png', bbox_inches='tight', pad_inches=0) for i in range(3): isSame = check_all_pixels(f'./cifar_{i}_submit.png', f'cifar_{i}_check.png') print(f'cifar_{i}_submit.png and cifar_{i}_check.png are {("same" if isSame else "different")}') for i in range(3): isSame = check_all_pixels(f'./cifar_{i}_t_submit.png', f'cifar_{i}_t_check.png') print(f'cifar_{i}_t_submit.png and cifar_{i}_t_check.png are {("same" if isSame else "different")}')Task 2 Image Enhancement (Image Augmentation): Contrast AdjustmentIf you have fiddled with your electronics devices for a bit, youg might have heard of contrast adjustment. In short, it's a way to make the image looks dull and sometimes, it can make the image clearer. In this lab, I'm going to walk you through one of the simpler way to adjust contrast.First, let's talk about how an image is represented in multi-dimensional programming.Usually, images are represented in multi-dimensional arrays with shape either like (H, W, C) or (C, H, W) (Which we have shown in previous task). And normally, an Image contains 3 channels. R, G, B. In some sense, you can think as if three images of different channel got smashed and turned into one images. Task 2.1: Grey scaleFor the contrast adjustment, we first need to convert our image to grey scale. 
To do so, we would calculate the new pixel value like this:$$ 0.299 * R + 0.587 * G + 0.114 * B$$For the purpose of teaching numpy, I would like to ask you to use ```np.dot``` (or ```@```) (But you can also do it on each channel individually). It does matrix multiplication (in linear algebra way instead of elementwise). The shape of the matrices would look like: ```(m, n)@(n, k)=(m, k)```image = np.array(Image.open('present.png').convert('RGB')) plt.imshow(image) """ Save greyscaled version of the image to the variable ```grey_image``` according to the given """ ### START CODE HERE ### (≈ 1 lines of code) grey_image = None ### END CODE HERE ### plt.imshow(grey_image, cmap='gray', vmin=0, vmax=255)Task 2.2 Take RatioLastly, the new pixel value of the adjust image would be:$$ I_{new, ij} = \alpha\times grey_{ij} + (1-\alpha)\times I_{ori, ij}$$where $\alpha\in[0, 1]$ and in this example, we take $\alpha=0.5$(Hint: the operations in this formula are pixle-wise, as hinted by the subscript ${}_{ij}$. You might want to reshape the greyscale image depends on how you code the previous step)alpha = 0.5 ### START CODE HERE ### (≈ 1 lines of code) new_image = None ### END CODE HERE ### plt.axis('off') plt.imshow(new_image.astype(np.uint8)) plt.savefig(f'./present_submit.png', bbox_inches='tight', pad_inches=0) isSame = check_all_pixels('./present_submit.png', './present_check.png') print(f'present_submit.png and present_check.png are {("same" if isSame else "different")}')This is not a very good contrast adjusting algorithm SubmissionPlease export this as ```.py``` file and submit it. Please make sure there's no error in **every cell**. And remove (or comment) every line that start with **```!```**.ZINC system will evaluate your code so no need to submit any .zip or image files.### This is the end of the lab ### Created by for the purposes of teaching COMP2211DPhi Pandas Assignment DetailsDear Learner, Congratulations, you have studied Pandas all the way through the end of this bootcamp!!! This is the final graded hands-on exercise where you will be accomplishing the below mentioned task. Step 1: Select a Dataset of your choice* https://www.kdnuggets.com/datasets/index.html* https://dphi.tech/challenges/past * https://pub.towardsai.net/best-datasets-for-machine-learning-data-science-computer-vision-nlp-ai-c9541058cf4f * https://medium.com/analytics-vidhya/top-100-open-source-datasets-for-data-science-cd5a8d67cc3dor you can pick any dataset of your interest on the internet and add reference to it Evaluation CriteriaYour submission will be evaluated using the following criteria:* You must ensure that the dataset is tabular. * You must ask and answer at least 3 questions about the dataset* Your submission must include explanations using markdown cells, apart from the code.* Your work must not be plagiarized i.e. copied for somewhere else.import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import warnings warnings.filterwarnings("ignore")**INTRODUCTION**:This is the dataset for an online shoppig website. Each entry is for a unique session, independent of the user.A session is a group of user interactions with your website that takes place within a given time frame. A single user can open multiple sessions but not concurrently. Opening multiple pages on different tabs of the same website is counted as a single session. 
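(Stepping back for a moment to the contrast-adjustment task above: a minimal sketch of the two steps, assuming `image` is an `(H, W, 3)` uint8 RGB array such as the loaded `present.png`; the variable names are illustrative.)

```python
import numpy as np

# Per-channel weights for the grey-scale conversion (R, G, B order assumed).
weights = np.array([0.299, 0.587, 0.114])

# Step 1: (H, W, 3) @ (3,) -> (H, W); each pixel becomes a weighted sum of its channels.
grey = image @ weights                     # same as np.dot(image, weights)

# Step 2: blend grey and original. grey needs a trailing axis so it broadcasts
# against the (H, W, 3) original, as the hint about reshaping suggests.
alpha = 0.5
new = alpha * grey[..., np.newaxis] + (1 - alpha) * image
new = new.astype(np.uint8)
```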
By default, a session ends based on time (after 30mins of inactivity or at midnight or campaign change (if a user arrives via one campaign, leaves, and then comes back via a different campaign)The administrative, informational and product related describe the types of pages of the website. The duration represents the amount of time spent on that particular page.According to Google analytics, bounce rate is the percentage of all sessions on your site in which users viewed only a single page(first page) and triggered only a single request to the Analytics server. Exit Rate is the percentage that were the last in the session, for all pageviews that lead to the page.For example, a user finds their way to a page on your website and then closes the website without clicking any button on it. Here the bounce rate is a 100%. If the user decides to click a button or two more buttons that lead to other pages of the website then the bounce rate for that first page no longer exists. An exit rate would exist for the last page before he closes the website. There is neither a bounce nor exit rate for pages in between. This is not to say that bounce and exit rates are mutually exclusive but it will depend on the number of sessions considered.Page Value is the average value for a page that a user visited before landing on the goal page or completing an Ecommerce transaction (or both). This value is intended to give you an idea of which page in your site contributed more to your site's revenue. If the session does not end up as a revenue, page value will be still be recorded as long as the user makes it to the goal page. Each page has its own pagevalue, if set.All definitions have been gotten from [Google Analytics](https://support.google.com/analytics/?hl=entopic=10737980) **OBJECTIVE**: To understand how the actions in a session affect a user's decision to make a purchase data source: https://s3.us-west-1.wasabisys.com/dphi/datasets/179/training_data.csv?AWSAccessKeyId=&Signature=WfvqqwJ74ua9qD4K4IDignwn6lM%3D&Expires=1645556967from google.colab import filesuploaded = files.upload() Step 2: Perform Data Cleaning/Preparation & Analysis1. Create data frame(s) from CSV files2. Perform index operations on data frame(s)3. Apply Group, merge and aggregate data frame(s)4. Check for missing and invalid values in data5. Propose a way to treat the missing and invalid values and implement the same on the dataframe5. Compute the mean, sum, range and other interesting statistics for numeric columnsshop_int_data = pd.read_csv("Online Shopping data.csv") shop_int_data.head() #understanding the data. shop_int_data.info() RangeIndex: 9864 entries, 0 to 9863 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Administrative 9864 non-null int64 1 Administrative_Duration 9864 non-null float64 2 Informational 9864 non-null int64 3 Informational_Duration 9864 non-null float64 4 ProductRelated 9864 non-null int64 5 ProductRelated_Duration 9864 non-null float64 6 BounceRates 9864 non-null float64 7 ExitRates 9864 non-null float64 8 PageValues 9864 non-null float64 9 SpecialDay 9864 non-null float64 10 Month 9864 non-null object 11 OperatingSystems 9864 non-null int64 12 Browser 9864 non-null int64 13 Region 9864 non-null int64 14 TrafficType [...]From the info, there are no missing values. It is easy to see that there are no duplicated columns. 
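(As a quick aside, a couple of one-liners one could run to back up the "no missing values / no duplicated columns" observation; `shop_int_data` is the dataframe loaded above.)

```python
# Missing values per column; a column of zeros confirms there are no NaNs.
print(shop_int_data.isnull().sum())

# Any duplicated column names would show up here (duplicated rows are handled next).
print(shop_int_data.columns.duplicated().any())
```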
We check for duplicated rows.#we check for duplicated rows as this can cause a bias in our analysis shop_int_data.duplicated().sum() #assign the variable bool_series to the data retaining only the first of duplicates. #False shows that there are not duplicates. bool_series = shop_int_data.duplicated(keep='first') #Take all False and store as the data. View the dataset without the duplicated rows shop_int_data = shop_int_data[~bool_series] #check again for duplicates print(shop_int_data.duplicated().sum(), shop_int_data.shape)0 (9772, 18)After removing the duplicates, we have 9772 rows to work with.Another cleaning is to consider the page views and their duration.We could have said if more than 1 page was viewed then the duration cannot be zero but we did not because it is possible to switch from one page to another within a minute with a fast internet connection. So, we start from two.#if more than two product related pages were viewed then the duration cannot be 0 shop_int_data[(shop_int_data['ProductRelated'] > 2) & (shop_int_data['ProductRelated_Duration'] == 0)].shape[0]There ought to be no rows for the above code just as is the case with the other types of pages.Anyway, it is easy to guess that these sessions did not yield a revenue because the product related page was opened but no further action taken. No event was recorded since the duration is 0.0. All the multiple pages must have been opened on different tabs of the browser and subsequently closed.Therefore, we shall remove this data as it can create a bias.# we drop the rows where more than two product related pages were viewed and the duration was 0. bias_data = shop_int_data[(shop_int_data['ProductRelated'] > 2) & (shop_int_data['ProductRelated_Duration'] == 0)].index shop_int_data.drop(bias_data, inplace = True) shop_int_data.shape[0]Another cleaning to consider is that of the page value. Looking through the dataset, we can see that there is a column for pagevalue. This means that a goal page and its corresponding value must have been set on the website. The ecommerce transaction page would also have a page value which is the price of the transaction.#if revenue equals to 1 then page value cannot be zero shop_int_data[(shop_int_data['Revenue'] == 1) & (shop_int_data['PageValues'] == 0)].shape[0]In the above case, we may want to assume that the pages viewed including the page for ecommerce transaction has no page value set or were not included as a goal page. But this is not the case for this dataset.It is good practice in ecommerce to set a page value for product related and transaction pages. Before deciding on whether to get rid of them, let us confirm if page values are set for a product related page.#a product related page was viewed and a pagevalue was not assigned. shop_int_data[(shop_int_data['ProductRelated'] > 0) & (shop_int_data['PageValues'] == 0)].shape[0]Based on the above result, it appears that there are more unset pagevalues than set pagevalues. Therefore, we shall assume that no page value is set for the product related page. If we are sure that a page value is set (because other sessions have a page value), then page values cannot be zero when there is a revenue and/or when a product related page is viewed (greater than 0). These pages are very important to understand the performance of a website. In any circumstance, the page for the online payment is will always have a page value. 
Therefore, we shall clean the above data where revenue is 1 and pagevalue is 0 but keep the data where a product related page was viewed and page value is still 0.#assuming a pagevalue is set for the payment page, let us delete the rows where pagevalue is 0 and revenue is 1 because it #is very likely a false revenue. false_revenue1 = shop_int_data[(shop_int_data['PageValues'] == 0) & (shop_int_data['Revenue'] == 1)].index shop_int_data.drop(false_revenue1, inplace = True) shop_int_data.shape[0] #similarly, if revenue is 0 then page value must be zero because the user did not make it to the payment page. false_revenue2 = shop_int_data[(shop_int_data['PageValues'] > 0) & (shop_int_data['Revenue'] == 0)].index shop_int_data.drop(false_revenue2, inplace = True) shop_int_data.shape[0]This brings us to the end of cleaning the data. Now, let us check the info to identify the changes.shop_int_data.info() Int64Index: 8520 entries, 0 to 9863 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Administrative 8520 non-null int64 1 Administrative_Duration 8520 non-null float64 2 Informational 8520 non-null int64 3 Informational_Duration 8520 non-null float64 4 ProductRelated 8520 non-null int64 5 ProductRelated_Duration 8520 non-null float64 6 BounceRates 8520 non-null float64 7 ExitRates 8520 non-null float64 8 PageValues 8520 non-null float64 9 SpecialDay 8520 non-null float64 10 Month 8520 non-null object 11 OperatingSystems 8520 non-null int64 12 Browser 8520 non-null int64 13 Region 8520 non-null int64 14 TrafficType [...]Step 3: Exploration & Inference1. Ask at least 3 interesting questions about your dataset2. Answer the questions by computing the results using Pandas library3. Add concluding remarks **1. How many percentage of the dataset yielded a revenue?**#all rows for positive revenue shop_int_data[shop_int_data['Revenue'] == 1].shape[0] shop_int_data.shape[0] #no of rows with positive revenue times 100 divided by the total no of rows after cleaning ((shop_int_data[shop_int_data['Revenue'] == 1].shape[0]) * 100)/ shop_int_data.shape[0]14.5% of the entire dataset yielded a postive revenue. Some of the positive revenue were cleaned out as false revenue.263 revenue were from New visitors and 11 from Other visitos while the rest (965) were from returning visitors. 86% of the dataset yielded zero revenue. This means we have more data for no revenue than for revenue.Although there are less entries for revenue, most of the analysis will be based on revenue to see how the ecommerce company can further improve on them.#representation of all website visitors visitor_numbers = shop_int_data['VisitorType'].value_counts() plt.title("Piechart of Website's Visitors") visitor_type = ['Returning_Visitor','New_Visitor', 'Other' ] plt.pie(visitor_numbers, labels = visitor_type, autopct='%0.1f%%') print('') plt.show()**2. What region did most visitors come from?**#region of all visitors shop_int_data['Region'].value_counts() #483 buyers from region 1. shop_int_data[(shop_int_data['Revenue']==1)]['Region'].value_counts().head(1)Most of the website visitors are from region 1. And most of the visitors who actually bought are also from region 1. There are 9 regions in total. **3. 
What is the average time needed for a visitor to decide to make a purchase?**#addition of all durations shop_int_data['total_time'] = shop_int_data.Administrative_Duration + shop_int_data.Informational_Duration + shop_int_data.ProductRelated_Duration shop_int_data.head() #find max, mean, min of total_time where revenue is 1 revenue_time = shop_int_data[(shop_int_data['Revenue']==1)]['total_time'] max(revenue_time), revenue_time.mean(), min(revenue_time)This shows that the minimum duration for a user to decide to make a purchase is 15.6s, the max duration is 23340s and the average duration is 2114s. Spending a longer duration on the website also does not indicate that the user will be convinced to buy. The highest duration spent on this website is 34576.95 and it yielded 0 revenue. **4. What browser and operating system is mostly used? How does this affect navigating the website and making a purchase?**#OS mostly used shop_int_data['OperatingSystems'].value_counts().head(1) #Browser mostly used shop_int_data['Browser'].value_counts().head(1) #displaying the data of the most used browser and OS with positive revenue shop_int_data[(shop_int_data['OperatingSystems']==2) & (shop_int_data['Browser']==2) & (shop_int_data['Revenue']==1)].shape[0] #when revenue is 0 shop_int_data[(shop_int_data['OperatingSystems']==2) & (shop_int_data['Browser']==2) & (shop_int_data['Revenue']==0)].shape[0]There are more zero revenue when browser 2 and OS 2 are used so this does not really affect the decision to make a purchase. However, it might make navigating the website much more easier. **4. Using more desriptive traffic sources, analyse the dataset.** The entries of traffic type for the dataset does not give enough information. Traffic type defines the method or medium by which visitors reach the website.For this analysis and as an assumption, I have decided to group the numbers into 4 groups and assign each group a traffic medium based on Google Analytics:1. 1-5 Direct traffic2. 6-10 Organic referral3. 11-15 Social referral4. 16-20 Paid referralDirect means that they directly used the site's URL link to get to the website. It could be that they have it stored in their browser bookmarks.Organic means they got to the website through a search engine like google, bing.Social means they used a link found on social media like twitter, facebook, etc.Paid means they clicked a sponosred ad used to promote the website.#form the traffic groups and insert into the dataset shop_int_data['TrafficType'].replace(dict.fromkeys([1, 2, 3, 4, 5], 'Direct'), inplace = True) shop_int_data['TrafficType'].replace(dict.fromkeys([6, 7, 8, 9, 10], 'Organic'), inplace = True) shop_int_data['TrafficType'].replace(dict.fromkeys([11, 12, 13, 14, 15], 'Social'), inplace = True) shop_int_data['TrafficType'].replace(dict.fromkeys([16, 17, 18, 19, 20], 'Paid'), inplace = True) shop_int_data.head()The numbers in the traffic column have been replaced by more descriptive traffic sources to a website.#we group the dataset based on traffic type traffic_data = shop_int_data.groupby(['TrafficType']) #This shows the first members of each group traffic_data.first() #we view some members of the organic traffic group traffic_data.get_group('Organic').head()The group is only formed in the traffic_data and does not apply to the main dataset called shop_int_data **Q. Bounce rate is calculated for the pages through which the user enters the website which we assume to be landing page for all sessions. How often did visitors click the wrong link? 
How does bounce rate affect the revenue?** In reality, bounce rate is calculated when a visitor opens then closes the website. It is calculated for the first page of the website that a visitor gets to. For different users in one session, they will have different first pages depending on how they got to the website.Because this is an ecommerce website and we expect visitors to view more than one page so a high bounce rate of 1 is bad.#find the mean bounce rate shop_int_data['BounceRates'].mean()0.02 is a quite low bounce rate. This means that the people who stayed and continued browsing after the landing page opened are more than those who bounced off for the overall sessions of the dataset.#are there bounce rates of 1 that lead to a revenue? No. shop_int_data[(shop_int_data['BounceRates']==1) & (shop_int_data['Revenue']==1)] #are there bounce rates higher than the mean that leads to a revenue? shop_int_data[(shop_int_data['BounceRates'] > (shop_int_data['BounceRates'].mean())) & (shop_int_data['Revenue']==1)]['TrafficType'].shape[0]As we can see from the result of this code, there are less sessions where the bounce rate was higher than the mean bounce rate and a revenue was made. For bounce rate, we are only concerned about the initial page and how it affects the rest of the user's interaction with the website. How many people entered through this page? Did they leave or did they continue to browse the website? A low rate means they stayed and a high rate means they left.A higher percentage of visitors in the above result showed that they got to the website directly through its link, so they had no reason to leave and consequently made a purchase. **Q) Exit rate is calculated for the pages through which the users leave the website. How often did these clicks make it to the revenue page ?** Assume the exit rate here is for the payment page. Users leave the website through this page to the external payment platform e.g Paypal. So, we want a high exit rate. That the visitors got to the payment page does not guarentee that they will pay. A non zero page value is the only indicator that a payment was made (as we noted during the preparation of the data).#find the mean exit rate for all the sessions. shop_int_data['ExitRates'].mean()A mean of 0.04 shows that less people left through the payment page than those who, lets say, returned to the product related page or to the landing page. **5.) Do you think paid ads were effective for the purpose of special day?**#type of traffic for all special day visit shop_int_data[(shop_int_data['SpecialDay'] > 0)]['TrafficType'].value_counts() #special day purchase based on traffic type shop_int_data[(shop_int_data['SpecialDay'] > 0) & (shop_int_data['Revenue'] == 1)]['TrafficType'].value_counts()Apparently, paid ads had no significant effect on purchases related to special day. A closer analysis shows that over 50% of the website visitors are by direct traffic. This suports the earlier analysis that 85% of the website visitors are Returning Visitors. This means that these visitors already have a direct link to the website. **6.) Import a dataframe of age and sexes. Merge with the shop_int_data dataset and use if for the following analysis.** As an assumption, this dataset will indicate the user for each session. 
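(An aside on the traffic-type grouping performed earlier: the four chained `.replace(dict.fromkeys(...))` calls can also be written as a single binning step. The sketch below uses `pd.cut` as an alternative, not what this notebook actually ran, and it assumes the `TrafficType` column still holds the raw integer codes 1-20.)

```python
import pandas as pd

# Bin the numeric traffic codes 1-5, 6-10, 11-15, 16-20 into the same four groups.
labels = ['Direct', 'Organic', 'Social', 'Paid']
traffic_group = pd.cut(shop_int_data['TrafficType'], bins=[0, 5, 10, 15, 20], labels=labels)
```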
from google.colab import filesuploaded = files.upload()mall_customers = pd.read_csv('Mall_Customers.csv') mall_customers.info() RangeIndex: 200 entries, 0 to 199 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Customer_id 200 non-null int64 1 Gender 200 non-null object 2 Age 200 non-null int64 3 Income 200 non-null int64 4 Spend_score 200 non-null int64 dtypes: int64(4), object(1) memory usage: 7.9+ KBThe annual income is in thousand dollars.Since there are just 200 rows in the user dataset, we will create a smaller version of the shop_int_data dataset then merge with the user dataset.#lets pick 200 rows between 5800 and 7000 of the shop_int_data shop_int_data_short = shop_int_data.loc[5800:6030] #drop unneccsary columns shop_int_data_short.drop(['Administrative', 'Administrative_Duration','Informational', 'Informational_Duration','BounceRates', 'ExitRates', 'PageValues', 'Month', 'OperatingSystems', 'Browser','total_time'], axis =1, inplace=True) shop_int_data_short.columnsThe two dataframes: shop_int_data_short and mall_customers do not have any series in common. i.e no common column(s).So we insert the CustomerID series into the shop_int_data_short dataframe so that they can have a common column and then we use the merge function. By this way, we also get rid of the default index of the shop_int_data_short.#this creates a common column, customerID, for the two datasets shop_int_data_short['Customer_id'] = np.linspace(1,200,200) shop_int_data_short.columns #the datasets get merged and the common column is not repeated users_data = mall_customers.merge(shop_int_data_short) users_data.head()**Q) Which gender made the most purchase and spent more time on the website?**#the number of males and females in the dataset users_data['Gender'].value_counts() #the number of females who made a purchase users_data[(users_data['Gender'] =='Female') & (users_data['Revenue'] == 1)].shape[0] #the number of males who made a purchase users_data[(users_data['Gender'] =='Male') & (users_data['Revenue'] == 1)].shape[0] #the total time spent by females on the website users_data[(users_data['Gender'] =='Female')]['ProductRelated_Duration'].sum() #the total time spent by males on the website users_data[(users_data['Gender'] =='Male')]['ProductRelated_Duration'].sum() #the total time spent by all males who made a purchase users_data[(users_data['Gender'] =='Male') & (users_data['Revenue'] == 1)]['ProductRelated_Duration'].sum() #the total time spent by all females who made a purchase users_data[(users_data['Gender'] =='Female') & (users_data['Revenue'] == 1)]['ProductRelated_Duration'].sum()Equal number of men and women made purchases on the website but men spent more time buying than women.Generally, women spent more time on the website since there are more women than men in this dataset. **Q.) What age grade has the highest income? 
How does this affect their spending?**max(users_data['Income']), users_data['Income'].mean(), min(users_data['Income']) #details of users who earned above the mean users_data[(users_data['Income'] >= 60.56)].describe() #details of users who earned above the mean and made a purchase users_data[(users_data['Income'] >= 60.56) & (users_data['Revenue'] == 1)].describe() #the income of everyone who made a purchase users_data[users_data['Revenue'] == 1]['Income'].describe()A little over half of the dataset earn above the mean income and made a purchase.In the entire dataset, only 12 people out of 102 with an annual income above the mean made a purchase. This is closely related to those earning below the mean income.Definitely, the annual income has no effect in deciding whether to buy or not.max(users_data['Age']), users_data['Age'].mean(), min(users_data['Age']) #age range of those who earned above the annual income and made a purchase users_data[(users_data['Income'] > 60.56) & (users_data['Revenue'] == 1)]['Age'].describe() #age range of those earned below the annual income and made a purchase users_data[(users_data['Income'] <= 60.56) & (users_data['Revenue'] == 1)]['Age'].describe() #the age range of people who made no purchase users_data[users_data['Revenue'] == 0]['Age'].describe()More people who earned lesser than the mean income made a purchases. Most of them were below the mean age of 38. **Q.) How often were purchases done on weekends?**users_data[(users_data['Revenue'] == 1) & (users_data['Weekend'] == True)].shape[0] users_data[(users_data['Revenue'] == 1) & (users_data['Weekend'] == False)].shape[0]简体中文 | [English](./QAOA_En.ipynb) 准备本文档演示 Paddle Quantum 上量子近似优化算法(QAOA,Quantum Approximate Optimization Algorithm)的工作流程 [1]。开始之前完成准备工作: - 调用飞桨 paddlepaddle - 调用常用的库, 例如画图工具库 networkx 和 matplotlib.pyplot - 调用自定义函数from paddle import fluid import os import numpy as np import matplotlib.pyplot as plt import networkx as nx from numpy import matmul as np_matmul from paddle.complex import matmul as pp_matmul from paddle.complex import transpose from paddle_quantum.circuit import UAnsatz背景量子近似优化算法(QAOA,Quantum Approximate Optimization Algorithm)是可以在近期有噪中等规模(NISQ,Noisy Intermediate-Scale Quantum)量子计算机上运行且具有广泛应用前景的量子算法。例如,QAOA 可以用来处理压缩图信号和二次优化等领域常见的离散组合优化问题。这类优化问题通常可以归结为下面的数学模型: $$F=\max_{z_i\in\{-1,1\}} \sum q_{ij}(1-z_iz_j)=-\min_{z_i\in\{-1,1\}} \sum q_{ij}z_iz_j+ \sum q_{ij}. $$其中, $z_i \in \{-1 ,1\} $ 是待求的二元参数,系数 $q_{ij}$ 是 $z_i z_j$ 的权重 (weight)。一般地,精确求解该问题对于经典计算机是 NP-hard 的,而 QAOA 被认为对近似求解这类困难问题具有潜在速度优势。QAOA 的工作原理是把上述经典优化问题(例如组合优化问题)甚至量子优化问题(例如量子多体系统中 Ising 模型的求解)等价地转化为求解一个物理系统哈密顿量(Hamiltonian)的基态能量(对应优化问题的最优值)及其相应的基态(对应于优化问题的最优解)。在数学形式上,QAOA 等价于求解一个实对角矩阵 $H$ 的最小特征值及其对应的特征向量。和另外一种常见的变分量子特征求解器(VQE, Variational Quantum Eigensolver) 一样,QAOA 也是一种量子-经典混杂算法。 然而 QAOA 参数化量子电路的实现更简单,仅需两个可以调节参数的量子电路模块组成。接下来,我们通过图的最大割问题 (Max-Cut problem)来展示 QAOA 算法的工作流程和原理。 示例 1. 
Max-Cut 问题图的 Max-Cut 问题可以描述为:对于一个给定的包含 $N$ 个顶点 (nodes or vertices)和 $M$ 条边 (edges) 的无向图,找到一种分割方案将图的顶点集合分割成两个无交子集合 $S$ 和 $S^\prime$,使得连接这两个顶点集合之间边的数目尽可能多。如图所示,我们考虑含4个顶点且具有环形结构的图: ![ring4.png](https://release-data.cdn.bcebos.com/PIC%2FMaxCut.png) Max-Cut 问题建模:在做分割时,若顶点 $i$ 属于集合 $S$ 时,赋值 $z_i=1$;若顶点 $j$ 属于 $S^\prime$ 时,则令 $z_j=-1$。那么对于图的任意连接顶点 $(i, j)$ 的边则满足:若顶点属于同一集合 $S$ 或 $S^\prime$ 时,$z_iz_j=1$; 若顶点分别属于不同集合时,$z_izj=-1$。于是 Max-Cut 问题转化为如下优化问题:$$ F=\min_{z_i\in\{-1, 1\}} z_1 z_2+z_2z_3+z_3z_4+z_4z_1.$$这里所有 $q_{ij}$ 均设置为 1,表示每条边的权重相等。该问题的所有可行解由比特串 $ \boldsymbol{z}=z_1z_2z_3z_4 \in \{-1, 1\}^4$ 组成,而且通常需要遍历所有比特串才能得到问题的最小值(最优解)。容易看出,比特串的数目是顶点数目 $N$ 的指数级别,即 $2^N$。因此,随着顶点数目的增加,搜索的代价也会呈指数级别增加。接下来,我们提供两种方法来预处理编码经典优化问题的图,即如何通过 Paddle Quantum 输入和可视化无权(或带权重)图:- 方法1是通过指定图的顶点和相应的边(及其权重)- 方法2是通过直接输入图的邻接矩阵。def generate_graph(N, GRAPHMETHOD): """ It plots an N-node graph which is specified by Method 1 or 2. Args: N: number of nodes (vertices) in the graph METHOD: choose which method to generate a graph Return: the specific graph and its adjacency matrix """ # Method 1 generates a graph by self-definition if GRAPHMETHOD == 1: print("Method 1 generates the graph from self-definition using EDGE description") graph = nx.Graph() graph_nodelist=range(N) graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0)]) graph_adjacency = nx.to_numpy_matrix(graph, nodelist=graph_nodelist) # Method 2 generates a graph by using its adjacency matrix directly elif GRAPHMETHOD == 2: print("Method 2 generates the graph from networks using adjacency matrix") graph_adjacency = np.array([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]) graph = nx.Graph(graph_adjacency) else: print("Method doesn't exist ") return graph, graph_adjacency这里指定方法1来预处理图:- 图的顶点数目 $N=4$- 图的输入方法 GRAPHMETHOD = 1 注意:上述两种方法给出的图的顶点均从 $0$ 开始计数。# number of qubits or number of nodes in the graph N=4 classical_graph, classical_graph_adjacency= generate_graph(N, GRAPHMETHOD=1) print(classical_graph_adjacency) pos = nx.circular_layout(classical_graph) nx.draw(classical_graph, pos, width=4, with_labels=True, font_weight='bold') plt.show()2. 编码量子优化问题接下来需要把上述经典优化问题映射为量子优化问题。利用替代关系 $z=1\rightarrow |0\rangle = \begin{bmatrix}1 \\ 0\end{bmatrix}$ 和 $z=-1\rightarrow |1\rangle = \begin{bmatrix}0 \\ 1\end{bmatrix}$, 我们把二元参数 $z_i\in\{-1, 1\}$ 对应为描述量子比特的 Pauli-Z 算符 $Z_i=\begin{bmatrix} 1 & 0\\ 0 & -1\end{bmatrix}$ 的两个本征值。于是经典优化问题里的目标函数相应地编码为一个描述系统哈密顿量的矩阵$$H_{c}= Z_1Z_2+Z_2Z_3+Z_3Z_4+Z_4Z_1.$$其中 $Z_iZ_{j}$ 是 tensor product 运算,表示 Pauli-Z 算符分别作用在量子比特 $i$ 和 $j$ 上,而其余的量子比特上作用单位算符 $I=\begin{bmatrix} 1 & 0\\ 0 & 1\end{bmatrix}$ ,例如 $Z_1Z_2 =Z_1\otimes Z_2\otimes I_3\otimes I_4$。经过上述操作,我们把经典优化问题转化为求解矩阵 $H_{c}$ 的最小特征值 $F$ 及其对应的向量 $|\psi\rangle$, 即$$ F=\min_{|\psi\rangle} \langle \psi| H_c |\psi\rangle.$$这里,$|\psi\rangle$ 记为一个模长为1的 $2^4=16$ 维复向量,$\langle \psi|$ 是其共轭转置。Paddle Quantum 中通过函数 H_generator 完成编码任务:def H_generator(N, adjacency_matrix): """ This function maps the given graph via its adjacency matrix to the corresponding Hamiltiona H_c. 
Args: N: number of qubits, or number of nodes in the graph, or number of parameters in the classical problem adjacency_matrix: the adjacency matrix generated from the graph encoding the classical problem Return: H_graph: the problem-based Hamiltonian H generated from the graph_adjacency matrix for the given graph H_graph_diag: the real part of the problem-based Hamiltonian H_graph """ sigma_Z = np.array([[1, 0], [0, -1]]) H = np.zeros([2 ** N, 2 ** N]) # Generate the Hamiltonian H_c from the graph via its adjacency matrix for row in range(N): for col in range(N): if abs(adjacency_matrix[N - row - 1, N - col - 1]) and row < col: identity_1 = np.diag(np.ones([2 ** row])) identity_2 = np.diag(np.ones([2 ** (col - row - 1)])) identity_3 = np.diag(np.ones([2 ** (N - col - 1)])) H += adjacency_matrix[N - row - 1, N - col - 1] * np.kron( np.kron(np.kron(np.kron(identity_1, sigma_Z), identity_2), sigma_Z), identity_3, ) H_graph = H.astype("complex64") H_graph_diag = np.diag(H_graph).real return H_graph, H_graph_diag我们可以查看生成矩阵 $H_c $ 的具体形式,并且获取它的特征值信息:_, H_problem_diag = H_generator(N, classical_graph_adjacency) H_graph_max = np.max(H_problem_diag) H_graph_min = np.min(H_problem_diag) print(H_problem_diag) print('H_max:', H_graph_max, ' H_min:', H_graph_min)3. 搭建 QAOA 电路通过交替地摆放两个参数可调的电路模块,我们得以搭建QAOA电路$$U_x(\beta_P)U_c(\gamma_P)\dots U_x(\beta_1)U_c(\gamma_1),$$其中放置的次数记为 $P$。具体地,模块一是由描述问题哈密顿量的矩阵生成,即$$U_c(\gamma)=e^{-i \gamma H_c },$$其中 $i= \sqrt{-1}$ 是虚数单位, $\gamma\in [0, \pi]$ 是可以调节的参数。模块二是$$U_x(\beta)=e^{-i \beta H_x },$$由描述驱动哈密顿量的另一个矩阵生成 $$H_x =X_1+X_2+X_3+X_4. $$$\beta\in [0, \pi]$ 也是一个可调参数,算符 $X=\begin{bmatrix} 0 & 1\\ 1 & 0\end{bmatrix}$ 是作用在量子比特上的 Pauli-X 逻辑门,例如 $X_1$ 实际数学表达式为 $X_1\otimes I_2\otimes I_3\otimes I_4$。QAOA 电路的每一模块可以进一步分解为若干个作用在单比特和两比特上的含参的量子逻辑门,如图所示:![QAOA.png](https://release-data.cdn.bcebos.com/PIC%2FQAOACir.png) 其中,模块 $U_x(\beta)$ 可以分解为在每个量子比特上作用绕 $X$ 方向转动的量子逻辑门 $R_x(\beta)= e^{-i\beta X_j}$,而模块 $U_c(\gamma)$ 则可分解为作用在两比特上的 $ZZ$ 逻辑门 $R_{zz}(\gamma)= e^{-i\gamma Z\otimes Z}$。此外,我们可以设置交叉放置两个模块的次数,记为 QAOA 电路的层数 $P$。于是输入- 量子电路的初始状态- 经典问题的邻接矩阵- 电路比特数目- 电路层数构建标准的 QAOA 量子电路:def circuit_QAOA(theta, input_state, adjacency_matrix, N, P): """ This function constructs the parameterized QAOA circuit which is composed of P layers of two blocks: one block is U_theta[layer][0] based on the problem Hamiltonian H which encodes the classical problem, and the other is U_theta[layer][1] constructed from the driving Hamiltonian describing the rotation around Pauli X acting on each qubit. It finally outputs the final state of the QAOA circuit. Args: theta: parameters to be optimized in the QAOA circuit input_state: initial state of the QAOA circuit which usually is the uniform superposition of 2^N bit-strings in the computataional basis adjacency_matrix: the adjacency matrix of the graph encoding the classical problem N: number of qubits, or equivalently, the number of parameters in the original classical problem P: number of layers of two blocks in the QAOA circuit Returns: the final state of the QAOA circuit: cir.state """ cir = UAnsatz(N, input_state=input_state) # This loop defines the QAOA circuit with P layers of two blocks for layer in range(P): # The second and third loops construct the first block U_theta[layer][0] which involves two-qubit operation # e^{-i\beta Z_iZ_j} acting on a pair of qubits or nodes i and j in the circuit in each layer. 
for row in range(N): for col in range(N): if abs(adjacency_matrix[row, col]) and row < col: cir.cnot([row + 1, col + 1]) cir.rz( theta=theta[layer][0] * adjacency_matrix[row, col], which_qubit=col + 1, ) cir.cnot([row + 1, col + 1]) # This loop constructs the second block U_theta only involving the single-qubit operation e^{-i\beta X}. for i in range(1, N + 1): cir.rx(theta=theta[layer][1], which_qubit=i) return cir.state在标准 QAOA 的基础上,我们还支持对电路结构进行扩展,进一步探索 QAOA 更多可能性。例如,可以将模块二的驱动哈密顿量 $H_x$ 中的的绕单比特 X 方向转动 $R_x(\beta)$ 扩展为绕任意方向转动,且任意方向等价于依次绕 Z, X, Z 方向转动适当的角度,即$R_z(\beta_1)R_x(\beta_2)R_z(\beta_3)$:def circuit_extend_QAOA(theta, input_state, adjacency_matrix, N, P): """ This is an extended version of the QAOA circuit, and the main difference is U_theta[layer]([1]-[3]) constructed from the driving Hamiltonian describing the rotation around an arbitrary direction on each qubit. Args: theta: parameters to be optimized in the QAOA circuit input_state: input state of the QAOA circuit which usually is the uniform superposition of 2^N bit-strings in the computational basis adjacency_matrix: the adjacency matrix of the problem graph encoding the original problem N: number of qubits, or equivalently, the number of parameters in the original classical problem P: number of layers of two blocks in the QAOA circuit Returns: final state of the QAOA circuit: cir.state Note: If this U_extend_theta function is used to construct QAOA circuit, then we need to change the parameter layer in the Net function defined below from the Net(shape=[D, 2]) for U_theta function to Net(shape=[D, 4]) because the number of parameters doubles in each layer in this QAOA circuit. """ cir = UAnsatz(N, input_state=input_state) for layer in range(P): for row in range(N): for col in range(N): if abs(adjacency_matrix[row, col]) and row < col: cir.cnot([row + 1, col + 1]) cir.rz( theta=theta[layer][0] * adjacency_matrix[row, col], which_qubit=col + 1, ) cir.cnot([row + 1, col + 1]) for i in range(1, N + 1): cir.rz(theta=theta[layer][1], which_qubit=i) cir.rx(theta=theta[layer][2], which_qubit=i) cir.rz(theta=theta[layer][3], which_qubit=i) return cir.state搭建 QAOA 量子电路的工作完成后,此时量子电路的输出状态为$$|\psi(\boldsymbol{\beta},\boldsymbol{\gamma}, P)\rangle=U_x(\beta_P)U_c(\gamma_P)\dots U_x(\beta_1)U_c(\gamma_1)|+\rangle_1\dots|+\rangle_N.$$其中每个量子比特的初始状态处于量子叠加态 $|+\rangle=\frac{1}{\sqrt{2}}\left(|0\rangle+|1\rangle\right)$ 。最终,我们得到量子优化问题的损失函数$$F_P=\min_{\boldsymbol{\beta},\boldsymbol{\gamma}} \langle \psi(\boldsymbol{\beta},\boldsymbol{\gamma}, P)| H_c|\psi(\boldsymbol{\beta},\boldsymbol{\gamma}, P)\rangle.$$因为 QAOA 是一个量子-经典混杂算法,所以搭建完成 QAOA 电路且得到相应的损失函数后,我们可以进一步利用经典的优化算法寻找最优参数 $\boldsymbol{\beta},\boldsymbol{\gamma}$,从而形成一个完整的闭环网络。下面的函数给出了通过 Paddle Quantum 搭建的完整 QAOA 网络:class Net(fluid.dygraph.Layer): """ It constructs the net for QAOA which combines the QAOA circuit with the classical optimizer which sets rules to update parameters described by theta introduced in the QAOA circuit. """ def __init__( self, shape, param_attr=fluid.initializer.Uniform(low=0.0, high=np.pi, seed=1024), dtype="float32", ): super(Net, self).__init__() self.theta = self.create_parameter( shape=shape, attr=param_attr, dtype=dtype, is_bias=False ) def forward(self, input_state, adjacency_matrix, out_state_store, N, P, METHOD): """ This function constructs the loss function for the QAOA circuit. 
Args: self: the free parameters to be optimized in the QAOA circuit and defined in the above function input_state: initial state of the QAOA circuit which usually is the uniform superposition of 2^N bit-strings in the computational basis $|0\rangle, |1\rangle$ adjacency_matrix: the adjacency matrix generated from the graph encoding the classical problem out_state_store: the output state of the QAOA circuit N: number of qubits P: number of layers METHOD: which version of QAOA is chosen to solve the problem, i.e., standard version labeled by 1 or extended version by 2. Returns: The loss function for the parameterized QAOA circuit. """ # Generate the problem_based quantum Hamiltonian H_problem based on the classical problem in paddle H, _ = H_generator(N, adjacency_matrix) H_problem = fluid.dygraph.to_variable(H) # The standard QAOA circuit: the function circuit_QAOA is used to construct the circuit, indexed by METHOD 1. if METHOD == 1: out_state = circuit_QAOA(self.theta, input_state, adjacency_matrix, N, P) # The extended QAOA circuit: the function circuit_extend_QAOA is used to construct the net, indexed by METHOD 2. elif METHOD == 2: out_state = circuit_extend_QAOA(self.theta, input_state, adjacency_matrix, N, P) else: raise ValueError("Wrong method called!") out_state_store.append(out_state.numpy()) loss = pp_matmul( pp_matmul(out_state, H_problem), transpose( fluid.framework.ComplexVariable(out_state.real, -out_state.imag), perm=[1, 0], ), ) return loss.real4. 训练网络我们开始训练整个 QAOA 网络,即通过优化参数向量 $\boldsymbol{\beta}=(\beta_1,\beta_2,\beta_3,\beta_4)$ 和 $\boldsymbol{\gamma}=(\gamma_1,\gamma_2,\gamma_3, \gamma_4)$ 来达到求解 $H_c$ 最小特征值的目的。与经典机器学习算法一样,首先设置 QAOA 网络里的超参数:- 电路比特数目 N- 电路的层数 P- 迭代次数 ITR- 学习步长 LRN = 4 # number of qubits, or number of nodes in the graph P = 4 # number of layers ITR = 120 # number of iteration steps LR = 0.1 # learning rate然后,灵活调用:- 量子电路初始状态:每个量子比特态处于相干叠加态 $\frac{1}{\sqrt{2}}\left(|0\rangle+|1\rangle\right)$- 采用标准 QAOA (记为 METHOD=1)或者扩展 QAOA (记为 METHOD = 2)- 经典优化器 Adam optimizer 最后,训练模型并保存结果:def Paddle_QAOA(classical_graph_adjacency, N, P, METHOD, ITR, LR): """ This is the core function to run QAOA. 
Args: classical_graph_adjacency: adjacency matrix to describe the graph which encodes the classical problem N: number of qubits (default value N=4) P: number of layers of blocks in the QAOA circuit (default value P=4) METHOD: which version of the QAOA circuit is used: 1, standard circuit (default); 2, extended circuit ITR: number of iteration steps for QAOA (default value ITR=120) LR: learning rate for the gradient-based optimization method (default value LR=0.1) Returns: optimized parameters theta and the bitstrings sampled from the output state with maximal probability """ out_state_store = [] with fluid.dygraph.guard(): # Preparing the initial state _initial_state = np.ones([1, 2 ** N]).astype("complex64") / np.sqrt(2 ** N) initial_state = fluid.dygraph.to_variable(_initial_state) # Construct the net or QAOA circuits based on the standard modules if METHOD == 1: net = Net(shape=[P, 2]) # Construct the net or QAOA circuits based on the extended modules elif METHOD == 2: net = Net(shape=[P, 4]) else: raise ValueError("Wrong method called!") # Classical optimizer opt = fluid.optimizer.AdamOptimizer(learning_rate=LR, parameter_list=net.parameters()) # Gradient descent loop summary_iter, summary_loss = [], [] for itr in range(1, ITR + 1): loss = net( initial_state, classical_graph_adjacency, out_state_store, N, P, METHOD ) loss.backward() opt.minimize(loss) net.clear_gradients() print("iter:", itr, " loss:", "%.4f" % loss.numpy()) summary_loss.append(loss[0][0].numpy()) summary_iter.append(itr) theta_opt = net.parameters()[0].numpy() print(theta_opt) os.makedirs("output", exist_ok=True) np.savez("./output/summary_data", iter=summary_iter, energy=summary_loss) # Output the measurement probability distribution which is sampled from the output state of optimized QAOA circuit. prob_measure = np.zeros([1, 2 ** N]).astype("complex") rho_out = out_state_store[-1] rho_out = np_matmul(np.conjugate(rho_out).T, rho_out).astype("complex") for index in range(0, 2 ** N): comput_basis = np.zeros([1, 2 ** N]) comput_basis[0][index] = 1 prob_measure[0][index] = np.real(np_matmul(np_matmul(comput_basis, rho_out), comput_basis.T)) return prob_measure调用模型训练结果,输出得到的最优参数向量 $\boldsymbol{\beta}^*$ 和 $\boldsymbol{\gamma}^*$,并且将 QAOA 的输出结果和真实结果进行比较:classical_graph, classical_graph_adjacency = generate_graph(N, 1) prob_measure_dis = Paddle_QAOA(classical_graph_adjacency, N =4, P=4, METHOD=1, ITR=120, LR=0.1) # Load the data of QAOA x1 = np.load('./output/summary_data.npz') H_min = np.ones([len(x1['iter'])]) * H_graph_min # Plot it loss_QAOA, = plt.plot(x1['iter'], x1['energy'], \ alpha=0.7, marker='', linestyle="--", linewidth=2, color='m') benchmark, = plt.plot(x1['iter'], H_min, alpha=0.7, marker='', linestyle=":", linewidth=2, color='b') plt.xlabel('Number of iteration') plt.ylabel('Performance of the loss function for QAOA') plt.legend(handles=[ loss_QAOA, benchmark ], labels=[ r'Loss function $\left\langle {\psi \left( {\bf{\theta }} \right)} ' r'\right|H\left| {\psi \left( {\bf{\theta }} \right)} \right\rangle $', 'The benchmark result', ], loc='best') # Show the picture plt.show()5. 
解码量子答案当求得损失函数 $\langle \psi(\boldsymbol{\beta},\boldsymbol{\gamma}, P)| H_{\rm Cut}|\psi(\boldsymbol{\beta},\boldsymbol{\gamma}, P)\rangle$ 的最小值以及相对应的一组参数 $(\boldsymbol{\beta}^*,\boldsymbol{\gamma}^*)$ 后,我们的任务还没有完成。为了进一步求得 Max-Cut 问题的解,需要从 QAOA 输出的量子态 $$|\psi(\boldsymbol{\beta}^*,\boldsymbol{\gamma}^*, P)\rangle=\sum_{i=1}^{2^4}\lambda_i |\boldsymbol{x}_i\rangle$$中解码出经典优化问题的答案。上式中 $\boldsymbol{x}_i=x_1x_2x_3 x_4\in \{0, 1\}^4$,对应着经典问题的一个可行解。物理上,解码量子态需要对量子态进行测量,然后统计测量结果的概率分布: $$ p(\boldsymbol{x})=|\langle \boldsymbol{x}|\psi(\boldsymbol{\beta}^*,\boldsymbol{\gamma}^*,P)\rangle|^2.$$ 某种程度上,某个比特串出现的概率越大,意味着其对应的 Max-Cut 问题最优解的可能性越大。此外,Paddle Quantum 提供了查看 QAOA 量子电路输出状态的测量结果概率分布的函数:prob_measure = prob_measure_dis.flatten() pos = nx.circular_layout(classical_graph) # when N is large, it is not suggested to plot this figure name_list = [np.binary_repr(index, width=N) for index in range(0, 2 ** N)] plt.bar( range(len(np.real(prob_measure))), np.real(prob_measure), width=0.7, tick_label=name_list, ) plt.xticks(rotation=90) plt.show()最后,再次利用参数代换 $|x \rangle\rightarrow z=2x-1\in\{-1, 1\}$,可以从量子答案中解码得到 Max-Cut 问题的可行解。 此时,记 $z_i=-1$ 的顶点属于集合 $S^\prime$ 以及 $z_j=1$ 的顶点属于集合 $S$,这两个顶点集合之间存在的边就是该图的一个可能得最大割方案。 选取测量结果中出现几率最大的比特串,然后将其映射回经典解,并且画出对应的最大割方案:- 蓝色顶点属于集合 $S$- 红色顶点属于集合 $S^\prime$- 折线属于图的一条割线# Find the position of max value in the measure_prob_distribution max_prob_pos_list = np.where(prob_measure == max(prob_measure)) # Store the max value from ndarray to list max_prob_list = max_prob_pos_list[0].tolist() # Change it to the binary format solution_list = [np.binary_repr(index, width=N) for index in max_prob_list] print("The output bitstring:", solution_list) # Draw the graph representing the first bitstring in the solution_list to the MaxCut-like problem head_bitstring = solution_list[0] node_cut = ["blue" if head_bitstring[node] == "1" else "red" for node in classical_graph] edge_cut = [ "solid" if head_bitstring[node_row] == head_bitstring[node_col] else "dashed" for node_row, node_col in classical_graph.edges() ] nx.draw( classical_graph, pos, node_color=node_cut, style=edge_cut, width=4, with_labels=True, font_weight="bold", ) plt.show()Focused Ion Beam Scanning Electron Microscopy Image Segmentation**Summary:**1. SimpleITK supports a large number of filters that facilitate classical segmentation algorithms (variety of thresholding algorithms, watersheds...).2. Once your data is segmented SimpleITK enables you to efficiently post process the segmentation (e.g. label distinct objects, analyze object shapes).This notebook will illustrate the use of SimpleITK for segmentation of bacteria from a 3D Focused Ion Beam Scanning Electron Microscopy (FIB-SEM) image. The specific bacterium is bacillus subtilis, a rod shaped organism naturally found in soil and plants. The bacteria have been subjected to stress to initiate the process of forming an endospore. These endospores can be seen as a generally dark ellipsoid inside the individual bacterium.import SimpleITK as sitk import pandas as pd %matplotlib notebook import matplotlib.pyplot as plt import gui from math import ceil from downloaddata import fetch_data as fdataLoad dataLoad the 3D volume and display it.img = sitk.ReadImage(fdata("fib_sem_bacillus_subtilis.mha")) gui.MultiImageDisplay(image_list = [img], figure_size=(8,4));SegmentationTo allow us to analyze the shape of whole bacteria we first need to segment them. We will do this in several steps:1. Separate the bacteria from the embedding resin background.2. 
Mark each potential bacterium with a unique label, to evaluate the segmentation.3. Remove small components and fill small holes using binary morphology operators (opening and closing).4. Use seed based watersheds to perform final segmentation.5. Remove bacterium that are connected to the image boundary. Separate the bacteria from the backgroundBased on the visualization of the data above, it intuitively appears that the background and foreground are separable using a single intensity threshold. Our first step towards validating this observation is to plot the intensity distribution.plt.figure() plt.hist(sitk.GetArrayViewFromImage(img).flatten(), bins=100) plt.show()The histogram is bi-modal with a clear separation, which we have manually identified as having an intensity value of 120.We can also use one of several binary threshold selection filters available in SimpleITK.threshold_filters = {'Otsu': sitk.OtsuThresholdImageFilter(), 'Triangle' : sitk.TriangleThresholdImageFilter(), 'Huang' : sitk.HuangThresholdImageFilter(), 'MaxEntropy' : sitk.MaximumEntropyThresholdImageFilter()} filter_selection = 'Manual' try: thresh_filter = threshold_filters[filter_selection] thresh_filter.SetInsideValue(0) thresh_filter.SetOutsideValue(1) thresh_img = thresh_filter.Execute(img) thresh_value = thresh_filter.GetThreshold() except KeyError: thresh_value = 120 thresh_img = img>thresh_value print("Threshold used: " + str(thresh_value)) gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, thresh_img)], title_list = ['Binary Segmentation'], figure_size=(8,4));Mark each potential bacterium with unique label and evaluatestats = sitk.LabelShapeStatisticsImageFilter() stats.Execute(sitk.ConnectedComponent(thresh_img)) # Look at the distribution of sizes of connected components (bacteria). label_sizes = [ stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1] plt.figure() plt.hist(label_sizes,bins=200) plt.title("Distribution of Object Sizes") plt.xlabel("size in pixels") plt.ylabel("number of objects") plt.show()The histogram above shows tens of thousands of very small labels which are not visually detected by looking at the segmentation. Remove small islands and holesUsing binary morphological operations we remove small objects using the opening operation and fill small holes using the closing operation. The use of opening and closing by reconstruction maintains the boundary of the original objects.cleaned_thresh_img = sitk.BinaryOpeningByReconstruction(thresh_img, [10, 10, 10]) cleaned_thresh_img = sitk.BinaryClosingByReconstruction(cleaned_thresh_img, [10, 10, 10]) gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, cleaned_thresh_img)], title_list = ['Cleaned Binary Segmentation'], figure_size=(8,4));Check that the number of objects defined by the binary image is more reasonable.stats = sitk.LabelShapeStatisticsImageFilter() stats.Execute(sitk.ConnectedComponent(cleaned_thresh_img)) # Look at the distribution of sizes of connected components (bacteria). label_sizes = [ stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1] plt.figure() plt.hist(label_sizes,bins=200) plt.title("Distribution of Object Sizes") plt.xlabel("size in pixels") plt.ylabel("number of objects") plt.show()After the morphological operations, our binary image seems to have a reasonable number of objects, but is this true? 
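One quick numeric way to answer that question (a sketch; `cleaned_thresh_img` is the cleaned binary image produced above) is to count the connected components directly before inspecting them visually:

```python
import SimpleITK as sitk

# Label the connected components of the cleaned binary image and count them.
cc = sitk.ConnectedComponent(cleaned_thresh_img)
stats_check = sitk.LabelShapeStatisticsImageFilter()
stats_check.Execute(cc)
print("Number of connected components:", len(stats_check.GetLabels()))
```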
We next look at the unique objects defined by this binary segmentation (each object is marked with a unique color).gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, sitk.ConnectedComponent(cleaned_thresh_img))], title_list = ['Cleaned Binary Segmentation'],figure_size=(8,4));Seed based watershed segmentationThe bacteria appear to be segmented correctly from the background but not from each other. Using the visualization and histogram above we see that in 3D many of them are connected, even if on a slice by slice inspection they appear separate.dist_img = sitk.SignedMaurerDistanceMap(cleaned_thresh_img != 0, insideIsPositive=False, squaredDistance=False, useImageSpacing=False) radius = 10 # Seeds have a distance of "radius" or more to the object boundary, they are uniquely labelled. seeds = sitk.ConnectedComponent(dist_img < -radius) # Relabel the seed objects using consecutive object labels while removing all objects with less than 15 pixels. seeds = sitk.RelabelComponent(seeds, minimumObjectSize=15) # Run the watershed segmentation using the distance map and seeds. ws = sitk.MorphologicalWatershedFromMarkers(dist_img, seeds, markWatershedLine=True) ws = sitk.Mask( ws, sitk.Cast(cleaned_thresh_img, ws.GetPixelID()))Visualize the distance map, the unique seeds and final object segmentation.gui.MultiImageDisplay(image_list = [dist_img, sitk.LabelOverlay(img, seeds), sitk.LabelOverlay(img, ws)], title_list = ['Segmentation Distance', 'Watershed Seeds', 'Binary Watershed Labeling'], shared_slider=True, horizontal=False, figure_size=(6,12));Removal of objects touching the image boundaryWe are not sure objects touching the image boundary are whole bacteria, so we remove them.# The image has a small black border which we account for here. bgp = sitk.BinaryGrindPeak( (ws!=0)| (img==0)) non_border_seg = sitk.Mask( ws, bgp==0) gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, non_border_seg)], title_list = ['Final Segmentation'],figure_size=(8,4));Object AnalysisOnce we have the segmented objects we look at their shapes and the intensity distributions inside the objects.Note that sizes are in nanometers. ITK and consequently SimpleITK are agnostic of the actual measurement units. It is up to you as the developer to explicitly use the correct units and more importantly, DO NOT MIX UNITS.We first compute all of the measurements we are interested in.shape_stats = sitk.LabelShapeStatisticsImageFilter() shape_stats.ComputeOrientedBoundingBoxOn() shape_stats.Execute(non_border_seg) intensity_stats = sitk.LabelIntensityStatisticsImageFilter() intensity_stats.Execute(non_border_seg,img)Insert the values into a pandas dataframe and display some descriptive statistics.stats_list = [ (shape_stats.GetPhysicalSize(i), shape_stats.GetElongation(i), shape_stats.GetFlatness(i), shape_stats.GetOrientedBoundingBoxSize(i)[0], shape_stats.GetOrientedBoundingBoxSize(i)[2], intensity_stats.GetMean(i), intensity_stats.GetStandardDeviation(i), intensity_stats.GetSkewness(i)) for i in shape_stats.GetLabels()] cols=["Volume (nm^3)", "Elongation", "Flatness", "Oriented Bounding Box Minimum Size(nm)", "Oriented Bounding Box Maximum Size(nm)", "Intensity Mean", "Intensity Standard Deviation", "Intensity Skewness"] # Create the pandas data frame and display descriptive statistics. 
stats = pd.DataFrame(data=stats_list, index=shape_stats.GetLabels(), columns=cols) stats.describe()Create a plot to investigate the relationship, possible correlations, between volume and object shape characteristics (elongation, flatness, principal moments).fig, axes = plt.subplots(nrows=len(cols), ncols=2, figsize=(6,4*len(cols))) axes[0,0].axis('off') stats.loc[:,cols[0]].plot.hist(ax=axes[0,1], bins=25) axes[0,1].set_xlabel(cols[0]) axes[0,1].xaxis.set_label_position("top") for i in range(1,len(cols)): c = cols[i] bar = stats.loc[:,[c]].plot.hist(ax=axes[i,0], bins=20,orientation='horizontal',legend=False) bar.set_ylabel(stats.loc[:,[c]].columns.values[0]) scatter = stats.plot.scatter(ax=axes[i,1],y=c,x=cols[0]) scatter.set_ylabel('') # Remove axis labels from all plots except the last (they all share the labels) if(iFinally, we visualize a lineup of the bacteria using a coordinate system that is defined by the oriented bounding box enclosing each of them.bacteria_labels = shape_stats.GetLabels() bacteria_volumes = [shape_stats.GetPhysicalSize(label) for label in bacteria_labels] num_images = 5 # number of bacteria images we want to display bacteria_labels_volume_sorted = [label for _,label in sorted(zip(bacteria_volumes, bacteria_labels))] resampler = sitk.ResampleImageFilter() aligned_image_spacing = [10,10,10] #in nanometers for label in bacteria_labels_volume_sorted[0:num_images]: aligned_image_size = [ int(ceil(shape_stats.GetOrientedBoundingBoxSize(label)[i]/aligned_image_spacing[i])) for i in range(3) ] direction_mat = shape_stats.GetOrientedBoundingBoxDirection(label) aligned_image_direction = [direction_mat[0], direction_mat[3], direction_mat[6], direction_mat[1], direction_mat[4], direction_mat[7], direction_mat[2], direction_mat[5], direction_mat[8] ] resampler.SetOutputDirection(aligned_image_direction) resampler.SetOutputOrigin(shape_stats.GetOrientedBoundingBoxOrigin(label)) resampler.SetOutputSpacing(aligned_image_spacing) resampler.SetSize(aligned_image_size) obb_img = resampler.Execute(img) # Change the image axes order so that we have a nice display. obb_img = sitk.PermuteAxes(obb_img,[2,1,0]) gui.MultiImageDisplay(image_list = [obb_img], title_list = ["OBB_{0}".format(label)])Los Alamos Cybersecurity dataset toolsIncludes tools to manipulate dataset storage, as well as load events from said storage.%load_ext pycodestyle_magic %flake8_on --max_line_length 120 --ignore W293,E302 import notebooks_as_modules from collections import OrderedDict from contextlib import contextmanager import dask from dask.delayed import Delayed import dask.dataframe as ddf from dask.distributed import Client, LocalCluster from glob import glob from growing import growing import gzip import igraph as ig import io from jupytest import Suite, Report, Magic, summarize_results, assert_, eq, approx, Explanation, ExplanationOnFailure, \ join_args, fail import numpy as np import os import os.path as op import pandas as pd import re import shutil import sys import time from typing import * # noqa from unittest.mock import patch, Mock, call, MagicMock suite = Suite() if __name__ == "__main__": suite |= Report() suite |= Magic()Dataset chunkingThis dataset is large! To facilitate its processing, it is best to cut its bigger files into *chunks*, which can be processed in parallel. Let's embody the intended file hierarchy of the LANL dataset into a class.@growing class DataStoreLosAlamos: """ Main files making up the Los Alamos Cybersecurity dataset. 
""" def __init__(self, path: os.PathLike) -> None: self._dir_base = path @property def dir_base(self) -> os.PathLike: return self._dir_base def __dask_tokenize__(self) -> str: return self.dir_baseCutting the raw files into compressed chunks is a long-running computation. Let's structure it so it's run into a compute cluster, when we need it.SIZE_CHUNK = (2 << 25) + (2 << 24) # 96 MB maximum @DataStoreLosAlamos.method(wrapped_in=dask.delayed(pure=True)) def join_chunked(self, stream: str, *p: os.PathLike, size_chunk: int = SIZE_CHUNK) -> os.PathLike: path_stream_chunked = op.join(self.dir_base, "chunked", stream) os.makedirs(path_stream_chunked, exist_ok=True) names_chunk = glob(op.join(path_stream_chunked, "*.txt.gz")) if len(names_chunk) == 0 or any(os.stat(p).st_size == 0 for p in names_chunk): # Raw files have not been chunked yet, or some chunks are corrupted. It's chunking time. with gzip.open(op.join(self.dir_base, f"{stream}.txt.gz"), "rb") as file_raw: for index in range(sys.maxsize): with FileChunk( op.join(self.dir_base, "chunked", stream, f"{index:04d}.txt.gz"), size_chunk ) as file_chunk: for line in file_raw: if not file_chunk.write(line): break else: break # ...out of outer infinite loop. return op.join(path_stream_chunked, *p)**Tests** for method `join_chunked`:T = TypeVar("T") @contextmanager def mocking_global(name: str, value_mock: T) -> ContextManager[T]: must_restore = False G = globals() if name in G: value_orig = G[name] must_restore = True G[name] = value_mock try: yield value_mock finally: if must_restore: G[name] = value_orig else: del G[name] def mock_file_raw(lines: Iterable[str]) -> Mock: mock = Mock() mock.__enter__ = lambda self: self mock.__exit__ = lambda self, t, v, tb: False iter_lines = iter(lines) mock.__iter__ = lambda self: iter_lines return mock @contextmanager def mocking_gzip_open(lines: Iterable[str]) -> ContextManager[Mock]: with patch("gzip.open", return_value=mock_file_raw(lines)) as mock: yield mock def mock_file_chunk(**kwargs: Any) -> Mock: mock = Mock() mock.__enter__ = lambda self: self mock.__exit__ = lambda self, t, v, tb: False mock.write = Mock(**kwargs) return mock @contextmanager def mocking_FileChunk(mocks: Sequence[Mock]) -> ContextManager[Mock]: with mocking_global("FileChunk", Mock(side_effect=mocks)) as mock: yield mock %%test join-chunked/Stop mocks_chunk = [ mock_file_chunk(**kwargs) for kwargs in [dict(side_effect=[True, False]), dict(side_effect=[True, False]), dict(return_value=True)] ] with patch("os.makedirs"), patch("glob.glob", return_value=[]),\ mocking_gzip_open([b"asdf\n", b"qwer\n", b"zxcv\n", b"qwerty\n", b"uiop\n"]) as mock_raw,\ mocking_FileChunk(mocks_chunk) as mock_class: ds = DataStoreLosAlamos("/path/to/data") assert_( eq, actual=ds.join_chunked("dns", "asdf", "qwer", size_chunk=10).compute(scheduler="single-threaded"), expected="/path/to/data/chunked/dns/asdf/qwer" ) mock_class.assert_has_calls( [call(f"/path/to/data/chunked/dns/{i:04d}.txt.gz", 10) for i in range(3)] ) mocks_chunk[0].write.assert_has_calls([call(s) for s in [b"asdf\n", b"qwer\n"]]) mocks_chunk[1].write.assert_has_calls([call(s) for s in [b"zxcv\n", b"qwerty\n"]]) mocks_chunk[2].write.assert_has_calls([call(s) for s in [b"uiop\n"]]) %%test join-chunked/End of raw file corresponds to end of chunk mocks_chunk = [ mock_file_chunk(**kwargs) for kwargs in [dict(side_effect=[True, False]), dict(side_effect=[True, False]), dict(return_value=True)] ] with patch("os.makedirs"), patch("glob.glob", return_value=[]),\ mocking_gzip_open([b"asdf\n", 
b"qwer\n", b"zxcv\n", b"qwerty\n"]) as mock_raw,\ mocking_FileChunk(mocks_chunk) as mock_class: ds = DataStoreLosAlamos("/path/to/data") assert_( eq, actual=ds.join_chunked("dns", "asdf", "qwer", size_chunk=10).compute(scheduler="single-threaded"), expected="/path/to/data/chunked/dns/asdf/qwer" ) mock_class.assert_has_calls( [call(f"/path/to/data/chunked/dns/{i:04d}.txt.gz", 10) for i in range(3)] ) mocks_chunk[0].write.assert_has_calls([call(s) for s in [b"asdf\n", b"qwer\n"]]) mocks_chunk[1].write.assert_has_calls([call(s) for s in [b"zxcv\n", b"qwerty\n"]]) mocks_chunk[2].write.assert_not_called() %%test join-chunked/Raw file is empty mocks_chunk = [ mock_file_chunk(**kwargs) for kwargs in [dict(side_effect=[True, False]), dict(side_effect=[True, False]), dict(return_value=True)] ] with patch("os.makedirs"), patch("glob.glob", return_value=[]),\ mocking_gzip_open([]) as mock_raw,\ mocking_FileChunk(mocks_chunk) as mock_class: ds = DataStoreLosAlamos("/path/to/data") assert_( eq, actual=ds.join_chunked("dns", "asdf", "qwer").compute(scheduler="single-threaded"), expected="/path/to/data/chunked/dns/asdf/qwer" ) mock_class.assert_called_once_with("/path/to/data/chunked/dns/0000.txt.gz", SIZE_CHUNK) for mock in mocks_chunk: mock.write.assert_not_called()Test join-chunked/Raw file is empty passed.Class `FileChunk` then embodies the creation of a chunk and the transfer of its content into the target file. Note that the algorithm of `join_chunked()` made it so the context of the `FileChunk` instance is entered before we have any content for the chunk; the creation of the file should thus be delayed to a call to method `write()`.class FileChunk: """ Delays the creation of a chunk file until the user commits to writing something in it. """ def __init__(self, path: os.PathLike, limit: int) -> None: self._path = path self._file: Optional[io.RawByteIO] = None self._limit = limit self._size = 0 def __enter__(self) -> "FileChunk": return self def __exit__(self, type_exc, value_exc, tb_exc) -> bool: if self._file is not None: self._file.close() return False def write(self, buf: bytes) -> bool: if self._file is None: self._file = gzip.open(self._path, "wb") index = 0 while index < len(buf): index += self._file.write(buf[index:]) self._size += len(buf) return self._size < self._limit**Tests**:%%test FileChunk/No file created without write with patch("gzip.open") as mock: with FileChunk("asdf", 100) as file_chunk: pass mock.assert_not_called() %%test FileChunk.write/All written in one single underlying write bytes_written = io.BytesIO() with patch("gzip.open", return_value=bytes_written): with FileChunk("asdf", 100) as file_chunk: assert file_chunk.write(b"qwerty\n") assert_(eq, actual=bytes_written.getvalue(), expected=b"qwerty\n") %%test FileChunk.write/Multiple underlying writes needed with patch("gzip.GzipFile") as mock: mock.return_value.write = Mock(side_effect=[3, 4]) with FileChunk("asdf", 100) as file_chunk: assert file_chunk.write(b"qwerty\n") mock.return_value.write.assert_has_calls([call(b'qwerty\n',), call(b'rty\n',)]) def test_chunk_filling(last: bytes) -> None: bytes_written = io.BytesIO() with patch("gzip.open", return_value=bytes_written): with FileChunk("asdf", 25) as file_chunk: assert file_chunk.write(b"asdf\nqwer\n") assert file_chunk.write(b"zxcv\n") assert file_chunk.write(b"uiop\n") assert not file_chunk.write(last) assert_(eq, actual=bytes_written.getvalue(), expected=b"asdf\nqwer\nzxcv\nuiop\n" + last) for adverb, last in [("exactly", b"1234\n"), ("beyond", b"1234567890\n")]: 
suite.test( test_chunk_filling, args=(last,), name=f"FileChunk.write/Return False once chunk once {adverb} full" )Test FileChunk.write/Return False once chunk once exactly full passed. Test FileChunk.write/Return False once chunk once beyond full passed.Experiments repository@DataStoreLosAlamos.method def join_experiments(self, *p: os.PathLike) -> os.PathLike: return op.join(self.dir_base, "experiments", *p)Loading a data stream into a Dask dataframeDask dataframes easily leverage the chunking of the streams that we have wrought. However, given how the chunks are already sorted, one gets the best benefits from these by supplying knowledge of the *divisions* of the index key (here, time) across the partitions. This is why we implement a custom dataframe loading that quickly extracts the division knowledge. Event timestampsThe Los Alamos Cybersecurity dataset has been captured over a period of two months, but the exact dates are unknown; the timestamps provided in the dataset start at 0. While mapping these directly to timestamps would yield funny 1970's dates to events, we rather choose a more modern setting. Given the late-2015 moment the dataset was released, we shall assume the acquisition ran from January 1st, 2015, to February 27th.@growing class Time: START = pd.Timestamp("2015-01-01T00:00:00") END = pd.Timestamp("2015-02-28T00:00:00") - pd.Timedelta(nanoseconds=1) Time.END def seconds2ts(n: str) -> pd.Timestamp: return Time.START + pd.Timedelta(seconds=int(n)) %%test Timestamp mapping assert_(eq, actual=seconds2ts("3600"), expected=pd.Timestamp("2015-01-01T01:00:00"))Test Timestamp mapping passed.Data schemasThe `SCHEMAS` dictionary describe the columns for each of the four main data streams, in addition to the label array stored in `redteam.txt.gz`. 
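To make the intended usage concrete, here is a minimal sketch of how such a schema object is consumed (the `dns` entry is copied from the `SCHEMAS` definition just below; `read_lanl_csv`, defined later, follows the same pattern):

```python
# A schema is a sequence of (column_name, dtype) pairs for one stream.
schema_dns = [("host_focus", "object"), ("host_resolved", "object")]

# Prepend the shared "time" index column to obtain the CSV column names...
names = ["time"] + [attr for attr, _ in schema_dns]  # ['time', 'host_focus', 'host_resolved']

# ...and turn the pairs into the dtype mapping handed to pandas.read_csv.
dtypes = dict(schema_dns)  # {'host_focus': 'object', 'host_resolved': 'object'}
```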
Each stream is sorted (and thus indexable) by its `time` column, which is omitted from the schema descriptions to facilitate the usage of the schema objects.SCHEMA = Sequence[Tuple[str, str]] SCHEMAS: Mapping[str, SCHEMA] = { "dns": [ ("host_focus", "object"), ("host_resolved", "object") ], "flows": [ ("duration", "int64"), ("host_focus", "object"), ("port_focus", "object"), ("host_server", "object"), ("port_server", "object"), ("protocol", "category"), ("num_packets", "int32"), ("num_bytes", "int64") ], "proc": [ ("userdomain_focus", "object"), ("host_focus", "object"), ("process", "object"), ("action", "category") ], "auth": [ ("userdomain_init", "object"), ("userdomain_focus", "object"), ("host_init", "object"), ("host_focus", "object"), ("auth", "category"), ("logon", "category"), ("direction", "category"), ("result", "category") ], "redteam": [ ("userdomain_focus", "object"), ("host_init", "object"), ("host_focus", "object") ] }Figuring out which stream a file isRXS_NAMES_STREAM = r"(" + "|".join(SCHEMAS.keys()) + ")" RX_PATH2STREAM = re.compile(r"/" + RXS_NAMES_STREAM + r"/|" + RXS_NAMES_STREAM + r"\.txt\.gz") def path2stream(path: os.PathLike) -> str: m = re.search(RX_PATH2STREAM, path) if m is None: return "" return m.group(1) or m.group(2) %%test Stream name for a raw file assert_(eq, actual=path2stream("/data/lanl/redteam.txt.gz"), expected="redteam") %%test Stream name for a chunk file assert_(eq, actual=path2stream("/data/lanl/chunked/auth/0034.txt.gz"), expected="auth") %%test Stream name for a stream-specific processing result (not chunking) assert_(eq, actual=path2stream("/data/lanl/experiments/asdf/qwer/proc/zxcv"), expected="proc") %%test Path with no stream name assert not path2stream("/data/lanl/wtf")Test Path with no stream name passed.Getting the first timestamp of a LANL filedef get_timestamp_lower(path: os.PathLike) -> pd.Timestamp: with gzip.open(path, "rb") as file: line1 = next(file) # Assumption: no file empty. 
num_seconds, *_ = line1.split(b",") return seconds2ts(num_seconds) %%test First timestamp for line of a DNS stream file with patch("gzip.open", return_value=io.BytesIO(b"90842,C326,C89\n")): assert_(eq, actual=get_timestamp_lower("asdf"), expected=pd.Timestamp("2015-01-02T01:14:02")) %%test First timestamp for line of a flows stream file with patch("gzip.open", return_value=io.BytesIO(b"2957021,2,C347,50234,C812,443,https,12,15723\nqwerty\n")): assert_(eq, actual=get_timestamp_lower("asdf"), expected=pd.Timestamp("2015-02-04T05:23:41"))Test First timestamp for line of a flows stream file passed.Loading a LANL CSV filedef read_lanl_csv(path: os.PathLike, **kwargs: Any) -> pd.DataFrame: stream = path2stream(path) if not stream: raise ValueError(f"Path {path} does not involve a LANL data stream.") schema = SCHEMAS[stream] return pd.read_csv( path, header=None, names=["time"] + [attr for attr, _ in schema], dtype=dict(schema), parse_dates=["time"], date_parser=seconds2ts, index_col="time", compression="gzip", **kwargs ) @contextmanager def dummy_proc_content() -> ContextManager[os.PathLike]: content = b"""\ 3,C3@DOM1,C4,P2,Start 18,C89@DOM1,C23,P78,Start 29,C14@DOM1,C90,P123,Start 53,C90@DOM1,C34,P23,End """ with patch("gzip.builtins.open", return_value=io.BytesIO(gzip.compress(content))): yield "/path/with/proc/" %%test Reading LANL content with dummy_proc_content() as path: df = read_lanl_csv(path) assert_(eq, actual=len(df), expected=4) assert_(eq, actual=len(df.columns), expected=4) assert_(eq, actual=df.index.dtype, expected=np.dtype("datetime64[ns]")) assert_( eq, actual={c: str(dt) for c, dt in df.dtypes.items()}, expected={ "userdomain_focus": "object", "host_focus": "object", "process": "object", "action": "category" } )Test Reading LANL content passed.Putting it all together into a Dask dataframe@DataStoreLosAlamos.method def get_stream(self, name: str) -> ddf.DataFrame: paths_chunk = sorted(glob(self.join_chunked(name, "*.txt.gz").compute())) divisions = [get_timestamp_lower(path) for path in paths_chunk] + [Time.END] schema = dict(SCHEMAS[name]) return ddf.from_delayed( [dask.delayed(read_lanl_csv)(path) for path in paths_chunk], meta=pd.DataFrame(columns=schema.keys(), index=pd.DatetimeIndex([], name="time")).astype(schema), divisions=divisions, prefix="load_chunk", verify_meta=False ) %%test Stream dataframe coherence ds = DataStoreLosAlamos("/lanl") indices = [3, 2, 4, 0, 1] with patch("__main__.glob", side_effect=[[f"/lanl/flows/{n:04d}.txt.gz" for n in indices]]),\ patch.object(ds, "join_chunked", new=MagicMock()),\ patch( "__main__.get_timestamp_lower", side_effect=[pd.Timestamp(s) for s in [ "2015-01-01T00:00:04", "2015-01-11T12:45:32", "2015-01-27T18:19:19", "2015-02-12T14:10:23", "2015-02-23T18:02:38" ]] ): df = ds.get_stream("flows") assert_(eq, actual=df.npartitions, expected=5) assert_( eq, actual=list(df.index.divisions), expected=[pd.Timestamp(s) for s in [ "2015-01-01T00:00:04", "2015-01-11T12:45:32", "2015-01-27T18:19:19", "2015-02-12T14:10:23", "2015-02-23T18:02:38", "2015-02-27T23:59:59.999999999" ]] ) assert_(eq, actual=dict(df.dtypes), expected=dict(SCHEMAS["flows"])) @suite.test(name="Resolving a complete dataframe") def resolving_whole_dataframe(): all_events = [ b"""\ 63,U34@DOM1,U23@DOM1,C98,C98,Kerberos,Network,LogOn,Success 91,U67@DOM1,SYSTEM@C89,C89,C89,Negotiate,Service,LogOn,Success """, b"""\ 304,U45@DOM1,U45@DOM1,C234,C329,Kerberos,Network,LogOff,Success 897,U93@DOM1,U93@DOM1,C123,C123,Kerberos,Network,LogOn,Success """, b"""\ 
956,U93@DOM1,U93@DOM1,C123,C123,Kerberos,Network,LogOff,Success 3456,U67@DOM1,U45@DOM1,C89,C329,Kerberos,Network,LogOn,Failure 4127,U980@DOM1,U980@DOM1,C23,C32,Kerberos,Service,LogOn,Success """ ] map_content = { op.join("/lanl", "chunked", "auth", f"{n:02d}.txt.gz"): content for n, content in enumerate(all_events) } def grab_content(path: os.PathLike, *args, **kwargs) -> io.RawIOBase: return io.BytesIO(gzip.compress(map_content[path])) ds = DataStoreLosAlamos("asdf") with patch.object(ds, "join_chunked", new=MagicMock()),\ patch("__main__.glob", side_effect=[list(map_content.keys())]),\ patch("gzip.builtins.open", side_effect=grab_content): df = ds.get_stream("auth") assert_(eq, actual=df.npartitions, expected=3) assert_( eq, actual=list(df.divisions), expected=[pd.Timestamp(s) for s in [ "2015-01-01T00:01:03", "2015-01-01T00:05:04", "2015-01-01T00:15:56" ]] + [Time.END] ) df_realized = df.compute() assert_(eq, actual=dict(df_realized.dtypes), expected=dict(SCHEMAS["auth"])) for line, ts_and_row in zip(b"".join(all_events).split(b"\n"), df_realized.iterrows()): num_seconds_expected, *cols_expected = line.split(b",") ts_expected = seconds2ts(num_seconds_expected) ts_obtained, row_obtained = ts_and_row assert_(eq, expected=ts_expected, obtained=ts_obtained) assert_(eq, expected=[str(c, encoding="utf-8") for c in cols_expected], obtained=list(row_obtained))Test Resolving a complete dataframe passed.Provide all streams in one query@DataStoreLosAlamos.method def streams(self) -> Mapping[str, ddf.DataFrame]: # First ensure all these streams have been chunked; leverage parallel cluster computation. streams = ["auth", "dns", "flows", "proc"] persisted = [self.join_chunked(name).persist() for name in streams] dask.compute(persisted) del persisted return {name: self.get_stream(name) for name in streams} @suite.test def gathering_all_streams(): stream_delay: Mapping[str, float] = { "auth": 4.0, "dns": 0.5, "flows": 1.0, "proc": 2.0 } longest = max(stream_delay.values()) @dask.delayed def mock_join_chunked(name: str) -> os.PathLike: time.sleep(stream_delay[name]) return name # Unused def mock_get_stream(name: str) -> Tuple[str]: return (name,) cluster = LocalCluster(n_workers=4, threads_per_worker=1, dashboard_address=None) client = Client(cluster) try: ds = DataStoreLosAlamos("dummy") with patch.object(ds, "join_chunked", new=Mock(side_effect=mock_join_chunked)),\ patch.object(ds, "get_stream", side_effect=mock_get_stream): tic = time.time() assert_(eq, actual=ds.streams(), expected={name: (name,) for name in ["auth", "dns", "flows", "proc"]}) toc = time.time() assert_(approx(longest, 0.1), toc - tic) finally: client.close() cluster.close()Test gathering_all_streams passed.Combination of all four streams in a single streamThis is useful for software that needs to consider the heterogeneous telemetry as a single homogeneous stream.def combine_streams( ds: DataStoreLosAlamos, streams: Sequence[str] = [], start: Optional[pd.Timestamp] = None, end: Optional[pd.Timestamp] = None, ) -> ddf.DataFrame: if not streams: streams = ["auth", "dns", "flows", "proc"] to_concat = [] for name in streams: df = ds.get_stream(name)[(start or Time.START):(end or Time.END)] df["stream"] = name to_concat.append(df) return ddf.concat(to_concat, interleave_partitions=True) def dataframes_equal(**dfs: pd.DataFrame) -> ExplanationOnFailure: left, right = dfs.values() if not left.equals(right): return Explanation("The two dataframes are not equal", join_args([], dfs)) return True import numpy as np import pandas as pd class 
MockLANL(DataStoreLosAlamos): def __init__(self): super().__init__("dummy") def get_stream(self, name: str) -> ddf.DataFrame: return ddf.from_pandas( pd.DataFrame( data={ "auth": { "index": [pd.Timestamp("2015-01-05T13:00:05")], "userdomain_init": ["U3@DOM1"], "userdomain_focus": ["U3@DOM1"], "host_init": ["C328"], "host_focus": ["C345"], "auth": ["NTLM"], "logon": ["?"], "direction": ["LogOn"], "result": ["Success"] }, "proc": { "index": [pd.Timestamp("2015-01-05T13:00:06"), pd.Timestamp("2015-01-05T13:00:32")], "userdomain_focus": ["U4@DOM1", "U3@DOM1"], "host_focus": ["C45", "C45"], "process": ["P3254", "P129"], "action": ["Start", "End"] }, "flows": { "index": [pd.Timestamp("2015-01-05T13:00:03")], "duration": [3], "host_focus": ["C89"], "port_focus": ["N435"], "host_server": ["C2390"], "port_server": ["443"], "protocol": ["3"], "num_packets": [10], "num_bytes": [1454] }, "dns": { "index": [pd.Timestamp("2015-01-05T13:00:03"), pd.Timestamp("2015-01-05T13:00:24")], "host_focus": ["C89", "C234"], "host_resolved": ["C2390", "C123"] } }[name] ).set_index("index"), npartitions=1 ) def test_df(data: Mapping[str, Sequence]) -> pd.DataFrame: df = pd.DataFrame(data=data) map_types = { "category": "object", "int64": "float64", "int32": "float64" } dict_dtype = {k: map_types.get(v, v) for k, v in set(sum(list(SCHEMAS.values()), [])) if k in df.columns} return df.astype(dict_dtype).set_index("time").sort_index() %%test Combination of selected streams assert_( dataframes_equal, expected=test_df({ "time": [pd.Timestamp(s) for s in ["2015-01-05T13:00:03", "2015-01-05T13:00:03", "2015-01-05T13:00:24"]], "host_focus": ["C89", "C89", "C234"], "host_resolved": ["C2390", np.nan, "C123"], "stream": ["dns", "flows", "dns"], "duration": [np.nan, 3, np.nan], "port_focus": [np.nan, "N435", np.nan], "host_server": [np.nan, "C2390", np.nan], "port_server": [np.nan, "443", np.nan], "protocol": [np.nan, "3", np.nan], "num_packets": [np.nan, 10, np.nan], "num_bytes": [np.nan, 1454, np.nan] }), combination=combine_streams(MockLANL(), ["dns", "flows"]).compute().sort_index() ) %%test Combination of all streams assert_( dataframes_equal, expected=test_df({ "time": [pd.Timestamp(s) for s in [ "2015-01-05T13:00:05", "2015-01-05T13:00:06", "2015-01-05T13:00:32", "2015-01-05T13:00:03", "2015-01-05T13:00:24", "2015-01-05T13:00:03" ]], "userdomain_init": ["U3@DOM1", np.nan, np.nan, np.nan, np.nan, np.nan], "userdomain_focus": ["U3@DOM1", "U4@DOM1", "U3@DOM1", np.nan, np.nan, np.nan], "host_init": ["C328", np.nan, np.nan, np.nan, np.nan, np.nan], "host_focus": ["C345", "C45", "C45", "C89", "C234", "C89"], "auth": ["NTLM", np.nan, np.nan, np.nan, np.nan, np.nan], "logon": ["?", np.nan, np.nan, np.nan, np.nan, np.nan], "direction": ["LogOn", np.nan, np.nan, np.nan, np.nan, np.nan], "result": ["Success", np.nan, np.nan, np.nan, np.nan, np.nan], "stream": ["auth", "proc", "proc", "dns", "dns", "flows"], "host_resolved": [np.nan, np.nan, np.nan, "C2390", "C123", np.nan], "duration": [np.nan, np.nan, np.nan, np.nan, np.nan, 3], "port_focus": [np.nan, np.nan, np.nan, np.nan, np.nan, "N435"], "host_server": [np.nan, np.nan, np.nan, np.nan, np.nan, "C2390"], "port_server": [np.nan, np.nan, np.nan, np.nan, np.nan, "443"], "protocol": [np.nan, np.nan, np.nan, np.nan, np.nan, "3"], "num_packets": [np.nan, np.nan, np.nan, np.nan, np.nan, 10], "num_bytes": [np.nan, np.nan, np.nan, np.nan, np.nan, 1454], "process": [np.nan, "P3254", "P129", np.nan, np.nan, np.nan], "action": [np.nan, "Start", "End", np.nan, np.nan, np.nan] }), 
combination=combine_streams(MockLANL()).compute().sort_index() ) %%test Combination of all streams beyond a timestamp assert_( dataframes_equal, expected=test_df({ "time": [pd.Timestamp(s) for s in [ "2015-01-05T13:00:32", "2015-01-05T13:00:24" ]], "userdomain_init": [np.nan, np.nan], "userdomain_focus": ["U3@DOM1", np.nan], "host_init": [np.nan, np.nan], "host_focus": ["C45", "C234"], "auth": [np.nan, np.nan], "logon": [np.nan, np.nan], "direction": [np.nan, np.nan], "result": [np.nan, np.nan], "stream": ["proc", "dns"], "host_resolved": [np.nan, "C123"], "duration": [np.nan, np.nan], "port_focus": [np.nan, np.nan], "host_server": [np.nan, np.nan], "port_server": [np.nan, np.nan], "protocol": [np.nan, np.nan], "num_packets": [np.nan, np.nan], "num_bytes": [np.nan, np.nan], "process": ["P129", np.nan], "action": ["End", np.nan] }), combination=combine_streams(MockLANL(), start=pd.Timestamp("2015-01-05T13:00:10")).compute().sort_index() ) %%test Combination of all streams prior to a timestamp assert_( dataframes_equal, expected=test_df({ "time": [pd.Timestamp(s) for s in [ "2015-01-05T13:00:05", "2015-01-05T13:00:03", "2015-01-05T13:00:03" ]], "userdomain_init": ["U3@DOM1", np.nan, np.nan], "userdomain_focus": ["U3@DOM1", np.nan, np.nan], "host_init": ["C328", np.nan, np.nan], "host_focus": ["C345", "C89", "C89"], "auth": ["NTLM", np.nan, np.nan], "logon": ["?", np.nan, np.nan], "direction": ["LogOn", np.nan, np.nan], "result": ["Success", np.nan, np.nan], "stream": ["auth", "dns", "flows"], "host_resolved": [np.nan, "C2390", np.nan], "duration": [np.nan, np.nan, 3], "port_focus": [np.nan, np.nan, "N435"], "host_server": [np.nan, np.nan, "C2390"], "port_server": [np.nan, np.nan, "443"], "protocol": [np.nan, np.nan, "3"], "num_packets": [np.nan, np.nan, 10], "num_bytes": [np.nan, np.nan, 1454], "process": [np.nan, np.nan, np.nan], "action": [np.nan, np.nan, np.nan] }), combination=combine_streams(MockLANL(), end=pd.Timestamp("2015-01-05T13:00:05")).compute().sort_index() ) %%test Combination of all streams within a time interval assert_( dataframes_equal, expected=test_df({ "time": [pd.Timestamp(s) for s in [ "2015-01-05T13:00:05", "2015-01-05T13:00:06" ]], "userdomain_init": ["U3@DOM1", np.nan], "userdomain_focus": ["U3@DOM1", "U4@DOM1"], "host_init": ["C328", np.nan], "host_focus": ["C345", "C45"], "auth": ["NTLM", np.nan], "logon": ["?", np.nan], "direction": ["LogOn", np.nan], "result": ["Success", np.nan], "stream": ["auth", "proc"], "host_resolved": [np.nan, np.nan], "duration": [np.nan, np.nan], "port_focus": [np.nan, np.nan], "host_server": [np.nan, np.nan], "port_server": [np.nan, np.nan], "protocol": [np.nan, np.nan], "num_packets": [np.nan, np.nan], "num_bytes": [np.nan, np.nan], "process": [np.nan, "P3254"], "action": [np.nan, "Start"] }), combination=combine_streams( MockLANL(), start=pd.Timestamp("2015-01-05T13:00:05"), end=pd.Timestamp("2015-01-05T13:00:10") ).compute().sort_index() )Test Combination of all streams within a time interval passed.Writing a stream to CSVWe restrict writing back CSV to streams that can hold in a reasonable amount of memory, meaning a single cluster node. 
In such a case, the stream is reduced from a Dask dataframe to a single-node regular Pandas dataframe.import os def write_stream_csv(path: os.PathLike, stream: ddf.DataFrame, **options: Any) -> None: df = stream.compute().sort_index() df.insert(0, "time", df.index.to_series().apply(lambda ts: int((ts - Time.START).total_seconds()))) df.to_csv(path, index=False, **options)Selecting a feature subset for refining representation When computing artifact-specific numerical representations of data, sometimes one may want to restrict further analysis steps to a subset of artifacts. While embracing the joint analysis of all artifacts can be done (at high runtime and memory usage), it may yield a very generic representation less able to model normal activity *tightly*: such a representation may suffer from reduced sensitivity to anomalous phenomena. So artifact subsets can bring a tighter representation that performs better at anomaly detection. Naturally, only experimental evidence can verify or undermine such hypotheses.ArtifactIndex = Mapping[str, int] FeaturesRestricted = Tuple[np.ndarray, ArtifactIndex]Restricting to subset of artifact types Restricting, for instance, to only host or focused host features could be used to detect as-yet unknown malicious activity over a whole network.In this dataset, not just any combination of vertex types make sense. Rather, one may want to work only with `host_focus` artifact features, or all artifacts whose name starts with `host`. So the type is chosen as a prefix of the actual encoded artifact name.def _select_features(features: np.ndarray, vertices: Sequence[ig.Vertex]) -> FeaturesRestricted: a2i: ArtifactIndex = OrderedDict((v["name"], v.index) for v in vertices) return features[list(a2i.values()), :], {name: i for i, name in enumerate(a2i.keys())} def select_by_type(type_artifact: str, features: np.ndarray, graph: ig.Graph) -> FeaturesRestricted: return _select_features(features, [v for v in graph.vs if v["name"].startswith(type_artifact)]) def graph_and_features_for_selection_test(): graph = ig.Graph() vindex: Mapping[str, ig.Vertex] = {} for name in [ "userdomain_init:U34@DOM1", "userdomain_focus:U34@DOM1", "host_init:C23", "host_focus:C87", "host_focus:C27", "process:P8", "userdomain_focus:U12@DOM1", "host_focus:C49" ]: vindex[name] = graph.add_vertex(name) for name_s, name_e in [ ("host_init:C23", "userdomain_init:U34@DOM1"), ("host_init:C23", "userdomain_focus:U34@DOM1"), ("host_init:C23", "host_focus:C87"), ("userdomain_init:U34@DOM1", "userdomain_focus:U34@DOM1"), ("userdomain_init:U34@DOM1", "host_focus:C87"), ("userdomain_focus:U34@DOM1", "host_focus:C87"), ("userdomain_focus:U34@DOM1", "host_focus:C27"), ("userdomain_focus:U34@DOM1", "process:P8"), ("process:P8", "host_focus:C27"), ("process:P8", "userdomain_focus:U12@DOM1"), ("process:P8", "host_focus:C49"), ("host_focus:C49", "userdomain_focus:U12@DOM1") ]: graph.add_edge(vindex[name_s], vindex[name_e]) features = np.array([ [0, 6, 8], [5, 8, 9], [1, 3, 8], [4, 0, 0], [1, 1, 1], [10, 11, 3], [7, 3, 6], [9, 2, 2] ]) assert (features[[v.index for v in graph.vs], :] == features).all() return graph, features def arrays_equal(**kwargs: np.ndarray) -> ExplanationOnFailure: left, right = kwargs.values() if left.size != right.size: return Explanation("Arrays of distinct size", join_args([], kwargs)) if not np.isclose(left, right, equal_nan=True, atol=1e-5).all(): return Explanation("Arrays not equal within tolerance", join_args([], kwargs)) return True %%test Selecting features by single vertex type 
graph, features = graph_and_features_for_selection_test() selected, a2i = select_by_type("host_focus", features, graph) assert_( arrays_equal, expected=np.array([ [4, 0, 0], [1, 1, 1], [9, 2, 2] ]), selected=selected ) assert_(eq, expected={"host_focus:C87": 0, "host_focus:C27": 1, "host_focus:C49": 2}) %%test Selecting features by more general vertex type graph, features = graph_and_features_for_selection_test() selected, a2i = select_by_type("host", features, graph) assert_( arrays_equal, expected=np.array([ [1, 3, 8], [4, 0, 0], [1, 1, 1], [9, 2, 2] ]), selected=selected ) assert_(eq, expected={"host_init:C23": 0, "host_focus:C87": 1, "host_focus:C27": 2, "host_focus:C49": 3})Test Selecting features by more general vertex type passed.Restricting to the neighbourhood of a set of artifacts When investigating a compromise suspected around a certain host, then looking at anomalies in a representation of all artifacts related one step from it in the graph (its *family*) may yield fruit.def select_family(artifacts_: Union[str, Container[str]], features: np.ndarray, graph: ig.Graph) -> FeaturesRestricted: artifacts: Set[str] if isinstance(artifacts_, str): artifacts = {artifacts_} else: artifacts = set(artifacts_) vs = set(graph.vs(name_in=artifacts)) return _select_features(features, vs | set(sum([v.neighbors() for v in vs], []))) %%test Select family of a single vertex graph, features = graph_and_features_for_selection_test() selected, a2i = select_family("host_focus:C87", features, graph) assert_( eq, expected=["host_focus:C87", "host_init:C23", "userdomain_focus:U34@DOM1", "userdomain_init:U34@DOM1"], a2i_keys=sorted(list(a2i.keys())) ) assert_(eq, expected=list(range(4)), a2i_indexes=sorted(list(a2i.values()))) for name, i in a2i.items(): v, *_ = graph.vs(name=name) assert_(arrays_equal, expected=features[v.index, :], selected=selected[i, :]) %%test Select family of two neighbouring vertices graph, features = graph_and_features_for_selection_test() selected, a2i = select_family(["host_focus:C87", "host_init:C23"], features, graph) assert_( eq, expected=["host_focus:C87", "host_init:C23", "userdomain_focus:U34@DOM1", "userdomain_init:U34@DOM1"], a2i_keys=sorted(list(a2i.keys())) ) assert_(eq, expected=list(range(4)), a2i_indexes=sorted(list(a2i.values()))) for name, i in a2i.items(): v, *_ = graph.vs(name=name) assert_(arrays_equal, expected=features[v.index, :], selected=selected[i, :]) %%test Select family of vertices with overlapping but not identical neighbourhoods graph, features = graph_and_features_for_selection_test() selected, a2i = select_family(["host_focus:C87", "host_focus:C27"], features, graph) assert_( eq, expected=[ "host_focus:C27", "host_focus:C87", "host_init:C23", "process:P8", "userdomain_focus:U34@DOM1", "userdomain_init:U34@DOM1" ], a2i_keys=sorted(list(a2i.keys())) ) assert_(eq, expected=list(range(6)), a2i_indexes=sorted(list(a2i.values()))) for name, i in a2i.items(): v, *_ = graph.vs(name=name) assert_(arrays_equal, expected=features[v.index, :], selected=selected[i, :]) %%test Select family of vertices without a overlapping neighbourhoods graph, features = graph_and_features_for_selection_test() selected, a2i = select_family(["host_focus:C87", "host_focus:C49"], features, graph) assert_( eq, expected=[ "host_focus:C49", "host_focus:C87", "host_init:C23", "process:P8", "userdomain_focus:U12@DOM1", "userdomain_focus:U34@DOM1", "userdomain_init:U34@DOM1" ], a2i_keys=sorted(list(a2i.keys())) ) assert_(eq, expected=list(range(7)), 
a2i_indexes=sorted(list(a2i.values()))) for name, i in a2i.items(): v, *_ = graph.vs(name=name) assert_(arrays_equal, expected=features[v.index, :], selected=selected[i, :])Test Select family of vertices without a overlapping neighbourhoods passed.Labeling artifacts The `redteam.txt.gz` file carries direct information about authentication (`auth.txt.gz`) records corresponding to logon actions by attackers. The following function provides a labeling oracle for *artifacts* from any record within a time interval.@DataStoreLosAlamos.method def get_redteam(self) -> pd.DataFrame: return read_lanl_csv(op.join(self.dir_base, "redteam.txt.gz")) import dask.dataframe as ddf import pandas as pd Artifact = Tuple[str, str] @DataStoreLosAlamos.method def artifacts_malicious( self, interval_time: Tuple[pd.Timestamp, pd.Timestamp], lister_artifacts: Callable[[str, pd.DataFrame], Sequence[str]] ) -> Delayed: # Mapping[Artifact, float] lower, upper = interval_time auth = self.get_stream("auth").loc[lower:upper] redteam = self.get_redteam().loc[lower:upper] auth_malicious_dask: ddf.DataFrame = auth.merge( redteam, on=["time", "userdomain_focus", "host_init", "host_focus"] ) @dask.delayed def extract_artifacts(auth_malicious: pd.DataFrame): return { (col, value): 1.0 for col in lister_artifacts("auth", auth_malicious) for value in auth_malicious[col] } return extract_artifacts(auth_malicious_dask) def test_label( interval_time: Tuple[pd.Timestamp, pd.Timestamp], expected: List[Tuple[Artifact, float]] ) -> None: DATA = { "proc": b"", "flows": b"", "dns": b"", "auth": b"""\ 63,U34@DOM1,U23@DOM1,C98,C98,Kerberos,Network,LogOn,Success 91,U67@DOM1,SYSTEM@C89,C89,C89,Negotiate,Service,LogOn,Success 304,U45@DOM1,U45@DOM1,C234,C329,Kerberos,Network,LogOff,Success 897,U93@DOM1,U93@DOM1,C123,C123,Kerberos,Network,LogOn,Success 956,U93@DOM1,U93@DOM1,C123,C123,Kerberos,Network,LogOff,Success 3456,U67@DOM1,U45@DOM1,C89,C329,Kerberos,Network,LogOn,Failure 4127,U980@DOM1,U980@DOM1,C23,C32,Kerberos,Service,LogOn,Success """, "redteam": b"""\ 91,SYSTEM@C89,C89,C89 897,U93@DOM1,C123,C123 3456,U45@DOM1,C89,C329 """ } import gzip DIR_LANL = ("./...lanl...") try: os.makedirs(DIR_LANL) for path, content in DATA.items(): with gzip.open(op.join(DIR_LANL, path + ".txt.gz"), "wb") as file: file.write(content) assert_( eq, expected=expected, labeled=DataStoreLosAlamos(DIR_LANL).artifacts_malicious( interval_time, lambda name, df: [col for col in df.columns if col.startswith("userdomain") or col.startswith("host")] ).compute() ) finally: if os.path.isdir(DIR_LANL): shutil.rmtree(DIR_LANL) %%test Artifact labeling across the whole timeframe test_label( (Time.START, Time.END), { ("userdomain_init", "U67@DOM1"): 1.0, ("userdomain_focus", "SYSTEM@C89"): 1.0, ("host_init", "C89"): 1.0, ("host_focus", "C89"): 1.0, ("userdomain_init", "U93@DOM1"): 1.0, ("userdomain_focus", "U93@DOM1"): 1.0, ("host_init", "C123"): 1.0, ("host_focus", "C123"): 1.0, ("userdomain_init", "U67@DOM1"): 1.0, ("userdomain_focus", "U45@DOM1"): 1.0, ("host_focus", "C329"): 1.0 } ) %%test Artifact labeling on a prefix of the timeframe test_label( (Time.START, Time.START + pd.Timedelta(seconds=1000)), { ("userdomain_init", "U67@DOM1"): 1.0, ("userdomain_focus", "SYSTEM@C89"): 1.0, ("host_init", "C89"): 1.0, ("host_focus", "C89"): 1.0, ("userdomain_init", "U93@DOM1"): 1.0, ("userdomain_focus", "U93@DOM1"): 1.0, ("host_init", "C123"): 1.0, ("host_focus", "C123"): 1.0 } )Test Artifact labeling on a prefix of the timeframe passed.Test summaryif __name__ == "__main__": _ = 
summarize_results(suite)32 passed, 0 failed, 0 raised an errorDeveloping Image ClassifierIn this project, an image classifier is trained to recognize different species of flowers. We can export this classifier into phone application so that with the help of camera we can tell the name of flower. Dataset Link [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, Few examples below. The project is broken down into multiple steps:* Load and preprocess the image dataset* Train the image classifier on your dataset* Use the trained classifier to predict image contentPlease run this notebook in the workspace that you have chosen GPU rather than CPU mode.# Imports here %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import torch import numpy as np from torch import nn from torch import optim import torch.nn.functional as F from torchvision import datasets, transforms, models from workspace_utils import active_session from PIL import Image from collections import OrderedDict import jsonLoad the dataHere `torchvision` to load the data. The dataset Link [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. The pre-trained networks used were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.data_dir = 'flowers' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' using_gpu = torch.cuda.is_available() # TODO: Define your transforms for the training, validation, and testing sets train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) testval_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # TODO: Load the datasets with ImageFolder image_trainset = datasets.ImageFolder(train_dir, transform=train_transforms) image_testset = datasets.ImageFolder(test_dir, transform=testval_transforms) image_valset = datasets.ImageFolder(valid_dir, transform=testval_transforms) # TODO: Using the image datasets and the trainforms, define the dataloaders image_trainloader = torch.utils.data.DataLoader(image_trainset, batch_size=64, shuffle=True) image_testloader = torch.utils.data.DataLoader(image_testset, batch_size=64, shuffle=True) image_valloader = torch.utils.data.DataLoader(image_valset, batch_size=64, shuffle=True)Label mappingYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). 
This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f)Building and training the classifierThings you'll need to do:* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout* Train the classifier layers using backpropagation using the pre-trained network to get the features* Track the loss and accuracy on the validation set to determine the best hyperparameters# TODO: Build and train your network epochs = 4 lr = 0.001 print_every = 10 # Freeze parameters so we don't backprop through them hidden_layers = [10240, 1024] def make_model(structure, hidden_layers, lr): if structure=="densenet161": model = models.densenet161(pretrained=True) input_size = 2208 else: model = models.vgg16(pretrained=True) input_size = 25088 output_size = 102 for param in model.parameters(): param.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('dropout',nn.Dropout(0.5)), ('fc1', nn.Linear(input_size, hidden_layers[0])), ('relu1', nn.ReLU()), ('fc2', nn.Linear(hidden_layers[0], hidden_layers[1])), ('relu2', nn.ReLU()), ('fc3', nn.Linear(hidden_layers[1], output_size)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier return model model = make_model('vgg16', hidden_layers, lr) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=lr) def cal_accuracy(model, dataloader): validation_loss = 0 accuracy = 0 for i, (inputs,labels) in enumerate(dataloader): optimizer.zero_grad() inputs, labels = inputs.to('cuda') , labels.to('cuda') model.to('cuda') with torch.no_grad(): outputs = model.forward(inputs) validation_loss = criterion(outputs,labels) ps = torch.exp(outputs).data equality = (labels.data == ps.max(1)[1]) accuracy += equality.type_as(torch.FloatTensor()).mean() validation_loss = validation_loss / len(dataloader) accuracy = accuracy /len(dataloader) return validation_loss, accuracy with active_session(): def my_DLM(model, image_trainloader, image_valloader, epochs, print_every, criterion, optimizer, device='gpu'): epochs = epochs print_every = print_every steps = 0 # change to cuda model.to('cuda') for e in range(epochs): running_loss = 0 for ii, (inputs, labels) in enumerate(image_trainloader): steps += 1 inputs, labels = inputs.to('cuda'), labels.to('cuda') optimizer.zero_grad() # Forward and backward passes outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every == 0: model.eval() val_loss, train_ac = cal_accuracy(model, image_valloader) print("Epoch: {}/{}... | ".format(e+1, epochs), "Loss: {:.4f} | ".format(running_loss/print_every), "Validation Loss {:.4f} | ".format(val_loss), "Accuracy {:.4f}".format(train_ac)) running_loss = 0 my_DLM(model, image_trainloader, image_valloader, epochs, print_every, criterion, optimizer, 'gpu')Epoch: 1/4... | Loss: 6.4524 | Validation Loss 0.3465 | Accuracy 0.0295 Epoch: 1/4... | Loss: 4.4584 | Validation Loss 0.3212 | Accuracy 0.1000 Epoch: 1/4... | Loss: 4.0043 | Validation Loss 0.2779 | Accuracy 0.2329 Epoch: 1/4... | Loss: 3.4299 | Validation Loss 0.2149 | Accuracy 0.3459 Epoch: 1/4... | Loss: 2.8110 | Validation Loss 0.1520 | Accuracy 0.4691 Epoch: 1/4... 
| Loss: 2.3905 | Validation Loss 0.1409 | Accuracy 0.5067 Epoch: 1/4... | Loss: 2.1229 | Validation Loss 0.1574 | Accuracy 0.5879 Epoch: 1/4... | Loss: 1.8654 | Validation Loss 0.0935 | Accuracy 0.6126 Epoch: 1/4... | Loss: 1.7904 | Validation Loss 0.0804 | Accuracy 0.6531 Epoch: 1/4... | Loss: 1.7925 | Validation Loss 0.0588 | Accuracy 0.6823 Epoch: 2/4... | Loss: 0.9521 | Validation Loss 0.0989 | Accuracy 0.6787 Epoch: 2/4... | Loss: 1.4292 | Validation Loss 0.0681 | Accuracy 0.7240 Epoch: 2/4... | Loss: 1.4400 | Validation Loss 0.0685 | Accuracy 0.7422 Epoch: 2/4... | Loss: 1.[...]Testing your network# TODO: Do validation on the test set def testing(dataloader): model.eval() model.to('cuda') correct = 0 total = 0 with torch.no_grad(): for inputs, labels in image_testloader: inputs, labels = inputs.to('cuda'), labels.to('cuda') outputs = model(inputs) _ , prediction = torch.max(outputs.data, 1) total += labels.size(0) correct += (prediction == labels.data).sum().item() print('Accuracy on the test set: %d %%' % (100 * correct / total)) testing(image_testloader)Accuracy on the test set: 85 %Save the checkpointNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.```model.class_to_idx = image_datasets['train'].class_to_idx```Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.model.class_to_idx = image_trainset.class_to_idx # TODO: Save the checkpoint state = { 'structure' :'vgg16', 'learning_rate': lr, 'epochs': epochs, 'hidden_layers':hidden_layers, 'state_dict':model.state_dict(), 'class_to_idx':model.class_to_idx } torch.save(state, 'checkpoint.pth')Loading the checkpointAt this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.# TODO: Write a function that loads a checkpoint and rebuilds the model def loading_checkpoint(path): # Loading the parameters state = torch.load(path) lr = state['learning_rate'] structure = state['structure'] hidden_layers = state['hidden_layers'] epochs = state['epochs'] # Building the model from checkpoints model = make_model(structure, hidden_layers, lr) class_to_idx = state['class_to_idx'] model.load_state_dict(state['state_dict']) loading_checkpoint('checkpoint.pth')Inference for classificationNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like ```pythonprobs, classes = predict(image_path, model)print(probs)print(classes)> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]> ['70', '3', '45', '62', '55']```First you'll need to handle processing the input image such that it can be used in your network. 
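Before moving on to preprocessing, one practical note: `loading_checkpoint` above rebuilds the model but does not return it. A minimal sketch of a loader that hands the model back (assuming the checkpoint keys saved earlier and the `make_model` helper defined above; `load_checkpoint` is a new name):

```python
def load_checkpoint(path):
    # Rebuild the architecture from the saved hyperparameters, then restore the weights.
    state = torch.load(path)
    model = make_model(state['structure'], state['hidden_layers'], state['learning_rate'])
    model.load_state_dict(state['state_dict'])
    model.class_to_idx = state['class_to_idx']
    return model

model = load_checkpoint('checkpoint.pth')
```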
Image PreprocessingYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.htmlPIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.htmlPIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.def process_image(image): ''' Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array ''' # TODO: Process a PIL image for use in a PyTorch model pil_image = Image.open(image) image_transforms = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img = image_transforms(pil_image) return img # Demo image_path = (test_dir + '/100/' + 'image_07939.jpg') processed_image = process_image(image_path) processed_imageTo check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).def imshow(image, ax=None, title=None): if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes is the third dimension image = image.transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return ax image_path = (test_dir + '/100/' + 'image_07939.jpg') imshow(processed_image.numpy())Class PredictionOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.htmltorch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. 
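A tiny worked example of `topk` on dummy scores (made-up values, not model output):

```python
import torch

# Hypothetical log-probabilities for six classes, batch size 1.
output = torch.tensor([[-4.2, -0.3, -2.1, -5.0, -1.7, -3.3]])
probs, indices = output.topk(3)
print(probs)    # tensor([[-0.3000, -1.7000, -2.1000]]) -- the 3 largest values
print(indices)  # tensor([[1, 4, 2]]) -- their positions along the class dimension
```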
You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.```pythonprobs, classes = predict(image_path, model)print(probs)print(classes)> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]> ['70', '3', '45', '62', '55']```def predict(image_path, model, topk=5): ''' Predict the class (or classes) of an image using a trained deep learning model. ''' # TODO: Implement the code to predict the class from an image file model.eval() model.cpu() img = process_image(image_path) img = img.unsqueeze_(0) img = img.float() with torch.no_grad(): output = model.forward(img) probs, classes = torch.topk(input=output, k=topk) top_prob = probs.exp() # Convert indices to classes idx_to_class = {val: key for key, val in model.class_to_idx.items()} top_classes = [idx_to_class[each] for each in classes.cpu().numpy()[0]] print('Top Classes: ', top_classes) print('Top Probs: ', top_prob) return top_prob, top_classes #return top_prob.numpy()[0], mapped_classes image_path = (test_dir + '/29/' + 'image_04095.jpg') probs, classes = predict(image_path, model) # Converting from tensor to numpy-array print(probs) print(classes)Top Classes: ['29', '14', '13', '10', '30'] Top Probs: tensor([[ 0.7722, 0.1411, 0.0865, 0.0002, 0.0000]]) tensor([[ 0.7722, 0.1411, 0.0865, 0.0002, 0.0000]]) ['29', '14', '13', '10', '30']Sanity CheckingNow that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). 
To show a PyTorch tensor as an image, use the `imshow` function defined above.# TODO: Display an image along with the top 5 classes def sanity_checking(): plt.rcParams["figure.figsize"] = (3,3) plt.rcParams.update({'font.size': 12}) # Showing actual image image_path = (test_dir + '/29/' + 'image_04095.jpg') probs, classes = predict(image_path, model) #classes = classes.cpu().numpy() image_to_show = process_image(image_path) image = imshow(image_to_show.numpy(), ax = plt) image.axis('off') image.title(cat_to_name[str(classes[0])]) image.show() # Showing Top Classes labels = [] for class_index in classes: labels.append(cat_to_name[str(class_index)]) y_pos = np.arange(len(labels)) probs = probs[0] plt.barh(y_pos, probs, align='center', color='green') plt.yticks(y_pos, labels) plt.xlabel('Probability') plt.title('Top Classes') plt.show() sanity_checking()Top Classes: ['29', '14', '13', '10', '30'] Top Probs: tensor([[ 0.7722, 0.1411, 0.0865, 0.0002, 0.0000]])Traindata = pd.read_csv('data/terra/train.csv') data = pd.read_csv('data/terra/train.csv') data['ds'] = pd.to_datetime(data['day'], unit='d') data['unique_id'] = data['cultivar'] + data['sitename'] data = data.rename(columns={'canopy_height':'y'}) #Series must be complete in the frequency data = ffill_missing_dates_per_serie(data,'D') data = data.drop_duplicates(['unique_id','ds']) data.head() X_train = data[['unique_id','ds']] X_train['x'] = '1' y_train = data[['unique_id','ds','y']] data_plot = y_train[y_train['unique_id']=='PI157030MAC Field Scanner Season 4 Range 17 Column 14'] plt.plot(data_plot['ds'], data_plot['y'], label = 'y') plt.show()/Users/cchallu/anaconda3/envs/esrnn_torch/lib/python3.6/site-packages/pandas/plotting/_matplotlib/converter.py:103: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters. To register the converters: >>> from pandas.plotting import register_matplotlib_converters >>> register_matplotlib_converters() warnings.warn(msg, FutureWarning)Testdata_test = pd.read_csv('data/terra/test_real.csv') data_test['ds'] = pd.to_datetime(data_test['day'], unit='d') data_test['unique_id'] = data_test['cultivar'] + data_test['sitename'] data_test.head() X_test = data_test[['unique_id','ds','canopy_height']] X_test.columns = ['unique_id', 'ds', 'y'] uniques = X_test['unique_id'].unique() # Train model esrnn = ESRNN(max_epochs=1, batch_size=16, learning_rate=3e-4, gradient_clipping_threshold=20, dilations=[[1, 7], [28]], add_nl_layer=True, per_series_lr_multip=1.0, seasonality=[], input_size=7, output_size=50, max_periods=20, level_variability_penalty=80, training_percentile=60, rnn_weight_decay=0) esrnn.fit(X_train, y_train) y_hat = esrnn.predict(X_test[['unique_id']]) X_plot = y_train.append(y_hat) plot_id = 0 y_test_plot = X_plot.loc[X_plot['unique_id']==uniques[plot_id]] plot_prediction(y_test_plot['y'], y_test_plot['y_hat']) y_hat = esrnn.predict(X_test[['unique_id','ds','y']]) np.abs(y_hat['y_hat']-y_hat['y']).mean()a = "parth" b = "gazal" c = a[0:2] + b[2:5] d = "sankalp" e = "priya" f = d[0:2] + e[1:5] print(c) print(f)pazal sariyaMódulo 5 01. Machine Learning e Saúde Desafio 01: Se você não conhece o Kaggle, explore um pouco os datasets, competições e discussões para verificar o quão rico é esta plataforma. Desafio 02: Leia a descrição do problema proposta pelo Sírio Libanês, tentando entender qual é o real problema. 
Before moving on to the next lessons, discuss your conclusions about the project on Discord. * 02. Development on Kaggle import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) import pandas as pd dados = pd.read_excel('/kaggle/input/covid19/Kaggle_Sirio_Libanes_ICU_Prediction.xlsx') dados.head() dados.describe() **Note:** In the real world, some columns may be **anonymized**, either for human ethical reasons (a given characteristic in the data could reveal which people are being used in the dataset) or simply because the companies themselves do not want you to know what that variable is. dados['ICU'] dados['ICU'].value_counts() - Supervised learning: when you want to predict the value of some *label*, given that you know what this variable is; - Unsupervised learning: when you want to predict the value of some *label*, given that you do **not** know what this variable is. What actually happens is that, based on the other features, I try to group the samples into certain groups **without knowing what that data really is.** In other words, we are learning the **concept** and not the word itself. Challenge 01 - Research supervised learning, unsupervised learning, reinforcement learning and other forms of learning. Challenge 02 - Research classification, regression and other problems that can be solved with Machine Learning. 03. Machine Learning models x_columns = dados.describe().columns dados_limpos = dados.dropna() y = dados_limpos['ICU'] x = dados_limpos[x_columns].drop('ICU', axis=1) from sklearn.linear_model import LogisticRegression An introductory explanation of Logistic Regression The Logistic Regression model is used to describe a quantitative variable as something categorical (binary). The method uses the probability of that (quantitative) value occurring: if the probability is below 50%, it classifies the case as "not likely" (0); if it is above, as "likely" (1). The function it uses is the **sigmoid**, whose formula is $\sigma(t) = \frac{1}{1 + e^{-t}}$. Given a linear function $t = \beta_0 + \beta_1 x$, we then have $p(x) = \sigma(t) = \frac{1}{1 + e^{-(\beta_0 + \beta_1 x)}}$. An example is shown below. **Note:** the plot and the formulas are taken from Wikipedia. modelo = LogisticRegression() modelo.fit(x, y) modelo.predict([x.iloc[4]]) y.iloc[4] sum(modelo.predict(x) == y) sum(modelo.predict(x) != y) accuracy = sum(modelo.predict(x) == y) / len(y) * 100 accuracy Challenge 03 - Why do we throw ICU out of the X data? What happens if it stays in x? Why would that be good/bad? Challenge 04 - Research ethics in data science and artificial intelligence. 04. Metrics and evaluation y.value_counts() from sklearn.dummy import DummyClassifier DummyClassifier This model actually guesses the value according to the **class proportions**.
Thus, it is a **baseline** given by a naive algorithm, estimating by the majority, that is, by the mode. So, if it had to guess a value in the example above, the model would guess **0** for every sample. modelo = DummyClassifier(strategy="most_frequent") modelo.fit(x, y) sum(modelo.predict(x) == y) / len(y) * 100 from sklearn.metrics import accuracy_score Accuracy Basically, it is the hit rate of what was predicted relative to what was tested. accuracy_score(y, modelo.predict(x)) Note that the result was the same as in the previous cell. import numpy as np np.random.seed(73246) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, stratify=y) print(f'Number of patients for training (input -> x): {len(x_train)}') print(f'Number of patients for training (label of each sample -> y): {len(y_train)}') print(f'Number of patients for testing (input -> x): {len(x_test)}') print(f'Number of patients for testing (label of each sample -> y): {len(y_test)}') modelo = LogisticRegression() modelo.fit(x_train, y_train) y_prediction = modelo.predict(x_test) accuracy_score(y_test, y_prediction)/opt/conda/lib/python3.7/site-packages/sklearn/linear_model/_logistic.py:764: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG) **Observation 1:** if we run our model again, we will very likely end up with different accuracies. This happens precisely because the mathematical model is indeed the same, but the values we draw from the dataframe, at random, are **different**! To fix a single random "state", we need to set the ***random_state*** argument in *train_test_split*. **Observation 2:** train_test_split uses numpy behind the scenes, so if we set the seed beforehand with numpy, we do not need to touch *random_state*. **Observation 3:** *train_test_split* splits train and test randomly and, by default, puts 75% of the data into training and 25% into testing. However, nothing guarantees that the values inside the training set are proportional to the ones inside the test set, for example 50% of 0 and 50% of 1 in training as well as 50% of 0 and 50% of 1 in testing. To set these internal percentages, we need to define the ***stratify*** argument in train_test_split. **Observation 4:** by setting ***stratify*** = y, we are saying: "stratify according to the proportions of the values that exist in y". y_train.value_counts(normalize=True) y_test.value_counts(normalize=True) Challenge 05 - Decide how much data you will use for training and how much for testing. How much do people usually use, and why? > In a draft copy currently being written by , he discusses the amount of data in a train-test split. My understanding from the book: the traditional and most common value is 70-30 or 75-25. If you have 10k or 30k samples, it is fine to go with a 70-30 split. But when dealing with Big-data, for example if you have 1 million samples, it is not recommended to have 30k samples as test data, so in that case, 90-10 is actually okay.
Because 10k test samples can pretty much provide an intuition about the model.> in brief: for fewer samples, go with the recommended 70-30 split; for much larger sample counts, go with a 90-10 split. link: https://datascience.stackexchange.com/questions/15804/which-train-test-split-performs-better-5050-or-6040 Challenge 06 - Look at the documentation of LogisticRegression... which parameter does it have in common with train_test_split? What does that mean for us? The ***random_state*** parameter is common to both functions; however, they do different things. The *random_state* of *LogisticRegression()* is a code to standardize the randomness, just as in train_test_split, but it only takes effect depending on another argument of *LogisticRegression()*, ***the solver***. This argument basically optimizes the model according to certain conditions, which are, in the words of the documentation: - For small datasets, ‘*liblinear*’ is a good choice, whereas ‘*sag*’ and ‘*saga*’ are faster for large ones.- For multiclass problems, only ‘*newton-cg*’, ‘*sag*’, ‘*saga*’ and ‘*lbfgs*’ handle multinomial loss; ‘*liblinear*’ is limited to one-versus-rest schemes.- ‘*newton-cg*’, ‘*lbfgs*’, ‘*sag*’ and ‘*saga*’ handle L2 or no penalty- ‘*liblinear*’ and ‘*saga*’ also handle L1 penalty- ‘*saga*’ also supports ‘*elasticnet*’ penalty- ‘*liblinear*’ does not support setting penalty='*none*' The *random_state* of *train_test_split*, in turn, is what was described earlier. 05. Understanding the real problem dados = pd.read_excel('/kaggle/input/covid19/Kaggle_Sirio_Libanes_ICU_Prediction.xlsx') dados.head() Before putting our ML model into practice, we should have understood our dataset better. If we pay attention, the "*PATIENT_VISIT_IDENTIFIER*" column is the ID of the person being evaluated. However, there are several identical IDs. Why? The answer is [here](https://www.kaggle.com/S%C3%ADrio-Libanes/covid19): this simply happens because the patient's blood work was evaluated at different moments. In that link, they use separate windows of 2 hours each ("*WINDOW*"), up to the "*ABOVE_12*" category. In theory, there is no need to know what happens after the person went to the ICU, only what happened **before**, because we want to predict that transfer from characteristics observed beforehand. Moreover, if the person went straight to the ICU (before the "0-2" category), we do not even need to evaluate that patient's situation; it will only hurt our model!
In other words, we can discard that person. Also note that there are missing values in the dataframe, and the most immediate answer for these is to fill them in with the value from the moment just before or just after the empty one, since blood test results would not swing that drastically from one moment to the next. So, we need to do two things before modeling the data:- Fill the missing continuous values with the preceding or following numbers in the column;- Remove the patients who, in theory, went straight to the ICU (ICU = 1 already in the "*WINDOW*" = "0-2" row).# Selecting the continuous features features_continuas = dados.iloc[:, 13:-2].columns features_continuas def preenche_tabela(df): features_continuas_colunas = df.iloc[:, 13:-2].columns features_continuas = df.groupby('PATIENT_VISIT_IDENTIFIER', as_index=False)[features_continuas_colunas].fillna(method='bfill').fillna(method='ffill') features_categoricas = df.iloc[:, :13] saida = df.iloc[:, -2:] dados_finais = pd.concat([features_categoricas, features_continuas, saida], ignore_index=True, axis=1) dados_finais.columns = df.columns return dados_finais # Filling in the missing values dados_limpos = preenche_tabela(dados) dados_limpos.head() dados_limpos.describe() # Dropping any missing values that are left dados_limpos = dados_limpos.dropna() # Removing the rows where "WINDOW" equals "0-2" and "ICU" equals 1 (straight to the ICU) a_remover = dados_limpos.query('WINDOW == "0-2" and ICU == 1')['PATIENT_VISIT_IDENTIFIER'].values dados_limpos = dados_limpos.query('PATIENT_VISIT_IDENTIFIER not in @a_remover') display(dados_limpos.describe()) Now, to finish, I only want **one moment** per patient; to be honest, it could be any of them. So, simply because it is the first one that appears, I will pick every patient's "0-2" window. To do this, we have to flag, in the "0-2" window, the patients who actually went to the ICU and those who did not.def prepare_window(rows): if np.any(rows['ICU']): rows.loc[rows['WINDOW'] == '0-2', 'ICU'] = 1 return rows.loc[rows['WINDOW'] == '0-2'] dados_limpos = dados_limpos.groupby('PATIENT_VISIT_IDENTIFIER').apply(prepare_window) dados_limpos.head() # Dropping the columns that are not relevant for the model dados_limpos = dados_limpos.drop(['PATIENT_VISIT_IDENTIFIER', 'WINDOW'], axis=1).reset_index(drop=True) dados_limpos.head() x_columns = dados_limpos.describe().columns y = dados_limpos['ICU'] x = dados_limpos[x_columns].drop('ICU', axis=1) np.random.seed(73246) x_train, x_test, y_train, y_test = train_test_split(x, y, stratify=y) modelo = DummyClassifier() modelo.fit(x_train, y_train) y_prediction = modelo.predict(x_test) accuracy_score(y_test, y_prediction) modelo = LogisticRegression() modelo.fit(x_train, y_train) y_prediction = modelo.predict(x_test) accuracy_score(y_test, y_prediction)/opt/conda/lib/python3.7/site-packages/sklearn/linear_model/_logistic.py:764: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)Import temperature data from the DWD and process it This notebook pulls historical temperature data from the DWD server and formats it for future use in other projects.
The data is reported hourly for each of the available weather stations and packaged in a zip file. To use the data, we need to store everything in a single .csv file, all stations side-by-side. Also, we need the daily average.To reduce computing time, we also exclude data earlier than 2007. Files should be executed in the following pipeline:* 1-dwd_konverter_download* 2-dwd_konverter_extract* 3-dwd_konverter_build_df* 4-dwd_konverter_final_processing 1.) Download files from the DWD-APIHere we download all relevant files from the DWS Server. The DWD Server is http-based, so we scrape the download page for all links that match 'stundenwerte_TU_.\*_hist.zip' and download them to the folder 'download'. Link to the relevant DWD-page: https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/air_temperature/historical/import requests import re from bs4 import BeautifulSoup from pathlib import Path # Set base values download_folder = Path.cwd() / 'download' base_url = 'https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/air_temperature/historical/' # Initiate Session and get the Index-Page with requests.Session() as s: resp = s.get(base_url) # Parse the Index-Page for all relevant soup = BeautifulSoup(resp.content) links = soup.findAll("a", href=re.compile("stundenwerte_TU_.*_hist.zip")) # For testing, only download 10 files file_max = 10 dl_count = 0 #Download the .zip files to the download_folder for link in links: zip_response = requests.get(base_url + link['href'], stream=True) # Limit the downloads while testing dl_count += 1 if dl_count > file_max: break with open(Path(download_folder) / link['href'], 'wb') as file: for chunk in zip_response.iter_content(chunk_size=128): file.write(chunk) print('Done')DoneShuffle Arrays in Unison A function for NumPy arrays in unison. > from mlxtend.preprocessing import shuffle_arrays_unison Example 1 - Scaling a Pandas DataFrameimport numpy as np from mlxtend.preprocessing import shuffle_arrays_unison X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) y = np.array([1, 2, 3]) print('X:\n%s' % X) print('y:\n%s' % y) X2, y2 = shuffle_arrays_unison(arrays=[X, y], random_seed=3) print('X2:\n%s' % X2) print('y2:\n%s' % y2)X2: [[4 5 6] [1 2 3] [7 8 9]] y2: [2 1 3]APIwith open('../../api_modules/mlxtend.preprocessing/shuffle_arrays_unison.md', 'r') as f: print(f.read())## shuffle_arrays_unison *shuffle_arrays_unison(arrays, random_seed=None)* Shuffle NumPy arrays in unison. **Parameters** - `arrays` : array-like, shape = [n_arrays] A list of NumPy arrays. - `random_seed` : int (default: None) Sets the random state. **Returns** - `shuffled_arrays` : A list of NumPy arrays after shuffling. 
**Examples** >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/](http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/)Toy example: Fitting a linear function with neural networks- We want to fit data from the function y=2x-1- Using a NN with only one layer and one unit- Using SGD and mean squared error- Training for 200 epochsimport tensorflow as tf import numpy as np from tensorflow import keras model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) model.compile(optimizer='sgd', loss='mean_squared_error') xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float) model.fit(xs, ys, epochs=200) print(model.predict([10.0]))[[18.451828]]Restart Kernal!pip install keras-image-helper !pip install --extra-index-url https://google-coral.github.io/py-repo/ tflite_runtime #import tensorflow.lite as tflite import tflite_runtime.interpreter as tflite from keras_image_helper import create_preprocessor interpreter = tflite.Interpreter(model_path='Best_restaurant_to_serve.tflite') interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]['index'] output_index = interpreter.get_output_details()[0]['index'] preprocessor = create_preprocessor('xception', target_size=(200, 200)) url = 'https://upload.wikimedia.org/wikipedia/commons/a/a5/Cars_Cupcakes_%284725761659%29.jpg' url X = preprocessor.from_url(url) interpreter.set_tensor(input_index, X) # put input in the interpreter interpreter.invoke() #model work preds = interpreter.get_tensor(output_index) #get or fetch output preds classes = ['apple_pie', 'baby_back_ribs', 'baklava', 'beef_carpaccio', 'beef_tartare', 'beet_salad', 'beignets', 'bibimbap', 'bread_pudding', 'breakfast_burrito', 'bruschetta', 'caesar_salad', 'cannoli', 'caprese_salad', 'carrot_cake', 'ceviche', 'cheese_plate', 'cheesecake', 'chicken_curry', 'chicken_quesadilla', 'chicken_wings', 'chocolate_cake', 'chocolate_mousse', 'churros', 'clam_chowder', 'club_sandwich', 'crab_cakes', 'creme_brulee', 'croque_madame', 'cup_cakes','deviled_eggs', 'donuts', 'dumplings', 'edamame', 'eggs_benedict', 'escargots', 'falafel', 'filet_mignon', 'fish_and_chips', 'foie_gras', 'french_fries', 'french_onion_soup', 'french_toast', 'fried_calamari', 'fried_rice', 'frozen_yogurt', 'garlic_bread', 'gnocchi', 'greek_salad', 'grilled_cheese_sandwich', 'grilled_salmon', 'guacamole', 'gyoza', 'hamburger', 'hot_and_sour_soup', 'hot_dog','huevos_rancheros', 'hummus', 'ice_cream', 'lasagna', 'lobster_bisque', 'lobster_roll_sandwich', 'macaroni_and_cheese', 'macarons', 'miso_soup', 'mussels', 'nachos', 'omelette', 'onion_rings', 'oysters', 'pad_thai', 'paella', 'pancakes', 'panna_cotta', 'peking_duck', 'pho', 'pizza', 'pork_chop', 'poutine', 'prime_rib', 'pulled_pork_sandwich', 'ramen', 'ravioli', 'red_velvet_cake', 'risotto', 'samosa', 'sashimi', 'scallops', 'seaweed_salad', 'shrimp_and_grits', 'spaghetti_bolognese', 'spaghetti_carbonara', 'spring_rolls', 'steak', 'strawberry_shortcake', 'sushi', 'tacos', 'takoyaki', 'tiramisu', 'tuna_tartare', 'waffles' ] 
dict(zip(classes, preds[0]))**Chapter 1.2 Elements of Linear Algebra**--- **1.2.1 Linear Spaces****Linear Combination**: an expression constructed from a subset by multiplying each term by a constant and adding the results.**Linear Subspace**: a subset U⊆V that is closed under vector addition ($u_1$ + $u_2 \epsilon U$) and scalar multiplication ($\alpha u_1 \epsilon U$)**Span**: the span($w_1$,...,$w_m$) is the set of all linear combinations of $w_j$'s given that $w_1$,...,$w_m \epsilon V$a1 = 1 a2 = 1 u1 = 1 u2 = 1 x = a1 * u1 + a2 * u2 print(x)2In the code above, $a_1$ and $a_2$ are mutable constants while $u_1$ and $u_2$ are variables. Any combination of values for $a_1$ and $a_2$ will result in *x* being a linear combination. The set of all possible linear combinations of $u_1$ and $u_2$ is the span and is denoted as span($u_1$,$u_2$). span($u_1$,$u_2$) as well as all other spans are linear subspaces because they are comprised of linear combinations, which are inherently closed under vector addition and scalar multiplication. **Linear Independence**: A list of vector $u_1,...,u_m$ is said to be linearly independent if none of them can be written as a linear combination of the othersdef dependence_check(vector1, vector2): matrix = np.stack((vector1, vector2), axis = -1) det = np.linalg.det(matrix) if det == 0: print("the vectors are linearly dependent") else: print("the vectors are linearly independent") a = np.array([1, 2]) b = np.array([2, 4]) c = np.array([2, 5]) dependence_check(a, b) dependence_check(a, c)the vectors are linearly dependent the vectors are linearly independentThe above code checks if the sets of vectors *a* and *b* as well as *a* and *c* are linearly independent. *a* and *b* are not linearly independent because *b* = *a* $\cdot$ 2. *a* and *c* are linearly independent because neither can be written as a linear combination of another. **Column Space**: the column space of *A*, where *A* is an n x m matrix and *A* ∈ $R^{n \cdot m}$, is the span of columns of A**Basis**: A basis of *U*, where *U* is a linear subspace of *V*, is a list of vectors $u_1,...,u_m$ that span *U* and are linearly independent. A basis is a unique representation and will always have the same dimension as other bases of *U*.**Dimension**: the number of rows by the number of columns of a matrix.**Rank**: the dimension of the vector space spanned by a matrix's columns.import numpy as np import pprint def column_space(matrix): b = a.transpose() rank = 0 for row in b: rank += 1 print(row) print(f'rank: {rank}') print(f'dimension: {matrix.size}') a = np.array([[1, 2], [3, 4]]) column_space(a)[1 3] [2 4] rank: 2 dimension: 4The column space of *a* would be $\begin{pmatrix}1\\ 3\end{pmatrix}c_1 + \begin{pmatrix}2\\4\end{pmatrix}c_2$ for the code above. The basis of the column space would also be {$\begin{pmatrix}1\\3\end{pmatrix}, \begin{pmatrix}2\\4\end{pmatrix}$} since these vectors are minimally spanning. The rank is equal to the number of vectors in the column space, while the dimension is equal to the total number of elements in the array. 1.2.2 Orthogonality**Inner Product**: $ = u \cdot v = \sum_i^n u_iv_i$**Norm**: $\|v\| = \sqrt {\sum_1^n u_i^2}$**Orthonormal**: A list of vectors $u_1,...,u_m$ is orthonormal if the $u_i$'s are pairwise orthogonal and each has norm 1.a = np.identity(3, dtype = int) print(a)[[1 0 0] [0 1 0] [0 0 1]]In the code above, the basis of *a* is orthonormal because the $u_i$'s are pairwise orthogonal and each has norm 1. 
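A quick way to check orthonormality numerically is to verify that $A^T A = I$, which holds exactly when the columns are orthonormal. A minimal sketch, reusing the identity matrix `a` from the cell above:
```python
import numpy as np

a = np.identity(3, dtype=int)

# For orthonormal columns, A^T A is the identity matrix:
# off-diagonal entries are pairwise inner products (0 = orthogonal),
# diagonal entries are squared norms (1 = unit length).
print(a.T @ a)
print(np.allclose(a.T @ a, np.identity(3)))  # True
```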
**Best Approximation Theorem**: we have a linear subspace $U \subseteq V$ and a vector $v \nsubseteq U$ and we want to find the vector $v^*$ in $U$ that is closest to $v$ in 2-norm. We want to solve $min_{v^* \epsilon U} \|v^* - v \|$. To confirm the optimality of $v^*$, we use the Pythagorean theorem and find that $\|v - \alpha u_1 \|^2 \geq \|v - v^* \|^2$ **Orthogonal Projection**: Let $U \subseteq V$ be a linear subspace with orthonormal basis $q_1,...,q_m$ and let $v \epsilon V$. For any $u \epsilon U$, $\| v - P_Uv \| \leq \| v - u \|$. Furthermore, if $u \epsilon U$ and the previous inequality is an equality, then $u = P_Uv$ 1.2.3 Eigenvalues and Eigenvectors**Eigenvalue**: Let $A \epsilon R^{dxd}$ be a square matrix. Then $\lambda \epsilon R$ is an eigenvalue of A if there exists a nonzero vector $x \neq 0$ such that $Ax = \lambda x$**Eigenvector**: The vector x in the previous equation is referred to as the eigenvectora = np.array([[0, 1], [-2, -3]]) val, vect = np.linalg.eig(a) print("eigenvalues: ", val) print("eigenvectors: ", vect)eigenvalue: [-1. -2.] eigenvector: [[ 0.70710678 -0.4472136 ] [-0.70710678 0.89442719]]The above code solves for the eigenvalues and eigenvectors of matrix $a$.a = np.array([[0, -1], [1, 0]]) val, vect = np.linalg.eig(a) print("eigenvalues: ", val) print("eigenvectors: ", vect)eigenvalue: [0.+1.j 0.-1.j] eigenvector: [[0.70710678+0.j 0.70710678-0.j ] [0. -0.70710678j 0. +0.70710678j]]Looking at the code above, the matrix $\begin{pmatrix}0 & -1\\1 & 0\end{pmatrix}$ has no real solution when solving for eigenvalues.a = np.array([[5, 2], [2, 5]]) val, vect = np.linalg.eig(a) print("eigenvalues: ", val) print("eigenvectors: ", vect)eigenvalue: [7. 3.] eigenvector: [[ 0.70710678 -0.70710678] [ 0.70710678 0.70710678]]Initcd c:\Workspace\Jupyter\plasticc:\Workspace\Jupyter\plasticMySQLfrom plastic.connectors.mysql import PlasticMysql class Task(PlasticMysql): pass Task.find(Task.id[3:])Quickstart (SQLite)File-based testing (as used in the committed entry) Get some data outfrom plastic.connectors.sqlite import PlasticSqlite PlasticSqlite._dbInfo = './dev/sqlite-test.db' class Task(PlasticSqlite): pass Task.find(Task.id[3:])Change a recordtask = Task(id=4) task task.title = "A bit more interesting" task._commit() Task(id=4)Find and filterprint({task.title: task.description for task in Task.find(Task.active[0])}){'Skipped': None, 'A bit more interesting': 'Not much to say here.'}Add an entrynewTask = Task() newTask.title = "A new task to do" newTask.active = True newTask._commit() Task.find(Task.id[5:]) # get tasks after the last addedSQLiteIn-memory testingfrom plastic.connectors.sqlite import PlasticSqlite, Sqlite_Connector PlasticSqlite._connection = Sqlite_Connector() with open('./test/plastic/connectors/sqlite.base.sql') as rawsql: statements = rawsql.read().split(';') for statement in statements: PlasticSqlite._connection._execute_query(statement, []) class Task(PlasticSqlite): _connection = PlasticSqlite._connection pass Task._pending t = Task(id=2) Task._pending Task.find(Task.id[1:]) Task.find(Task.id[7]) t7 = Task(active=1, title="New Task") t7._pending t7._commit() t7._pending Task.find(Task.id[1:])pip install easyocr import easyocr reader = easyocr.Reader(['pt']) resultados = reader.readtext('texto.jpg', paragraph=False) for resultado in resultados: print(f'Texto encontrado:\n' f'\tPosição: {resultado[0]}\n' f'\tTexto: {resultado[1]}\n')Texto encontrado: Posição: [[31, 33], [231, 33], [231, 99], [31, 99]] Texto: texto Texto encontrado: Posição: 
[[34, 148], [459, 148], [459, 188], [34, 188]] Texto: Conjunto organizado de palavras; Texto encontrado: Posição: [[36, 188], [368, 188], [368, 218], [36, 218]] Texto: expressões, frases de uma Texto encontrado: Posição: [[365, 185], [517, 185], [517, 226], [365, 226]] Texto: língua; que; Texto encontrado: Posição: [[34, 221], [539, 221], [539, 259], [34, 259]] Texto: escrito por um autor, compõe uma obra; Texto encontrado: Posição: [[35, 259], [304, 259], [304, 290], [35, 290]] Texto: livro; documento etc. Texto encontrado: Posição: [[37, 291], [543, 291], [543, 331], [37, 331]] Texto: As próprias palavras que se leem em um Texto encontrado: Posição: [[35, 332], [282, 332], [282, 362], [35, 362]] Texto: autor, numa lei etc. Texto encontrado: Posição: [[236, 412], [372, 412], [372, 440], [236, 440]] Texto: [] Dicio com brBring's curveverification of the period matrix given in http://arxiv.org/pdf/1206.6004.pdf [Braden]# Initialize jupyter notebook. Calling twice allegedly avoids a bug %matplotlib notebook %matplotlib notebook from cyclepainter import * # Define the Bring's curve (affine projection) k.=CyclotomicField(5) A2.=AffineSpace(k,2) curve = x*y^5 + x + x^2*y^2 - x^4*y - 2*y^3 # Define the cohomology basis (top of page 18 in [Braden]) den = (-5*x*y^4 + x^4 - 2*x^2*y + 6*y^2) diffs = [ (y^3-x)/den, (-x*y^2 + 1)/den, (-x^2 + y)/den, y*(x^2-y)/den ] # Automorphism rot = lambda x, y: (zeta^3*x, zeta*y) # Load CyclePainter cp = CyclePainter(curve) cp.start() # Load the pre-defined paths. These were hand-replicated using Figure 6 in [Braden] # and successive application of the rot automorphism cp.load_paths('brings_curve_paths.pickle') cp.saved_paths() # Show one of these paths cp.get_path('a8').display() # This is precisely the intersection matrix from the bottom of page 13 in [Braden] I = cp.intersection_matrix(['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8']) I # We will also need the transormation (4.2) in [Braden], to transform the above paths into a canonical basis M = np.matrix([ [1, 0, 0, 0,-2, 0, 1, 0], [1,-1, 0,-1,-1, 1, 1, 1], [1,-1, 0, 0,-1, 2, 1,-1], [0,-1, 0, 0, 1, 2, 0, 0], [1,-1, 1, 0,-1, 1,-1,-1], [1,-1, 1,-1, 0, 0,-1, 1], [1,-1, 1,-1, 0, 1, 0, 1], [0,-1, 0,-1, 1, 1, 1, 2] ]) # Verify that M above indeed transforms the paths into a canonical basis M.T*I*M # Calculate the period matrix (using the transormation M) pm = cp.period_matrix(['a1', 'a2', 'a3', 'a4'], ['a5', 'a6', 'a7', 'a8'], diffs) * M # Finally, find the Riemann matrix A, B = pm[:,:4], pm[:,4:] rm = np.matmul(np.linalg.inv(A), B) np.round(np.matmul(np.linalg.inv(A), B), 3) # Finally, setting t0, the form of the riemann matrix (1.1) in [Braden] is verified t0 = np.array(rm)[0][1] t0 np.round(rm/t0, 9)Analyze A/B Test Results Table of Contents- [Introduction](intro)- [Part I - Probability](probability)- [Part II - A/B Test](ab_test)- [Part III - Regression](regression) IntroductionA/B tests are very commonly performed to test the performance of an old website page compared to a newly developed website page using bootstrapping for hypothesis testing. we also apply logistic regression . Part I - ProbabilityTo get started, let's import our libraries.import pandas as pd import numpy as np import random import matplotlib.pyplot as plt %matplotlib inline #We are setting the seed to assure you get the same answers on quizzes as we set up random.seed(42)`1.` Now, read in the `ab_data.csv` data. Store it in `df`. **Use your dataframe to answer the questions in Quiz 1 of the classroom.**a. 
Read in the dataset and take a look at the top few rows here:#Loading dataset to dataframe df = pd.read_csv('ab_data.csv') df.head()b. Use the below cell to find the number of rows in the dataset.df.shape[0]c. The number of unique users in the dataset.df.user_id.nunique()d. The proportion of users converted.df.converted.mean()e. The number of times the `new_page` and `treatment` don't line up.len(df.query("(group != 'treatment' and landing_page=='new_page') or ( group == 'treatment' and landing_page !='new_page')"))f. Do any of the rows have missing values?df.isnull().sum()`2.` For the rows where **treatment** is not aligned with **new_page** or **control** is not aligned with **old_page**, we cannot be sure if this row truly received the new or old page. Use **Quiz 2** in the classroom to provide how we should handle these rows. a. Now use the answer to the quiz to create a new dataset that meets the specifications from the quiz. Store your new dataframe in **df2**.df2 = df.query("(group == 'control' and landing_page == 'old_page') or (group == 'treatment' and landing_page == 'new_page')") # Double Check all of the correct rows were removed - this should be 0 df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]`3.` Use **df2** and the cells below to answer questions for **Quiz3** in the classroom. a. How many unique **user_id**s are in **df2**?df2.user_id.nunique()b. There is one **user_id** repeated in **df2**. What is it?df2[df2.duplicated(['user_id'])]['user_id'].unique()c. What is the row information for the repeat **user_id**?df2[df2.duplicated(['user_id'], keep=False)]d. Remove **one** of the rows with a duplicate **user_id**, but keep your dataframe as **df2**.# dropping the duplicates df2 = df2.drop_duplicates(['user_id'], keep='first')`4.` Use **df2** in the below cells to answer the quiz questions related to **Quiz 4** in the classroom.a. What is the probability of an individual converting regardless of the page they receive?# mean of dataframe after dropping nulls df2.converted.mean()b. Given that an individual was in the `control` group, what is the probability they converted?control_convert = df2.query("group =='control'").converted.mean() control_convertc. Given that an individual was in the `treatment` group, what is the probability they converted?treat_convert = df2[df2["group"] =='treatment']['converted'].mean() treat_convertd. What is the probability that an individual received the new page?len(df2.query("landing_page == 'new_page'"))/len(df2)e. Use the results in the previous two portions of this question to suggest if you think there is evidence that one page leads to more conversions? Write your response below. These results suggest that there is not sufficient evidence to say that the treatment page leads to more conversions as the probability of conversion for the treatment group is less than that for the control group. Part II - A/B TestHypotheses`1.` For now, consider you need to make the decision just based on all the data provided. If you want to assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, what should your null and alternative hypotheses be? You can state your hypothesis in terms of words or in terms of **$p_{old}$** and **$p_{new}$**, which are the converted rates for the old and new pages. 
Hypothesis:$$H_{0} : p_{new} - p_{old} \le 0$$$$H_{1} : p_{new} - p_{old} \gt 0$$ `2.` Assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have "true" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page. Use a sample size for each page equal to the ones in **ab_data.csv**. Perform the sampling distribution for the difference in **converted** between the two pages over 10,000 iterations of calculating an estimate from the null. Use the cells below to provide the necessary parts of this simulation. If this doesn't make complete sense right now, don't worry - you are going to work through the problems below to complete this problem. You can use **Quiz 5** in the classroom to make sure you are on the right track. a. What is the **convert rate** for $p_{new}$ under the null?p_new = df2['converted'].mean() p_newb. What is the **convert rate** for $p_{old}$ under the null?p_old = df2['converted'].mean() p_oldc. What is $n_{new}$?n_new = df2.query('landing_page == "new_page"').shape[0] n_newd. What is $n_{old}$?n_old = df2.query('landing_page == "old_page"').shape[0] n_olde. Simulate $n_{new}$ transactions with a convert rate of $p_{new}$ under the null. Store these $n_{new}$ 1's and 0's in **new_page_converted**.new_page_converted = np.random.choice([0, 1], size=n_new, p=[p_new, (1-p_new)])f. Simulate $n_{old}$ transactions with a convert rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in **old_page_converted**.old_page_converted = np.random.choice([0, 1], size=n_old, p=[p_old, (1-p_old)])g. Find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).p_diff = new_page_converted.mean() - old_page_converted.mean() p_diffh. Simulate 10,000 $p_{new}$ - $p_{old}$ values using this same process similarly to the one you calculated in parts **a. through g.** above. Store all 10,000 values in **p_diffs**.p_diffs = [] for _ in range(10000): new_page_converted = np.random.choice([0, 1], size=n_new, p=[p_new, (1-p_new)]).mean() old_page_converted = np.random.choice([0, 1], size=n_old, p=[p_old, (1-p_old)]).mean() diff = new_page_converted - old_page_converted p_diffs.append(diff)i. Plot a histogram of the **p_diffs**. Does this plot look like what you expected? Use the matching problem in the classroom to assure you fully understand what was computed here.plt.hist(p_diffs) plt.xlabel('p_diffs') plt.ylabel('Frequency') plt.title('Plot of 10K simulated p_diffs') plt.axvline(treat_convert - control_convert, color='r');j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?act_diff = treat_convert - control_convert act_diff p_diffs = np.array(p_diffs) p_diffs (act_diff < p_diffs).mean()k. In words, explain what you just computed in part **j.**. What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages? Results: The p-value calculated is 0.9065. This is far greater than the typical $\alpha$ level of 0.05 in business studies. (An $\alpha$ level of 0.05 indicates that we have a 5% chance of committing a Type I error if the null is true.) As such, we would fail to reject the null and conclude that there is not sufficient evidence to say that that there is a difference between the two values. l. We could also use a built-in to achieve similar results. 
Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Fill in the below to calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer the the number of rows associated with the old page and new pages, respectively.import statsmodels.api as sm convert_old = df2.query('group == "control" & converted == 1')['converted'].count() convert_new = df2.query('group == "treatment" & converted == 1')['converted'].count() n_old = df2.query('landing_page == "new_page"').shape[0] n_new = df2.query('landing_page == "old_page"').shape[0]m. Now use `stats.proportions_ztest` to compute your test statistic and p-value. [Here](http://knowledgetack.com/python/statsmodels/proportions_ztest/) is a helpful link on using the built in.sm.stats.proportions_ztest([convert_new, convert_old], [n_new, n_old], alternative='larger')n. What do the z-score and p-value you computed in the previous question mean for the conversion rates of the old and new pages? Do they agree with the findings in parts **j.** and **k.**? The calculated values align with those obtained during the bootstrapped hypothesis testing. Part III - A regression approach`1.` In this final part, you will see that the result you acheived in the previous A/B test can also be acheived by performing regression.a. Since each row is either a conversion or no conversion, what type of regression should you be performing in this case? Logistic Regression b. The goal is to use **statsmodels** to fit the regression model you specified in part **a.** to see if there is a significant difference in conversion based on which page a customer receives. However, you first need to create a colun for the intercept, and create a dummy variable column for which page each user received. Add an **intercept** column, as well as an **ab_page** column, which is 1 when an individual receives the **treatment** and 0 if **control**.df2[['ab_page', 'old_page']] = pd.get_dummies(df2['landing_page']) df2['intercept'] = 1 df2.head()c. Use **statsmodels** to import your regression model. Instantiate the model, and fit the model using the two columns you created in part **b.** to predict whether or not an individual converts.#Instantiate and fit the model log_mod = sm.Logit(df2['converted'], df2[['intercept', 'ab_page']]) result = log_mod.fit()Optimization terminated successfully. Current function value: 0.366118 Iterations 6d. Provide the summary of your model below, and use it as necessary to answer the following questions.# Workaround for known bug with .summary() with updated scipy from scipy import stats stats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df) result.summary()e. What is the p-value associated with **ab_page**? Why does it differ from the value you found in the **Part II**? **Hint**: What are the null and alternative hypotheses associated with your regression model, and how do they compare to the null and alternative hypotheses in the **Part II**? The p-value 0.190 here remains above an $\alpha$ level of 0.05 but is different because this is a two tailed test. We will still reject the null in this situation.# Calculate area of lower tail p_lower = (p_diffs < act_diff).mean() # Calculate area of upper tail upper = p_diffs.mean() - act_diff p_upper = (p_diffs > upper).mean() # Calculate total tail area p_lower + p_upperf. 
Now, you are considering other things that might influence whether or not an individual converts. Discuss why it is a good idea to consider other factors to add into your regression model. Are there any disadvantages to adding additional terms into your regression model? Considering other factors is a good idea as these factors may contribute to the significance of our test results and leads to more accurate decisions. One of the disadvantages of adding additional terms into the regression model is Simpson's paradox where the combined impact of different variables disappears or reverses when these variables are combined, but appears where these variables are tested individually. g. Now along with testing if the conversion rate changes for different pages, also add an effect based on which country a user lives. You will need to read in the **countries.csv** dataset and merge together your datasets on the approporiate rows. [Here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html) are the docs for joining tables. Does it appear that country had an impact on conversion? Don't forget to create dummy variables for these country columns - **Hint: You will need two columns for the three dummy varaibles.** Provide the statistical output as well as a written response to answer this question.countries_df = pd.read_csv('countries.csv') # Making an inner join by using two dataframes df_new = countries_df.set_index('user_id').join(df2.set_index('user_id'), how='inner') df_new.head() # finding unique countries in a column df_new['country'].unique() # creating dummies df_new[['CA', 'UK', 'US']] = pd.get_dummies(df_new['country']) df_new.head() # applying logistic regression and diplaying the result summary log_mod = sm.Logit(df_new['converted'], df_new[['intercept', 'CA', 'UK']]) result = log_mod.fit() result.summary()Optimization terminated successfully. Current function value: 0.366116 Iterations 6Once again, the p-values are greater than $\alpha$ And so we fail to reject the null. tthere is no significant contribution from country to differences in conversion rates for the two pagesdf_new['CA_page'] = df_new['CA'] * df_new['ab_page'] df_new['UK_page'] = df_new['UK'] * df_new['ab_page'] df_new.head()h. Though you have now looked at the individual factors of country and page on conversion, we would now like to look at an interaction between page and country to see if there significant effects on conversion. Create the necessary additional columns, and fit the new model. Provide the summary results, and your conclusions based on the results.log_mod = sm.Logit(df_new['converted'], df_new[['intercept', 'ab_page', 'CA', 'UK', 'CA_page', 'UK_page']]) result = log_mod.fit() result.summary()Optimization terminated successfully. Current function value: 0.366109 Iterations 6Result: None of the considered variables have significant p-values. Therefore, we will fail to reject the null and conclude that there is not sufficient evidence to suggest that there is an interaction between country and page received that will predict whether a user converts or not. 
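As an optional follow-up, the fitted coefficients are easier to read as odds ratios. A minimal sketch, assuming `result` is the fitted statsmodels `Logit` result from the cell above:
```python
import numpy as np

# Exponentiate the log-odds coefficients to read them as odds ratios.
# Values close to 1 mean the term barely changes the odds of conversion,
# which matches the non-significant p-values in the summary above.
odds_ratios = np.exp(result.params)
print(odds_ratios)
```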
Resources:https://classroom.udacity.com/courses/ud257https://www.scipy-lectures.org/packages/statistics/index.htmlhttps://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.htmlhttp://www.win-vector.com/blog/2015/06/designing-ab-tests/from subprocess import call call(['python', '-m', 'nbconvert', 'Analyze_ab_test_results_notebook.ipynb'])VS SNAP/ TANF Data Intake and Operations> This notebook uses data to generate a portion of BNIA's Vital Signs report. This colab and more can be found at https://github.com/BNIA/vitalsigns. Whats Inside?: __The Guided Walkthrough__This notebook was made to create the following Housing Vital Signs Indicators: __Indicators Used__- ✅ 106 - __TANF__ - (TANF) Percent of Families Receiving TANF - ✅ ??? - __SNAP__ - (TANF) Percent of Families Receiving TANF __Datasets Used__- ✅ TANF.TANF_201X __(106-columns)__- ✅ SNAP.SNAP_201X __(???-columns)__❌year = '19'Guided Walkthrough SETUP Enviornment: Import Modules%%capture ! pip install -U -q PyDrive ! pip install geopy ! pip install geopandas ! pip install geoplot ! pip install dataplay ! pip install matplotlib ! pip install psycopg2-binary %%capture ! apt-get install build-dep python-psycopg2 ! apt-get install libpq-dev ! apt-get install libspatialindex-dev %%capture !pip install rtree !pip install dexplot from dataplay.geoms import workWithGeometryData %%capture # These imports will handle everything import os import sys import csv import matplotlib.pyplot as plt import numpy as np import pandas as pd import geopandas as gpd from geopandas import GeoDataFrame import psycopg2 import pyproj from pyproj import Proj, transform # conda install -c conda-forge proj4 from shapely.geometry import Point from shapely import wkb from shapely.wkt import loads # https://pypi.org/project/geopy/ from geopy.geocoders import Nominatim # In case file is KML, enable support import fiona fiona.drvsupport.supported_drivers['kml'] = 'rw' fiona.drvsupport.supported_drivers['KML'] = 'rw' from IPython.display import clear_output clear_output(wait=True) import ipywidgets as widgets from ipywidgets import interact, interact_manualConfigure Enviornment# This will just beautify the output pd.set_option('display.expand_frame_repr', False) pd.set_option('display.precision', 2) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # pd.set_option('display.expand_frame_repr', False) # pd.set_option('display.precision', 2) # pd.reset_option('max_colwidth') pd.set_option('max_colwidth', 20) # pd.reset_option('max_colwidth')Prep Datasets TPOP CSA and Baltimore Get Baltimore#collapse_output #collapse_input csa = pd.read_csv('Families Denominator 2010 for TANF.csv')Get CSAcsa.head(3) csa.tail(3)SNAPimport pandas as pd import geopandas original = gpd.read_file("SNAP20"+year+"_CSACity.shp", geometry='geometry'); original.columns original.rename(columns={ 'CSA':'CSA2010', 'BaltCity':'InBaltimore'}, inplace=True) df = original[ original['CSA2010'].notnull() | original['InBaltimore'].notnull() ] print('After filtering records where a CSA or Baltimore geo-code match Exists') print( 'All rows Before Filter: ', original.shape[0] ) # rows, columns print( '# w BCity.isnull: ', df.InBaltimore.isnull().sum() ); bmorow = df[ df.CSA2010.isnull() ].shape[0] print( '# w CSA2010.isnull: ', bmorow ); csarow = df[ df.CSA2010.notnull() ].shape[0] print( '# w CSA2010.notnull: ', csarow ); print( '# rows After Filter: ', df.shape[0],'==',csarow,'+',bmorow,'==', csarow + bmorow); # add baltimore city 
df.CSA2010 = df.CSA2010.fillna('Baltimore City') snapdf = df.copy() snapdf = snapdf[['CSA2010','InBaltimore']] snapdf.head(1)TANFimport pandas as pd import geopandas original = gpd.read_file("TANF20"+year+"_CSACity.shp", geometry='geometry'); original.columns original.rename(columns={ 'CSA':'CSA2010', 'BaltCity':'InBaltimore'}, inplace=True) df = original[ original['CSA2010'].notnull() | original['InBaltimore'].notnull() ] print('After filtering records where a CSA or Baltimore geo-code match Exists') print( 'All rows Before Filter: ', original.shape[0] ) # rows, columns print( '# w BCity.isnull: ', df.InBaltimore.isnull().sum() ); bmorow = df[ df.CSA2010.isnull() ].shape[0] print( '# w CSA2010.isnull: ', bmorow ); csarow = df[ df.CSA2010.notnull() ].shape[0] print( '# w CSA2010.notnull: ', csarow ); print( '# rows After Filter: ', df.shape[0],'==',csarow,'+',bmorow,'==', csarow + bmorow); # add baltimore city df.CSA2010 = df.CSA2010.fillna('Baltimore City') tandf = df.copy() tandf = tandf[['CSA2010','InBaltimore']] tandf.head(1)106 tanf - G https://bniajfi.org/indicators/Children%20And%20Family%20Health/tanfTemporary Assistance for Needy Families (TANF) is a federal assistance program. The Act provides temporary financial assistance while aiming to get people off of that assistance, primarily through employment. Percent of Families Receiving TANFTemporary Assistance for Needy Families (TANF) is a federal assistance program. The Act provides temporary financial assistance while aiming to get people off of that assistance, primarily through employment.WORKS BUT NEEDS TO BE DIVIDED BY ? Normalization Source Population, # Houses, Avg HH Size We need the Family Households. From 2010. Census not ACS Data. df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households'] df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households'] df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18'] FINAL NOTE ^ EVERYTHING ABOVE WAS WRITTEN PRIOR TO THIS NOTICE: Normalization Source Location V P:\Project Libraries\Vital Signs\Vital Signs 12\5 Chapter Health Cheryl found this source after Seema and I were struggling. It appears to be coming from the 2010 data. Not the 5 yr aggregates. def tanf(df, csa, yr): # tanf.drop( columns=['geometry', 'Shape__Length','Shape__Area'], inplace=True) # Baltimoire has records not in the tanf.at[55,'count']=tanf['count'].sum() # Perform the calculation tanf['106-tanf'+year] = tanf['count'] / tanf['FamHH_2010'] * 1000 """ compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/Tanf/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson"); prevYear = 'tanf'+ str( int(year) - 1 ) if prevYear in compareYears.columns: tanf = tanf.merge( compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer' ) tanf['change'] = tanf['106-tanf'+year] - tanf[ prevYear ] tanf['percentChange'] = tanf['change' ] / tanf[ prevYear ] * 100 tanf['change'] = tanf['change'].apply(lambda x: "{:.2f}".format(x) ) """ print( 'Records Matching Query: ', tanf.size / len(tanf.columns) ) return tanf fin = tanf(tandf, csa, year) fin.to_csv('106-tanf'+year+'.csv', index=False) fin.head(60) #export def tanf(df, csa, yr): # Create the Numerator tanf = df.copy() tanf['count'] = 1 tanf = tanf.groupby('CSA2010').sum(numeric_only=True) # Make sure ALL csas and BaltimoreCity are included and sorted. 
tanf = csa.merge( tanf, left_on='CSA2010', right_on='CSA2010', how='outer' ) # Baltimoire may have records not in the CSA (not actually the case right now but..) tanf.at[55,'count']=tanf['count'].sum() # Perform the calculation tanf['106-tanf'+year] = tanf['count'] / tanf['FamHH_2010'] * 100 compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/Tanf/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson"); prevYear = 'tanf'+ str( int(year) - 1 ) if prevYear in compareYears.columns: tanf = tanf.merge( compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer' ) tanf['change'] = tanf['106-tanf'+year] - tanf[ prevYear ] tanf['percentChange'] = tanf['change' ] / tanf[ prevYear ] * 100 tanf['change'] = tanf['change'].apply(lambda x: "{:.2f}".format(x) ) print( 'Records Matching Query: ', tanf.size / len(tanf.columns) ) return tanf fin = tanf(tandf, csa, year) fin.to_csv('106-tanf'+year+'.csv', index=False) fin.head(60)??? SNAP - G [DESCRIPTION](https://bniajfi.org/indicators/Children%20And%20Family%20Health/SNAP)def snap(df, csa, yr): id = '107' shortname = 'snap' df['count'] = 1 # Create the Numerator numer = df.copy() # Group by CSA numer = numer.groupby('CSA2010').sum(numeric_only=True) # Make sure ALL csas and BaltimoreCity are included and sorted. numer = csa.merge( numer, left_on='CSA2010', right_on='CSA2010', how='outer' ) numer.drop( columns=['geometry', 'Shape__Length','Shape__Area'], inplace=True) # Do after sortViaCsaMerge to get index right. False records would show underneath it but still get added to the sum. numer.at[55,'count']=numer['count'].sum() # Perform the calculation numer[id+'-'+shortname+year] = numer['count'] / numer['tpop10'] * 100 netyet = """ compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/"+shortname+"/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson"); prevYear = shortname+ str( int(year) - 1 ) if prevYear in compareYears.columns: numer = numer.merge( compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer' ) numer['change'] = numer[id+'-'+shortname+year] - numer[ prevYear ] numer['percentChange'] = numer['change' ] / numer[ prevYear ] * 100 numer['change'] = numer['change'].apply(lambda x: "{:.2f}".format(x) ) print( 'Records Matching Query: ', numer.size / len(numer.columns) ) """ return numer.drop(columns=[]) fin = snap(snapdf, csa, year) fin.to_csv('107-snap'+year+'.csv', index=False) fin.head(6) #export def snap(df, csa, yr): # Create the Numerator snap = df.copy() snap['count'] = 1 snap = snap.groupby('CSA2010').sum(numeric_only=True) # Make sure ALL csas and BaltimoreCity are included and sorted. snap = csa.merge( snap, left_on='CSA2010', right_on='CSA2010', how='outer' ) # Baltimoire may have records not in the CSA (not actually the case right now but..) 
snap.at[55,'count']=snap['count'].sum() # Perform the calculation snap['???-snap'+year] = snap['count'] compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/Snap/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson"); prevYear = 'snap'+ str( int(year) - 1 ) if prevYear in compareYears.columns: snap = snap.merge( compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer' ) snap['change'] = snap['???-snap'+year] - snap[ prevYear ] snap['percentChange'] = snap['change' ] / snap[ prevYear ] * 100 snap['change'] = snap['change'].apply(lambda x: "{:.2f}".format(x) ) print( 'Records Matching Query: ', snap.size / len(snap.columns) ) return snap fin = snap(tandf, csa, year) fin.to_csv('???-snap'+year+'.csv', index=False) fin.head(60)Softmax Regression (without Scikit-Learn)This notebook provides a solution to Chapter 4 Exercise 12:>*Implement Batch Gradient Descent with early stopping for Softmax Regression (without using Scikit-Learn)*For this task I will use the [IRIS](https://archive.ics.uci.edu/ml/datasets/iris) that is packaged with Scikit-Learn and accessible via the `datasets` module.import numpy as np from sklearn.datasets import load_iris import matplotlib.pyplot as plt iris = load_iris() iris.keys() iris['data'].shape # 150 samples with 4 features iris['feature_names']In order to allow me to compare my results to those in the corresponding chapter notebook, I will focus on the `petal length (cm)` and `petal_width (cm)` features. Data Preparationfeats = ['petal length (cm)', 'petal width (cm)'] feats_idx = [iris['feature_names'].index(ft) for ft in feats] feats_idx X = iris['data'][:,feats_idx] y = iris['target'] # Need to add x0 = 1 for the bias term - note that the Logistic Regression # models in Scikit-Learn will automatically add this by default X_b = np.c_[np.ones((X.shape[0],1)),X] X_b.shape, y.shape # m = number of samples # n = number of features(use X as we dont want to count the bias term) m, n = X.shapeTrain/Test splitratio_valid = 0.2 ratio_test = 0.2 valid_count = int(m * ratio_valid) test_count = int(m * ratio_test) train_count = m - valid_count - test_count print(f'Number of training samples: {train_count}') print(f'Number of validation samples: {valid_count}') print(f'Number of test samples: {test_count}') # set random seed to same as exercise solution np.random.seed(2042) perms = np.random.permutation(m) X_train = X_b[perms[:train_count]] y_train = y[perms[:train_count]] X_valid = X_b[perms[train_count:train_count + valid_count]] y_valid = y[perms[train_count:train_count + valid_count]] X_test = X_b[perms[train_count + valid_count:]] y_test = y[perms[train_count + valid_count:]] print(X_train.shape, y_train.shape, X_valid.shape, y_valid.shape, X_test.shape, y_test.shape, sep='\n')Number of training samples: 90 Number of validation samples: 30 Number of test samples: 30 (90, 3) (90,) (30, 3) (30,) (30, 3) (30,)Class Probabilities# For multiclass classifications we need to convert the single target # class integer into an array of values indictating whether or not # the sample belongs to each class - 0/1. # This is similar to One Hot Encoding! # The result is a 1 x num_classes vector for each record and therefore # an m x num_classes matrix. def to_one_hot(y): n_classes = y.max() + 1 # only works if the provided y captures all classes m = len(y) Y_one_hot = np.zeros((m, n_classes)) # indexes are determined pairwise i.e. 
[sample_1,target_1] = 1 # therefore this has the effect of, for each sample, setting # a value of 1 in the one hot column where the index = target class y # e.g. # sample_1 = row 0, target_1 = y[0] = 2 # Y_one_hot[0,2] = 1 Y_one_hot[np.arange(m), y] = 1 return Y_one_hot to_one_hot(y_train[:10]) # Now create the one hot target vectors for the training, validation, # and test data y_train_oh = to_one_hot(y_train) y_valid_oh = to_one_hot(y_valid) y_test_oh = to_one_hot(y_test) # Check shapes are as expected y_train_oh.shape, y_valid_oh.shape, y_test_oh.shapeTraining the ModelFirst let's revisit the key functions we need to train the model:Softmax function:$\hat{p}_k = \sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \dfrac{\exp\left(s_k(\mathbf{x})\right)}{\sum\limits_{j=1}^{K}{\exp\left(s_j(\mathbf{x})\right)}}$Cross Entropy cost function:$J(\mathbf{\Theta}) =- \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)}$Derivitives wrt. theta:$\nabla_{\mathbf{\theta}^{(k)}} \, J(\mathbf{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}}$Note that in the case of this multi class problem, each class has its own weight vector, forming a weight matrix $\mathbf{\Theta}$.# Write softmax function which outputs the probability that an instance # belongs to each class. def softmax(logits): exps = np.exp(logits) # The next step calculates the sums across each column i.e. # the sum across the classes per training sample x. # Keepdims ensure that the rows dimesion is kept which enables # the subsequent division step, diving each of the elements in # each row by the sum of the row. exp_sums = np.sum(exps, axis=1, keepdims=True) return exps / exp_sums # Determine Theta shape # The weights of each class are column vectors num_features = X_train.shape[1] # Bias(x0), x1, x2 num_classes = len(np.unique(y_train)) # number of unique values in the original training labels print(num_features) print(num_classes) # Train the model! eta = 0.01 # Learning rate n_iterations = 5001 m = len(X_train) # A small value epsilon will be added to the softmax probabilites # since a probability of 0 would lead to log(prob) = NaN. # This way we can avoid NaN values epsilon = 1e-7 # No seed set here so Theta will be different each run Theta = np.random.randn(num_features, num_classes) for iteration in range(n_iterations): logits = X_train.dot(Theta) # aka Softmax Score S(X) y_proba = softmax(logits) loss = -np.mean(np.sum(y_train_oh * np.log(y_proba + epsilon), axis=1)) # Cross Entropy error = y_proba - y_train_oh # used for Gradients i.e. 
models probabilities - actual probabilities if iteration % 500 == 0: print(iteration, loss) gradients = 1/m * X_train.T.dot(error) Theta = Theta - eta * gradients # Optimum Theta ThetaEvaluating the Model# Predictions logits = X_valid.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) # finds the index of the maxiumum value (probability) in each row accuracy_score = np.mean(y_predict == y_valid) # simple proportion of correct predictions accuracy_scoreAdding L2 Regularisationeta = 0.1 # Also increased learning rate n_iterations = 5001 m = len(X_train) epsilon = 1e-7 alpha = 0.1 # regularization hyperparameter # No seed set here so Theta will be different each run Theta = np.random.randn(num_features, num_classes) for iteration in range(n_iterations): logits = X_train.dot(Theta) Y_proba = softmax(logits) xentropy_loss = -np.mean(np.sum(y_train_oh * np.log(Y_proba + epsilon), axis=1)) l2_loss = 1/2 * np.sum(np.square(Theta[1:])) # bias weights excluded loss = xentropy_loss + alpha * l2_loss error = Y_proba - y_train_oh if iteration % 500 == 0: print(iteration, loss) # Update the gradient vector by adding alpha * weights - ensuring bias weights # i.e. theta row 1 are set to 0 gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, num_classes]), alpha * Theta[1:]] Theta = Theta - eta * gradients logits = X_valid.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) accuracy_score = np.mean(y_predict == y_valid) accuracy_scoreThis shows a greater loss than previously (model is less flexible due to regularisation) but a better ability to generalise (No overfitting training data)!However, the train/validation sets are relatively small - it could just be luck that the model predicted everything correct on the validation set. Cross validation should provide a better evaluation metric. Early StoppingTo implement early stopping we need to evaluate the model at each epoch and stop when the validation metric starts to deteriorate. Here I use the loss.eta = 0.1 n_iterations = 5001 m = len(X_train) epsilon = 1e-7 alpha = 0.1 best_loss = np.inf # No seed set here so Theta will be different each run Theta = np.random.randn(num_features, num_classes) for iteration in range(n_iterations): logits = X_train.dot(Theta) Y_proba = softmax(logits) xentropy_loss = -np.mean(np.sum(y_train_oh * np.log(Y_proba + epsilon), axis=1)) l2_loss = 1/2 * np.sum(np.square(Theta[1:])) loss = xentropy_loss + alpha * l2_loss error = Y_proba - y_train_oh gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, num_classes]), alpha * Theta[1:]] new_Theta = Theta - eta * gradients # new variable so that the starting Theta is not lost # Evaluate on validation set logits = X_valid.dot(new_Theta) Y_proba = softmax(logits) xentropy_loss = -np.mean(np.sum(y_valid_oh * np.log(Y_proba + epsilon), axis=1)) l2_loss = 1/2 * np.sum(np.square(new_Theta[1:])) loss = xentropy_loss + alpha * l2_loss # regularised loss if iteration % 500 == 0: print(iteration, loss) if loss < best_loss: best_loss = loss # Can update Theta with the new_Theta since the loss is now the best Theta = new_Theta else: # Don't update Theta to new_Theta since it has a greater loss and we want to stop print(iteration - 1, best_loss) print(iteration, loss, "early stopping!") break logits = X_valid.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba,axis=1) accuracy_score = np.mean(y_predict == y_valid) accuracy_scoreModel still has the same high accuracy but needed around half the epochs! 
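One caveat (my own aside, not part of the exercise): the loop above stops the moment the validation loss rises once, which can be sensitive to noise. A common variant keeps a patience counter and only stops after several non-improving iterations, keeping the best parameters seen so far. Below is a minimal sketch of that pattern on a dummy loss sequence; the function and variable names are illustrative, not from the book.
import numpy as np

def best_step_with_patience(val_losses, patience=5):
    # val_losses stands in for the per-iteration validation losses computed above;
    # returns the iteration with the best loss, stopping `patience` steps after
    # the last improvement.
    best_loss, best_step, waited = np.inf, -1, 0
    for step, loss in enumerate(val_losses):
        if loss < best_loss:
            best_loss, best_step, waited = loss, step, 0  # improvement: reset the counter
        else:
            waited += 1
            if waited >= patience:
                break  # several non-improving steps in a row: give up
    return best_step, best_loss

# toy loss curve: improves, then fluctuates
best_step_with_patience([1.0, 0.8, 0.6, 0.55, 0.56, 0.57, 0.58, 0.56, 0.59], patience=3)
The same idea would slot into the loop above by replacing the immediate break with this counter.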
Plot Model Predictions# For this we want to sample points in the feature space # i.e. all combinations of x1,x2 (petal_length, petal_width) min_x1, min_x2 = X.min(axis=0) max_x1, max_x2 = X.max(axis=0) print(f'Petal Length: {min_x1} - {max_x1}') print(f'Petal Width: {min_x2} - {max_x2}') # Meshgrid is used to sample the x1, x2 values over this feature space (and a little bit more) x0, x1 = np.meshgrid( np.linspace(0, 8, 500).reshape(-1, 1), # 500 points for petal length np.linspace(0, 3.5, 200).reshape(-1, 1), # 200 points for petal width ) X_new = np.c_[x0.ravel(), x1.ravel()] # Add the bias term X_new_with_bias = np.c_[np.ones([len(X_new), 1]), X_new] logits = X_new_with_bias.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) zz1 = Y_proba[:, 1].reshape(x0.shape) # probability that the instance is in the Iris Versicolour class zz = y_predict.reshape(x0.shape) plt.figure(figsize=(10, 4)) plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris virginica") plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris versicolor") plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris setosa") from matplotlib.colors import ListedColormap custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0']) plt.contourf(x0, x1, zz, cmap=custom_cmap) contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg) plt.clabel(contour, inline=1, fontsize=12) plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.legend(loc="upper left", fontsize=14) plt.axis([0, 7, 0, 3.5]) plt.show()Evaluate on Test Setlogits = X_test.dot(Theta) Y_proba = softmax(logits) y_predict = np.argmax(Y_proba, axis=1) accuracy_score = np.mean(y_predict == y_test) accuracy_scoreRecurrent Neural Networks Import moduleimport pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense import tensorflow.keras.backend as K from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import LSTM, GRU df = pd.read_csv('../data/cansim-0800020-eng-6674700030567901031.csv', skiprows=6, skipfooter=9, engine='python') df.head() from pandas.tseries.offsets import MonthEnd # Convert the string column to dates *** Very important when working with RNNs!!!!
df['Adjustments'] = pd.to_datetime(df['Adjustments']) + MonthEnd(1) df = df.set_index('Adjustments') df.head() df.plot() split_date = pd.Timestamp('01-01-2011') train = df.loc[:split_date, ['Unadjusted']] test = df.loc[split_date:, ['Unadjusted']] ax = train.plot() test.plot(ax=ax) plt.legend(['train', 'test'])Scaling and train/test splitfrom sklearn.preprocessing import MinMaxScaler sc = MinMaxScaler() train_sc = sc.fit_transform(train) test_sc = sc.transform(test) train_sc[:4] X_train = train_sc[:-1] y_train = train_sc[1:] X_test = test_sc[:-1] y_test = test_sc[1:] early_stop = EarlyStopping(monitor='loss', patience=3, verbose=1) train_sc.shape train_sc_df = pd.DataFrame(train_sc, columns=['Scaled'], index=train.index) test_sc_df = pd.DataFrame(test_sc, columns=['Scaled'], index=test.index) train_sc_df.head()Create 12-month rolling windowsfor s in range(1, 13): train_sc_df['shift_{}'.format(s)] = train_sc_df['Scaled'].shift(s) test_sc_df['shift_{}'.format(s)] = test_sc_df['Scaled'].shift(s) train_sc_df.head(13) X_train = train_sc_df.dropna().drop('Scaled', axis=1) y_train = train_sc_df.dropna()[['Scaled']] X_test = test_sc_df.dropna().drop('Scaled', axis=1) y_test = test_sc_df.dropna()[['Scaled']] X_train.head() X_train.shape X_train = X_train.values X_test= X_test.values y_train = y_train.values y_test = y_test.values X_train_t = X_train.reshape(X_train.shape[0], 1, 12) X_test_t = X_test.reshape(X_test.shape[0], 1, 12) X_train_t.shapeCreate the RNN modelK.clear_session() model = Sequential() # Add the LSTM layer model.add(LSTM(6, input_shape=(1, 12))) # Add the Dense output layer model.add(Dense(1)) # Set the compile parameters model.compile(loss='mean_squared_error', optimizer='adam') model.summary() model.fit(X_train_t, y_train, epochs=100, batch_size=1, verbose=1, callbacks=[early_stop]) y_pred = model.predict(X_test_t) plt.plot(y_test) plt.plot(y_pred) from sklearn.metrics import mean_squared_error mean_squared_error(y_test, y_pred, squared=False)Clean column names (delete spaces, special characters, ...)params = preprocessing.params dfs = preprocessing._transform_column_names(INPUT_FILE_PATH, OUTPUT_FILE_PATH) dfs['ingredients'].head()Generate a dataframe with statistics of ingredientsorig_ing_df = dfs['ingredients'] desc_ingred_df = preprocessing._get_describe_ingredients(orig_ing_df) print(desc_ingred_df.shape) desc_ingred_df.head(5)(213, 3)Merge original ingredients dataset with statistics (min, max)ing_df = orig_ing_df.merge(desc_ingred_df, how='left', left_on='Ingredients', right_on='Ingredients') print(ing_df.shape) ing_df.head(5)(5671, 9)Create sparse matrix with ingredientskey_columns = ['Ciqual_AGB', 'Nom_Francais', 'Groupe_aliment', 'Sous-groupe_aliment', 'LCI_Name' , 'Ingredients'] pivot_column = 'Ingredients' value_columns = 'Score_unique_EF_' drop_cols = ['Nom_Francais', 'Groupe_aliment', 'Sous-groupe_aliment', 'LCI_Name'] sparse_ing_df = preprocessing._create_sparse_ingredients(ing_df,key_columns, pivot_column, value_columns) sparse_ing_df.head(3)Create sparse matrix where min_EF, max_EF of each ingredient are in columnspivot_index = ['Ciqual_AGB', 'Nom_Francais', 'Groupe_aliment', 'Sous-groupe_aliment', 'LCI_Name'] pivot_columns = ['Ingredients'] pivot_values = ['min_EF', 'max_EF'] drop_cols = ['Nom_Francais', 'Groupe_aliment', 'Sous-groupe_aliment', 'LCI_Name'] pivot_ing_minmax = preprocessing._create_features_min_max_ing(ing_df, pivot_index, pivot_columns, pivot_values) pivot_ing_minmax.head()Merge sparse ingredients data with pivoted min max valuesdrop_cols = ['Nom_Francais',
'Groupe_aliment', 'Sous-groupe_aliment', 'LCI_Name'] new_ing_df = sparse_ing_df.merge(pivot_ing_minmax, how = 'inner', left_on='Ciqual_AGB', right_on="Ciqual_AGB") dfs['ingredients'] = new_ing_df new_ing_df.head(5) synthese_key = params.synthese.index_key.to_list()#get_param('synthese', 'index_key') ingredients_key = params.ingredients.index_key.to_list() #get_param('ingredients', 'index_key') etapes_key = params.etapes.index_key.to_list()#get_param('etapes', 'index_key') new_ingred_df = preprocessing.merge_dataset(dfs['synthese'], dfs['ingredients'], synthese_key,ingredients_key) new_etape_df = preprocessing.merge_dataset(dfs['synthese'], dfs['etapes'], synthese_key, etapes_key) new_ingred_df.head(5)Clean nan values and save the cleaned files in OUPUT_FILE_PATHnew_ingred_df.fillna(0, inplace=True) new_etape_df.fillna(0, inplace=True) OUTPUT_FILE_PATH = './../data/processed' new_ingred_df.to_csv(os.path.join(OUTPUT_FILE_PATH, params.ingredients.file_name), index=False) new_etape_df.to_csv(os.path.join(OUTPUT_FILE_PATH, params.etapes.file_name), index=False) new_ingred_df_*Using Qiskit Aqua for set packing problems*_Given a collection $S$ of subsets of a set $X$, the set packing problem tries to find the subsets that are pairwise disjoint (in other words, no two of them share an element). The goal is to maximize the number of such subsets.We will go through two examples to show:1. How to run the optimization2. How how to run the optimization with the VQE. The problem and the brute-force method.The problem is as follows. First, let us print the list of subsets.import numpy as np import json from qiskit import BasicAer from qiskit.optimization.ising import set_packing from qiskit.aqua.algorithms import ExactEigensolver from qiskit.optimization.ising.common import sample_most_likely input_file = 'sample.setpacking' with open(input_file) as f: list_of_subsets = json.load(f) print(list_of_subsets)[[4, 5], [4], [5]]The brute-force method is as follows. Basically, we exhaustively try all the binary assignments. In each binary assignment, the entry of a subset is either 0 (meaning the subset is not taken) or 1 (meaning the subset is taken). We print the binary assignment that satisfies the definition of the set packing.def brute_force(): # brute-force way: try every possible assignment! 
def bitfield(n, L): result = np.binary_repr(n, L) return [int(digit) for digit in result] # [2:] to chop off the "0b" part L = len(list_of_subsets) max = 2**L max_v = -np.inf for i in range(max): cur = bitfield(i, L) cur_v = set_packing.check_disjoint(cur, list_of_subsets) if cur_v: if np.count_nonzero(cur) > max_v: max_v = np.count_nonzero(cur) return max_v size = brute_force() print("Size of set packing", size) qubit_op, offset = set_packing.get_operator(list_of_subsets)Part I: Run the optimization Here we directly construct the algorithm and then run() it to get the result.algo = ExactEigensolver(qubit_op) result = algo.run() x = sample_most_likely(result['eigvecs'][0]) ising_sol = set_packing.get_solution(x) np.testing.assert_array_equal(ising_sol, [0, 1, 1]) oracle = brute_force() print("Size of set packing", np.count_nonzero(ising_sol))Size of set packing 2Part II: Run the optimization with the VQE We can create the objects directly ourselves too and run VQE for the resultfrom qiskit.aqua import aqua_globals from qiskit.aqua.algorithms import VQE from qiskit.aqua.components.optimizers import COBYLA from qiskit.aqua.components.variational_forms import RY aqua_globals.random_seed = 100 optimizer = COBYLA() var_form = RY(qubit_op.num_qubits, depth=5, entanglement='linear') vqe = VQE(qubit_op, var_form, optimizer) backend = BasicAer.get_backend('statevector_simulator') result = vqe.run(backend) x = sample_most_likely(result['eigvecs'][0]) ising_sol = set_packing.get_solution(x) print("Size of set packing", np.count_nonzero(ising_sol))Size of set packing 2Import libraries, define const values, and set URLs pathSet the root address (REST_API_ADDRESS) based on your docker exposed IDimport json import folium from geopandas import GeoDataFrame from pysal.viz.mapclassify import Natural_Breaks import requests id_field = 'id' value_field = 'score' num_bins = 4 fill_color = 'YlOrRd' fill_opacity = 0.9 # REST URL REST_API_ADDRESS = 'http://localhost:4646/' Alive_URL = REST_API_ADDRESS + 'alive' BRS_URL = REST_API_ADDRESS + 'BRS' RemoveTables_URL=REST_API_ADDRESS + 'removeTables' Flush_URL = REST_API_ADDRESS + 'flushBuffer' ChangeProteus_URL = REST_API_ADDRESS + 'changeProteus' ChangeAlgo_URL = REST_API_ADDRESS + 'changeAlgo' ChangeMemorySize_URL = REST_API_ADDRESS + 'changeMemorySize'Check BRS is aliveCheck the status of BRSresponse = requests.get(Alive_URL) print(response.text)I am alive algorithm 9 memorySize 10, and Spark cluster is free.Set Proteus credentialSet the proteus credentials. BRS needs this information to fetch tables.ProteusURL="" ProteusUsername="" ProteusPassword="" data={'url' : ProteusURL, 'username' : ProteusUsername, 'pass':ProteusPassword} response = requests.get(ChangeProteus_URL,params=data) print(response.text)Change algorithmChange algorithm to unif, single, multi, and hybrid. The fastest is the hybrid, and unif is an uniform grid. Default is hybrid.algo="hybrid" data={'algo':algo} response = requests.get(ChangeAlgo_URL,params=data) print(response.text)Algorithm set to hybridRemove previous resultsBRS buffers previous results to avoid repeating the same query. To remove buffered resultsresponse = requests.get(Flush_URL) print(response.text)buffer.tmp is flushed.Change memory sizeIndicate the size of RAM for the spark instance. Default is 10G.memorySize=11 data={'memorySize':memorySize} response = requests.get(ChangeMemorySize_URL,params=data) print(response.text)Identify industrial districtsAn example of a BRS query. 
The table must include columns lat and lon which are coordinates. The f indicate the column name (revenue, numberOfEmployees, etc) for the scoring function. Keywords are used to apply filter on the records. You can define two columns for filtering at the same time. Separate keywords with comma. For examle, if you need to filter companies with ATECO code of 10.10 or 10.11, set keywordsColumn to "ATECO" and keywords to "10.10,10.11". Moreover, at the same query, if you want to filter companies of a specific province (e,g,. pisa), set keywordsColumn2 to "province" and set keywords2 to "pisa".This query detects top 5 regions sized 10km*10km that contain the most number (f is null) of startup companies( filter column flags with startup-registroimprese). The second keywordColumn2 is set empty.CAUTION: You can run this query with a pre-fetched table (BRSflags which is been injected into the docker image) in order to check the REST API and results.table = "BRSflags" # This table already exists in the docker image topk = 10 # eps = .1 # We measure distance in radians, where 1 radian is around 100km, and epsilon is the length of each side of the region f = "null" # Set f to null if the scoring fucntion is number of elements dist = True keywordsColumn = "flags" keywords = "startup-registroimprese" keywordsColumn2 = "" keywords2 = "" data = {'topk' : topk, 'eps' : eps, 'f' : f, 'input' : table, "keywordsColumn" : keywordsColumn, "keywords" : keywords,"keywordsColumn2":keywordsColumn2,"keywords2":keywords2,"dist":dist} response = requests.get(BRS_URL, params=data) print(response.text)[ { "rank":1, "center":[9.185410000000001,45.484415000000006], "score":1699.0 } ,{ "rank":2, "center":[12.488605000000002,41.900499999999994], "score":660.0 } ,{ "rank":3, "center":[7.661465,45.066535], "score":306.0 } ,{ "rank":4, "center":[14.238444999999999,40.86993], "score":247.0 } ,{ "rank":5, "center":[11.376294999999999,44.49163500000001], "score":224.0 } ,{ "rank":6, "center":[11.90895,45.402884795000006], "score":167.0 } ,{ "rank":7, "center":[13.343405,38.139575], "score":136.0 } ,{ "rank":8, "center":[8.920905,44.44473], "score":135.0 } ,{ "rank":9, "center":[9.64011,45.689725], "score":118.0 } ,{ "rank":10, "center":[11.235539535000001,43.788819805], "score":107.0 } ]Initialize the map and visualize the output regionsThis code helps you to visualize the output of previous cellres = json.loads(response.text) results_geojson={"type":"FeatureCollection","features":[]} for region in res: results_geojson['features'].append({"type": "Feature", "geometry": { "type": "Point", "coordinates": region['center']}, "properties": { "id": region['rank'], "score": region['score'] }}) m = folium.Map( location=[45.474989560000004,9.205786594999998], tiles='Stamen Toner', zoom_start=11 ) gdf = GeoDataFrame.from_features(results_geojson['features']) gdf.crs = {'init': 'epsg:4326'} gdf['geometry'] = gdf.buffer(data['eps']).envelope threshold_scale = Natural_Breaks(gdf[value_field], k=num_bins).bins.tolist() threshold_scale.insert(0, gdf[value_field].min()) choropleth = folium.Choropleth(gdf, data=gdf, columns=[id_field, value_field], key_on='feature.properties.{}'.format(id_field), fill_color=fill_color, fill_opacity=fill_opacity, threshold_scale=threshold_scale).add_to(m) fields = list(gdf.columns.values) fields.remove('geometry') tooltip = folium.features.GeoJsonTooltip(fields=fields) choropleth.geojson.add_child(tooltip) m/usr/local/lib/python3.6/dist-packages/pyproj/crs/crs.py:53: FutureWarning: '+init=:' syntax is deprecated. 
':' is the preferred initialization method. When making the change, be mindful of axis order changes: https://pyproj4.github.io/pyproj/stable/gotchas.html#axis-order-changes-in-proj-6 return _prepare_from_string(" ".join(pjargs))--------------------------------------------------------------------------------------------------------- Identify industrial districtsFind the top 10 regions sized 50km*50km that contains the highest number of employees( f is numberOfEmployees) working in production of pasta.topk=20 eps=0.5 keywordsColumn="ATECO" keywords="10.73" f="numberOfEmployees" dist = True table = "BRS" keywordsColumn2 = "" keywords2 = "" data = {'topk' : topk, 'eps' : eps, 'f' : f, 'input' : table, "keywordsColumn" : keywordsColumn, "keywords" : keywords,"keywordsColumn2":keywordsColumn2,"keywords2":keywords2,"dist":dist} response = requests.get(BRS_URL, params=data) print(response.text)[ { "rank":1, "center":[14.153735145,42.292225], "score":1426.0 } ,{ "rank":2, "center":[14.43571968,40.867641045], "score":1235.0 } ,{ "rank":3, "center":[9.16166053,45.61729499999999], "score":1034.0 } ,{ "rank":4, "center":[12.064605,45.59483], "score":918.0 } ,{ "rank":5, "center":[12.589753030000002,41.89389267], "score":863.0 } ,{ "rank":6, "center":[7.827308894999999,44.888835], "score":721.0 } ,{ "rank":7, "center":[8.99588,44.54887], "score":631.0 } ,{ "rank":8, "center":[14.685430000000002,41.370707675000006], "score":619.0 } ,{ "rank":9, "center":[11.367768439999999,44.646479525], "score":593.0 } ,{ "rank":10, "center":[13.653664999999998,43.07360500000001], "score":553.0 } ,{ "rank":11, "center":[10.767778665000002,45.373664999999995], "score":536.0 } ,{ "rank":12, "center":[12.770374260000002,43.84423], "score":449.0 } ,{ "rank":13, "center":[10.564810000000001,44.629635], "score":425.0 } ,{ "rank":14, "center":[10.19115184,43.965001865], "score":378.0 } ,{ "rank":15, "cen[...]Initialize the map and visualize the output regionsres = json.loads(response.text) results_geojson={"type":"FeatureCollection","features":[]} for region in res: results_geojson['features'].append({"type": "Feature", "geometry": { "type": "Point", "coordinates": region['center']}, "properties": { "id": region['rank'], "score": region['score'] }}) m = folium.Map( location=[44.629635,10.563514999999999], tiles='Stamen Toner', zoom_start=11 ) gdf = GeoDataFrame.from_features(results_geojson['features']) gdf.crs = {'init': 'epsg:4326'} gdf['geometry'] = gdf.buffer(data['eps']/2).envelope threshold_scale = Natural_Breaks(gdf[value_field], k=num_bins).bins.tolist() threshold_scale.insert(0, gdf[value_field].min()) choropleth = folium.Choropleth(gdf, data=gdf, columns=[id_field, value_field], key_on='feature.properties.{}'.format(id_field), fill_color=fill_color, fill_opacity=fill_opacity, threshold_scale=4).add_to(m) fields = list(gdf.columns.values) fields.remove('geometry') tooltip = folium.features.GeoJsonTooltip(fields=fields) choropleth.geojson.add_child(tooltip) m/usr/local/lib/python3.6/dist-packages/pyproj/crs/crs.py:53: FutureWarning: '+init=:' syntax is deprecated. ':' is the preferred initialization method. 
When making the change, be mindful of axis order changes: https://pyproj4.github.io/pyproj/stable/gotchas.html#axis-order-changes-in-proj-6 return _prepare_from_string(" ".join(pjargs)) /home/hamid/.local/lib/python3.6/site-packages/pysal/viz/mapclassify/classifiers.py:1428: UserWarning: Warning: Not enough unique values in array to form k classes Warn(ms, UserWarning) /home/hamid/.local/lib/python3.6/site-packages/pysal/viz/mapclassify/classifiers.py:1429: UserWarning: Warning: setting k to 2 Warn("Warning: setting k to %d" % uvk, UserWarning)--------------------------------------------------------------------------------------------------------- Identify areas with a high concentration of restaurants or hotelsThis is example of applying filter on two columns at the same time where it identifies top 10 hotspots in Pisa province (look at the keywordsColumn) for number of (f in null) restaurants, ice-cream parlour, pastry shop, etc (look at the keywordsColumn2).table = "BRS" topk = 10 eps = 0.01 f = "null" dist = True keywordsColumn = "null" keywords = "null" keywordsColumn2 = "" keywords2 = "" data = {'topk' : topk, 'eps' : eps, 'f' : f, 'input' : table, "keywordsColumn" : keywordsColumn, "keywords" : keywords,"keywordsColumn2":keywordsColumn2,"keywords2":keywords2,"dist":dist} response = requests.get(BRS_URL, params=data) print(response.text[:-22])[ { "rank":1, "center":[10.40094993,43.71824257499999], "score":24.0 } ,{ "rank":2, "center":[10.294949595,43.62871611], "score":13.0 } ,{ "rank":3, "center":[10.3908863,43.72260633999999], "score":9.0 } ,{ "rank":4, "center":[10.378360520000001,43.72100330999999], "score":7.0 } ,{ "rank":5, "center":[10.547071110000001,43.67660426000001], "score":6.0 } ,{ "rank":6, "center":[10.638674175,43.66692147499999], "score":6.0 } ,{ "rank":7, "center":[10.861097015000002,43.402155565], "score":6.0 } ,{ "rank":8, "center":[10.618839295,43.667777785], "score":6.0 } ,{ "rank":9, "center":[10.40261665,43.706955435000005], "score":6.0 } ,{ "rank":10, "center":[10.78296901,43.708708949999995], "score":6.0 } ]Initialize the map and visualize the output regionsres = json.loads(response.text[:-22]) results_geojson={"type":"FeatureCollection","features":[]} for region in res: results_geojson['features'].append({"type": "Feature", "geometry": { "type": "Point", "coordinates": region['center']}, "properties": { "id": region['rank'], "score": region['score'] }}) m = folium.Map( location=[43.71682982000001,10.401120675000001], tiles='Stamen Toner', zoom_start=11 ) gdf = GeoDataFrame.from_features(results_geojson['features']) gdf.crs = {'init': 'epsg:4326'} gdf['geometry'] = gdf.buffer(data['eps']/2).envelope threshold_scale = Natural_Breaks(gdf[value_field], k=num_bins).bins.tolist() threshold_scale.insert(0, gdf[value_field].min()) choropleth = folium.Choropleth(gdf, data=gdf, columns=[id_field, value_field], key_on='feature.properties.{}'.format(id_field), fill_color=fill_color, fill_opacity=fill_opacity, threshold_scale=threshold_scale).add_to(m) fields = list(gdf.columns.values) fields.remove('geometry') tooltip = folium.features.GeoJsonTooltip(fields=fields) choropleth.geojson.add_child(tooltip) m/usr/local/lib/python3.6/dist-packages/pyproj/crs/crs.py:53: FutureWarning: '+init=:' syntax is deprecated. ':' is the preferred initialization method. 
When making the change, be mindful of axis order changes: https://pyproj4.github.io/pyproj/stable/gotchas.html#axis-order-changes-in-proj-6 return _prepare_from_string(" ".join(pjargs))Remove TablesTo remove the intermediate tables which are downloaded from proteusresponse = requests.get(RemoveTables_URL) print(response.text)1. Linear Regression 1.1. UnivariateThere are many problems in nature for which we want to obtain output values given a set of input data. Consider the problem of predicting property prices in a given city, as shown in Figure 1, where we can see several points representing different properties, each with its own price according to its size. In **regression** problems, the goal is to estimate output values from a set of input values. Thus, for the problem above, the idea is to estimate the price of a house from its size, that is, we would like to find the **straight line** that best fits the set of points in Figure 1.from matplotlib import pyplot import numpyimport numpy as np np.random.seed(42) import cv2 as cv2 print("cv2 version:",cv2.__version__) import time %matplotlib inline import matplotlib.pyplot as plt def fixColor(image): return(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) labels_file = "/cxldata/projects/yolov4/coco.names" LABELS = open(labels_file).read().strip().split("\n") COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8") LABELS[:5] weights="/cxldata/projects/yolov4/yolov4.weights" config="/cxldata/projects/yolov4/yolov4.cfg" net = cv2.dnn.readNetFromDarknet(config, weights) ln = net.getLayerNames() print (len(ln), ln ) ln = net.getLayerNames() print (len(ln), ln ) net.getUnconnectedOutLayers() ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()] print (ln) img = cv2.imread("/cxldata/projects/yolov4/soccer.jpg") img = cv2.resize(img, (608, 608)) print(img.shape) (H,W) = img.shape[:2] plt.imshow(fixColor(img)) blob = cv2.dnn.blobFromImage(img, 1 /255.0, (608,608), swapRB=True, crop=False) print("shape of blob", blob.shape) split_blob = np.hstack([blob[0,0, :, :],blob[0,1,:,:], blob[0,2,:,:],]) plt.imshow(fixColor(split_blob)) net.setInput(blob) t0 = time.time() layerOutputs = net.forward(ln) t = time.time() print('time=', t-t0) (np.array(layerOutputs)).shape boxes = [] confidences = [] classIDs = [] for output in layerOutputs: print ("Shape of each output", output.shape) # loop over each of the detections for detection in output: # extract the class ID and confidence (i.e., probability) # of the current object detection scores = detection[5:] classID = np.argmax(scores) confidence = scores[classID] # filter out weak predictions by ensuring the detected # probability is greater than the minimum probability if confidence > 0.3: # scale the bounding box coordinates back relative to # the size of the image, keeping in mind that YOLO # actually returns the center (x, y)-coordinates of # the bounding box followed by the boxes' width and # height box = detection[0:4] * np.array([W, H, W, H]) (centerX, centerY, width, height) = box.astype("int") # use the center (x, y)-coordinates to derive the top # and left corner of the bounding box x = int(centerX - (width / 2)) y = int(centerY - (height / 2)) # update our list of bounding box coordinates, # confidences, and class IDs boxes.append([x, y, int(width), int(height)]) confidences.append(float(confidence)) classIDs.append(classID) print (LABELS[classID],
detection[4], confidence) print(len(boxes)) idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3) print (len(idxs)) # ensure at least one detection exists if len(idxs) > 0: # loop over the indexes we are keeping for i in idxs.flatten(): # extract the bounding box coordinates (x, y) = (boxes[i][0], boxes[i][1]) (w, h) = (boxes[i][2], boxes[i][3]) # draw a bounding box rectangle and label on the frame color = [int(c) for c in COLORS[classIDs[i]]] cv2.rectangle(img, (x, y), (x + w, y + h), color, 2) text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i]) cv2.putText(img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) plt.imshow(fixColor(img))Columnar data analysis Array programming is a programming language paradigm like Object-Oriented Programming (OOP) and Functional Programming (FP).As physicists, we are mostly familiar with imperative, procedural, structured, object-oriented programming (see this list).from IPython.display import IFrame IFrame("http://zoom.it/6rJp", width="100%", height="440")Array programming is common to languages and systems designed for interactive data analysis. Special keyboard for all the symbols.A program was a struggle to write, but T-shirt fodder when it worked.APL (1963) pioneered programming language conciseness—and discovered the mistake of being too concise.| APL | | Numpy ||:---:|:----:|:-----:|| ι4 | | numpy.arange(4) || (3+ι4) | | numpy.arange(4) + 3 || +/(3+ι4) | | (numpy.arange(4) + 3).sum() || m ← +/(3+ι4) | | m = (numpy.arange(4) + 3).sum() |(The other extreme is writing for loops for each of the above.) The fundamental data type in this world is an array. (Some array languages don't even have non-arrays.)Unlike the others (APL, IDL, MATLAB, R), Numpy is a library, not a language, though it goes all the way back to the beginning of Python (1995) and significantly influenced Python's grammar.# Assortment of ways to make Numpy arrays import numpy, uproot print(numpy.arange(20), end="\n\n") print(numpy.linspace(-5, 5, 21), end="\n\n") print(numpy.empty(10000, numpy.float16), end="\n\n") print(numpy.full((2, 7), 999), end="\n\n") print(numpy.random.normal(-1, 0.0001, 10000), end="\n\n") print(uproot.open("data/Zmumu.root")["events"]["E1"].array(), end="\n\n")[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19] [-5. -4.5 -4. -3.5 -3. -2.5 -2. -1.5 -1. -0.5 0. 0.5 1. 1.5 2. 2.5 3. 3.5 4. 4.5 5. ] [3.422e+00 1.714e-02 6.378e+04 ... 1.881e-04 7.889e-03 8.057e-03] [[999 999 999 999 999 999 999] [999 999 999 999 999 999 999]] [-0.9999356 -1.00016087 -0.99994365 ... -1.00011919 -0.9999445 -0.99995971] [82.20186639 62.34492895 62.34492895 ... 81.27013558 81.27013558 81.56621735]a = numpy.array([2**30, 2**30 + 2**26, -1, 0, 2**30 + 2**24, 2**30 + 2**20], numpy.int32) # a = a.view(numpy.float32) # a = a.reshape((2, 3)) # a = a.astype(numpy.int64) print("data:\n", a, end="\n\n") print("type:", type(a), end="\n\n") print("dtype (type of the data it contains):", a.dtype, end="\n\n") print("shape: (size of each dimension):", a.shape, end="\n\n") # Any mathematical function that would map scalar arguments to a scalar result # maps array arguments to an array result. 
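# (Aside, not in the original notebook: a two-line illustration of the statement above,
#  before the quadratic-formula demo that follows. numpy.sqrt is one such function;
#  numpy was already imported in the cell above.)
print(numpy.sqrt(9.0))                           # scalar in -> scalar out: 3.0
print(numpy.sqrt(numpy.array([1.0, 4.0, 9.0])))  # array in  -> array out: [1. 2. 3.]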
a_array = numpy.random.uniform(5, 10, 10000); a_scalar = a_array[0] b_array = numpy.random.uniform(10, 20, 10000); b_scalar = b_array[0] c_array = numpy.random.uniform(-0.1, 0.1, 10000); c_scalar = c_array[0] def quadratic_formula(a, b, c): return (-b + numpy.sqrt(b**2 - 4*a*c)) / (2*a) print("scalar:\n", quadratic_formula(a_scalar, b_scalar, c_scalar), end="\n\n") print("array:\n", quadratic_formula(a_array, b_array, c_array), end="\n\n") # Each step in the calculation is performed over whole arrays before moving on to the next. a, b, c = a_array, b_array, c_array roots1 = (-b + numpy.sqrt(b**2 - 4*a*c)) / (2*a) tmp1 = numpy.negative(b) # -b tmp2 = numpy.square(b) # b**2 tmp3 = numpy.multiply(4, a) # 4*a tmp4 = numpy.multiply(tmp3, c) # tmp3*c tmp5 = numpy.subtract(tmp2, tmp4) # tmp2 - tmp4 tmp6 = numpy.sqrt(tmp5) # sqrt(tmp5) tmp7 = numpy.add(tmp1, tmp6) # tmp1 + tmp6 tmp8 = numpy.multiply(2, a) # 2*a roots2 = numpy.divide(tmp7, tmp8) # tmp7 / tmp8 roots1, roots2 # Even comparison operators are element-by-element. roots1 == roots2 # So use a reducer (e.g. sum, max, min, any, all) to turn the array into a scalar. (roots1 == roots2).all()Just as a Numpy array is a common data type, this is a common function type: "universal functions" or "ufuncs."px, py, pz = uproot.open("data/Zmumu.root")["events"].arrays("p[xyz]1", outputtype=tuple) p = numpy.sqrt(px**2 + py**2 + pz**2) p # But what if there are multiple values per event? uproot.open("data/HZZ.root")["events"].array("Muon_Px") # JaggedArray can be used in place of a Numpy array in some contexts, # such as array-at-a-time math. Functions like numpy.sqrt recognize it. px, py, pz = uproot.open("data/HZZ.root")["events"].arrays(["Muon_P[xyz]"], outputtype=tuple) numpy.sqrt(px**2 + py**2 + pz**2)E, px, py, pz = uproot.open("data/Zmumu.root")["events"].arrays(["E1", "p[xyz]1"], outputtype=tuple) # Numpy arrays # array array array scalar energy = numpy.sqrt(px**2 + py**2 + pz**2 + 0.1056583745**2) energy, E E, px, py, pz = uproot.open("data/HZZ.root")["events"].arrays(["Muon_E", "Muon_P[xyz]"], outputtype=tuple) # JaggedArrays # array array array scalar energy = numpy.sqrt(px**2 + py**2 + pz**2 + 0.1056583745**2) energy, E import awkward # the library that defines JaggedArrays and other "awkward" arrays scalar = 1000 flat = numpy.array([100, 200, 300]) jagged = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5]]) # With JaggedArrays, there are more broadcasting cases: print(f"scalar + flat: {scalar + flat}") print(f"\nscalar + jagged: {scalar + jagged}") print(f"\n flat + jagged: {flat + jagged}") # Using jagged broadcasting in physics jetx, jety, metx, mety = uproot.open("data/HZZ.root")["events"].arrays( ["Jet_P[xy]", "MET_p[xy]"], outputtype=tuple) jet_phi = numpy.arctan2(jety, jetx) met_phi = numpy.arctan2(mety, metx) print(f"multi per event: {jet_phi}") print(f"one per event: {met_phi}") print(f"\ndifference: {jet_phi - met_phi}") # Q: What about ensuring that each delta-phi is between -pi and pi without if/then? # A: You start to pick up tricks, like this: raw_diff = jet_phi - met_phi bounded_diff = (raw_diff + numpy.pi) % (2*numpy.pi) - numpy.pi # Should dphi be a library function? That's the kind of question we think about... raw_diff, bounded_diff # bounded_diff.flatten().min(), bounded_diff.flatten().max()Reducers: any, all, count, count_nonzero, sum, prod, min, max# Another way JaggedArrays extend Numpy arrays: # Reducers, like sum, min, max, turn flat arrays into scalars. 
met_phi.min(), met_phi.max() # Another way JaggedArrays extend Numpy arrays: # Reducers, like sum, min, max, turn jagged arrays into flat arrays. jet_phi.min(), jet_phi.max() # The meaning of flat.sum() is "sum of all elements of the flat array." # The meaning of jagged.sum() is "sum of all elements in each inner array." jagged = awkward.fromiter([[1.0, 2.0, 3.0], [], [4.0, 5.0]]) jagged.sum() # min, max # jagged.sum().sum() completes the process, resulting in a scalar. But, # jagged.flatten().sum() does the same thing. Why? jagged.sum().sum(), jagged.flatten().sum() # mean, var, std are also available, just like Numpy, but these aren't associative. # "Don't do a mean of means unless you mean it!" jet_phi.mean() # Also worth noting that any and all are reducers... of booleans. same_semicircle = (abs(bounded_diff) < numpy.pi/2) print(f"same_semicircle: {same_semicircle}") print(f"same_semicircle.any(): {same_semicircle.any()}") print(f"same_semicircle.any().any(): {same_semicircle.any().any()}") print(f"same_semicircle.any().all(): {same_semicircle.any().all()}") print(f"same_semicircle.all(): {same_semicircle.all()}") print(f"same_semicircle.all().any(): {same_semicircle.all().any()}") print(f"same_semicircle.all().all(): {same_semicircle.all().all()}")same_semicircle: [[] [False] [] ... [False] [False True] []] same_semicircle.any(): [False False False ... False True False] same_semicircle.any().any(): True same_semicircle.any().all(): False same_semicircle.all(): [ True False True ... False False True] same_semicircle.all().any(): True same_semicircle.all().all(): FalseSlicing: single-item extraction, filtering (cuts), rearrangement# Basic array slicing is the same as Python list slicing a = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) for expr in ["a[3] ", "a[3:] ", "a[:3] ", "a[3:7] ", "a[3:7:2] ", "a[::2] "]: print(expr, "=", eval(expr)) print() for expr in ["a[-3] ", "a[-3:] ", "a[:-3] ", "a[-7:-3] ", "a[-7:-3:2]", "a[::-1] "]: print(expr, "=", eval(expr)) # But multidimensional arrays can be sliced with an extension of list slicing. a = numpy.array([[ 0, 1, 2, 3, 4, 5], [10, 11, 12, 13, 14, 15], [20, 21, 22, 23, 24, 25], [30, 31, 32, 33, 34, 35]]) for expr in "a[2:, 1:]", "a[:, 1:-1]", "a[::2, ::2]", "a[:, 3]": print(expr, " =\n", eval(expr), sep="", end="\n\n")a[2:, 1:] = [[21 22 23 24 25] [31 32 33 34 35]] a[:, 1:-1] = [[ 1 2 3 4] [11 12 13 14] [21 22 23 24] [31 32 33 34]] a[::2, ::2] = [[ 0 2 4] [20 22 24]] a[:, 3] = [ 3 13 23 33]# Masking: using an array of booleans as a slice a = numpy.array([ 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) mask = numpy.array([False, False, False, False, True, False, True, False, True]) # 5.5 7.7 9.9 for expr in "a[mask]", "a < 5", "a[a < 5]": print(expr, " =\n", eval(expr), sep="", end="\n\n") # Five-minute exercise: plot masses with (1) opposite charges and # (2) both muon abs(eta) < 1 arrays = uproot.open("data/Zmumu.root")["events"].arrays(namedecode="utf-8") print(arrays.keys()) for n in arrays: exec(f"{n} = arrays['{n}']") import matplotlib.pyplot matplotlib.pyplot.hist(M, bins=100); # What if the boolean mask is jagged? E, px, py, pz, q = uproot.open("data/HZZ.root")["events"].arrays( ["Muon_E", "Muon_P[xyz]", "Muon_Charge"], outputtype=tuple) print(f"q: {q}") print(f"\nq > 0: {q > 0}") print(f"\nE: {E}") print(f"\nE[q > 0]: {E[q > 0]}") # JaggedArray slicing does what Numpy does in the cases that overlap... 
x = awkward.fromiter([[1.1, 2.2, 3.3, 4.4], [5.5, 6.6], [7.7, 8.8, 9.9]]) print(f"x = {x}") # take the first two inner arrays print(f"\nx[:2] = {x[:2]}") # take the first two of each inner arrays print(f"\nx[:, :2] = {x[:, :2]}") # mask outer lists print(f"\nx[[True, False, True]] = {x[[True, False, True]]}") # ... and naturally extend it in the new cases. x = awkward.fromiter([[ 1.1, 2.2, 3.3], [ 4.4, 5.5], [ 6.6, 7.7, 8.8]]) mask = awkward.fromiter([ True, False, True ]) jmask = awkward.fromiter([[True, False, True], [False, False], [True, True, True]]) print(f"x[mask] = {x[mask]}") # mask outer array print(f"\nx[jmask] = {x[jmask]}") # mask inner arrays # In Numpy, arrays of integers can also be used as indexes. a = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) print("selects elements, possibly out of order") index = numpy.array([3, 5, 0, 9]) print("a[[3, 5, 0, 9]] =", a[index]) print("\nmay use negative indexing, just like single integers and slices") index = numpy.array([3, 5, 0, -1, -2, -3]) print("a[[3, 5, 0, -1, -2, -3]] =", a[index]) print("\nmay include repetitions(!)") index = numpy.array([3, 5, 0, 9, 9, 9, 3, 5, 0]) print("a[[3, 5, 0, 9, 9, 9, 3, 5, 0]] =", a[index]) # What is integer indexing good for? permutation = eta1.argsort() # also try abs(eta1).argsort() print(f"permutation:\n{permutation}") print(f"\n\nsorted eta1:\n{eta1[permutation]}") print(f"\n\nE1 sorted by eta1:\n{E1[permutation]}") # Integer indexes with JaggedArrays: x = awkward.fromiter([[ 1.1, 2.2, 3.3, 4.4], [5.5, 6.6], [7.7, 8.8, 9.9]]) index = awkward.fromiter([-1, 0, 0]) jindex = awkward.fromiter([[0, 0, -1], [0, 0, -1], [0, 0, -1]]) print(f"x[index] = {x[index]}") # rearrange outer array print(f"\nx[jindex] = {x[jindex]}") # rearrange inner arrays # Use case for jagged indexing: argmin and argmax E, px, py, pz, q = uproot.open("data/HZZ.root")["events"].arrays( ["Muon_E", "Muon_P[xyz]", "Muon_Charge"], outputtype=tuple) eta = numpy.arctanh(pz / numpy.sqrt(px**2 + py**2 + pz**2)) print(f"eta: {eta}") maxabseta = abs(eta).argmax() print(f"\nmaxabseta: {maxabseta}") print(f"\neta[maxabseta]: {eta[maxabseta]}") # eta with max |eta| per event print(f"\nE[maxabseta]: {E[maxabseta]}") # energy with max |eta| per event # Array indexing is useful in surprising ways because it's a basic mathematical # operation: thinking of f[x] as a function, array indexing is function composition. # Take any two non-negative functions of integers... def f(x): return x**2 - 5*x + 10 def g(y): return max(0, 2*y - 10) + 3 # ... and sample them as arrays F = numpy.array([f(i) for i in numpy.arange(10)]) # F is f at 10 elements G = numpy.array([g(i) for i in numpy.arange(100)]) # G is g at enough elements to include max(f) GoF = numpy.array([g(f(i)) for i in numpy.arange(10)]) # GoF is g∘f at 10 elements print("G\u2218F =", G[F]) # integer indexing print("g\u2218f =", GoF) # array of the composed functions # Consider the following application: text = """Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure. We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper that we should do this. 
But, in a larger sense, we can not dedicate—we can not consecrate—we can not hallow—this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. It is rather for us to be here dedicated to the great task remaining before us—that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion—that we here highly resolve that these dead shall not have died in vain—that this nation, under God, shall have a new birth of freedom—and that government of the people, by the people, for the people, shall not perish from the earth.""" words = text.replace(".", " ").replace(",", " ").replace("-", " ").replace("\u2014", " ").split() # Dictionary encoding: for compression or textual analysis words = numpy.array(words) dictionary, index = numpy.unique(words, return_inverse=True) print("len(words) =", len(words), "\nwords[:25] =\n" + str(words[:25])) print("\nlen(dictionary) =", len(dictionary), "\ndictionary[:25] =\n" + str(dictionary[:25])) print("\nlen(index) =", len(index), "\nindex[:25] =\n" + str(index[:25])) # Recovering the original text is function composition: # # index : positions in corpus → integer codes # dictionary : integer codes → words dictionary[index] # Five minute exercise: dense array → sparse array → dense array. dense1 = 1.1 * numpy.array( [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0, 4, 1, 0, 3, 0, 1, 2, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) sparse_indexes = numpy.nonzero(dense1)[0] sparse_values = dense1[sparse_indexes] print("sparse indexes:", sparse_indexes, "\nsparse values: ", sparse_values) dense2 = numpy.zeros(len(dense1)) dense2[sparse_indexes] = sparse_values # <---- solution! print("recovered dense:", dense2, sep="\n")sparse indexes: [37 38 40 45 46 48 50 51 53 58] sparse values: [1.1 2.2 1.1 4.4 1.1 3.3 1.1 2.2 1.1 1.1] recovered dense: [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.1 2.2 0. 1.1 0. 0. 0. 0. 4.4 1.1 0. 3.3 0. 1.1 2.2 0. 1.1 0. 0. 0. 0. 1.1 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]Summary of slicing * **if X is an integer:** selects individual elements; * **if X is a slice:** selects a contiguous or regularly strided subrange (strides can be backward); * **if X is a tuple** (any commas between square brackets): applies selections to multiple dimensions; * **if X is a boolean array:** filters arbitrarily chosen elements (preserving order); * **if X is an integer array:** applies a function of integers, arbitrarily chosen, in any order, and may have duplicates.See [Numpy's advanced indexing documentation](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.htmladvanced-indexing) for more (e.g. slicing by a tuple of arrays). 
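As a recap (an added cell, not from the original), the same array run through each of the five kinds of index listed above:
import numpy

a = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
b = a.reshape(2, 5)

print(a[3])                          # integer: a single element          -> 3.3
print(a[2:8:2])                      # slice: regular stride              -> [2.2 4.4 6.6]
print(b[1, 2:])                      # tuple: one selection per dimension -> [7.7 8.8 9.9]
print(a[a > 7.0])                    # boolean array: filter, order kept  -> [7.7 8.8 9.9]
print(a[numpy.array([9, 0, 0, 3])])  # integer array: any order, repeats allowed -> [9.9 0. 0. 3.3]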
Awkward arrays: extensions to Numpy for particle physics We've seen some examples of jagged arrays and how they extend Numpy.JaggedArray is one of the classes in awkward-array to provide the kinds of data structures needed by particle physics in array form.# PyROOT's jagged array is an RVec object for every event (viewed through PyROOT). import ROOT rdf = ROOT.RDataFrame("events", "data/HZZ.root") rdf.AsNumpy(columns=["Muon_E"]) # root_numpy's jagged array is a Numpy array for every event. import root_numpy root_numpy.root2array("data/HZZ.root", "events", branches=["Muon_E"]) # Awkward/uproot's jagged array consists of three arrays: starts/stops and content. # # The number of Python objects does not scale with the number of events. import uproot array = uproot.open("data/HZZ.root")["events"].array("Muon_E") print(array.layout) array.starts, array.stops, array.contentlayout [ ()] JaggedArray(starts=layout[0], stops=layout[1], content=layout[2]) [ 0] ndarray(shape=2421, dtype=dtype('int64')) [ 1] ndarray(shape=2421, dtype=dtype('int64')) [ 2] ndarray(shape=3825, dtype=dtype('float32'))# Recent projects outside of particle physics (Arrow, Parquet, Zarr, XND, and # TensorFlow) also have jagged arrays represented as contiguous flat arrays. # # Using a similar format lets us easily (and quickly!) convert between them. awkward.toparquet("tmp.parquet", array) awkward.toarrow(array)In particular,# Pandas is a data analysis environment built around in-memory tables. # # "Numpy with an index" ... "Programmatic Excel" ... "SQL with an ordering" uproot.open("data/Zmumu.root")["events"].pandas.df() # Pandas deals with jaggedness by putting structure in an index, not the values. df = uproot.open("data/HZZ.root")["events"].pandas.df(["Muon_E", "Muon_P[xyz]"]) df # This seems a little odd (to me), but you can definitely work with it. df.unstack() # This has some interesting features: nested objects become multi-level columns... array = awkward.fromiter([{"x": i, "y": {"y1": i, "y2": i}, "z": {"z1": {"z2": i}}} for i in range(10)]) print(array[:2].tolist()) awkward.topandas(array, flatten=True) # ... and nested lists become multi-level rows. f = lambda i: [{"x": i, "y": i}] * i array = awkward.fromiter([[f(1), f(2)], [f(3)], [f(4), f(5), f(6)]]) print(array[:2].tolist()) awkward.topandas(array, flatten=True) # One-per-event data must be duplicated for each particle, and are inaccessible # when there are no particles. # (Switch between flatten=False and flatten=True.) uproot.open("data/HZZ.root")["events"].pandas.df(["MET_*", "Jet_P[xyz]"], flatten=False) # One-per-event data must be duplicated for each particle, and are inaccessible # when there are no particles. # (Switch between flatten=False and flatten=True.) uproot.open("data/HZZ.root")["events"].pandas.df(["MET_*", "Jet_P[xyz]"], flatten=True) # And there isn't a way to deal with different jaggedness in the same table. # (Switch between flatten=False and flatten=True.) uproot.open("data/HZZ.root")["events"].pandas.df(["Muon_P[xyz]", "Jet_P[xyz]"], flatten=False) # And there isn't a way to deal with different jaggedness in the same table. # (Switch between flatten=False and flatten=True.) uproot.open("data/HZZ.root")["events"].pandas.df(["Muon_P[xyz]", "Jet_P[xyz]"], flatten=True)# Awkward-array is designed to handle arbitrary data structures in a way that # fits both ROOT and Arrow/Parquet. 
array = awkward.Table(uproot.open("data/HZZ-objects.root")["events"].arrays( ["MET", "muonp4", "muonq", "jetp4"], namedecode="utf-8")) array[:10].tolist() # ROOT has objects like TLorentzVector, but they translate to generic Tables # in Arrow/Parquet. awkward.toarrow(array) # awkward.fromarrow(awkward.toarrow(array))[:10].tolist() # You can iterate over these objects in for loops, like PyROOT... for i, event in enumerate(array): print("new event", event.MET) for muon in event.muonp4: print(" muon", muon) for jet in event.jetp4: print(" jet ", jet) if i > 10: break # ... but if you need to scale up, use array-at-a-time operations. mu1 = array.muonp4[array.muonp4.counts >= 2, 0] mu2 = array.muonp4[array.muonp4.counts >= 2, 1] (mu1 + mu2).mass # The "combinatorics" we need for particle physics requires a few new operations. # Take any two muons from events that have them, not necessarily the first two. pairs = array.muonp4.choose(2) pairs # Get the first and second element of each pair. first, second = pairs.unzip() first, second # Compute the mass and plot. # # ("flatten" because Matplotlib needs a flat array, not a jagged array.) matplotlib.pyplot.hist((first + second).mass.flatten(), bins=100, range=(0, 150)); # Five-minute exercise: plot masses with (1) opposite charges and # (2) both muon abs(eta) < 1 # This time, it's jagged. array.muonq, array.muonp4.eta # first, second = array.muonp4.choose(2).unzip() # matplotlib.pyplot.hist((first + second).mass.flatten(), bins=100, range=(0, 150)); # Advanced combinatorics: muons that are close to jets # Step 1: jet-muon pairs with a doubly-jagged structure # so we have one of these for every jet jets, muons = array.jetp4.cross(array.muonp4, nested=True).unzip() jets, muons # Advanced combinatorics: muons that are close to jets # Step 2: ΔR between each jet and muon distance = jets.delta_r(muons) distance # Advanced combinatorics: muons that are close to jets # Step 3: mask those that have any within ΔR < 1.0 mask = (distance < 1.0).any() print(f"mask: {mask}") # Step 4: index of the closest one index = distance.argmin() print(f"index: {index}") # Advanced combinatorics: muons that are close to jets # Step 5: select those jets jets_near_muons = jets[index][mask] jets_near_muons # (Use this to see just the events that have one.) # jets_near_muons[jets_near_muons.counts > 0] # Advanced combinatorics: muons that are close to jets # Choice A: we want just those jets. Need to flatten the inner arrays so that # the result is singly jagged, like the original jets. array["jets_near_muons"] = jets_near_muons.flatten(axis=1) for i, event in enumerate(array): if mask[i].any(): print(event.jetp4) print(event.jets_near_muons) print() if i > 100: break # Advanced combinatorics: muons that are close to jets # Choice B: we want to link to the relevant muons, with the ΔR distance array["nearest_muon"] = muons[index].pad(1, axis=1).flatten(axis=1) array["distance"] = distance[index].pad(1, axis=1).flatten(axis=1) # Set link to None if nearest_muon or distance doesn't pass the cut array.nearest_muon.content.mask |= ~mask.flatten() array.distance.content.mask |= ~mask.flatten() for i, event in enumerate(array): if mask[i].any(): print(event.jetp4) print(event.nearest_muon) print(event.distance) print() if i > 100: break # Apologies for using functions that have not yet been introduced: just as with # Numpy, working with awkward arrays means learning a vocabulary of single-step # functions and putting them together. 
a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6, 7.7, 8.8, 9.9]]) # "pad" means fill inner arrays with None until it has at least N elements. a.pad(3) # Apologies for using functions that have not yet been introduced: just as with # Numpy, working with awkward arrays means learning a vocabulary of single-step # functions and putting them together. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6, 7.7, 8.8, 9.9]]) # You can use it with "fillna" and "regular" to make a regular Numpy array. a.pad(3, clip=True).fillna(999).regular() # Apologies for using functions that have not yet been introduced: just as with # Numpy, working with awkward arrays means learning a vocabulary of single-step # functions and putting them together. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6, 7.7, 8.8, 9.9]]) # In the previous example, we used it with argmax, which makes inner arrays of # length 0 or 1, to ensure that they're always length 1. a.argmax().pad(1) # Apologies for using functions that have not yet been introduced: just as with # Numpy, working with awkward arrays means learning a vocabulary of single-step # functions and putting them together. a = awkward.fromiter([[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6, 7.7, 8.8, 9.9]]) # Once we've done that, we don't need the inner structure anymore and can flatten # it to get a non-jagged array. a.argmax().pad(1).flatten() # Apologies for using functions that have not yet been introduced: just as with # Numpy, working with awkward arrays means learning a vocabulary of single-step # functions and putting them together. a = awkward.fromiter([[[1.1, 2.2, 3.3]], [[], [4.4, 5.5]], [[6.6, 7.7, 8.8, 9.9]]]) # But all of that happened inside a doubly-jagged array, in which we wanted to # collapse the inner dimension, so we used axis=1. (Same meaning as in Numpy.) a.argmax().pad(1, axis=1).flatten(axis=1)Although we've only talked about variable-length lists, objects, and None, awkward-array types form a complete type system: Primitive types: numbers, booleans, and fixed-size binary blobs via Numpy, Lists: variable-length lists via JaggedArray, Union (sum) types: heterogeneous lists via UnionArray, Record (product) types: objects (Table), implicitly in our previous examples, Pointers: cross-references and circular references via IndexedArray, Non-contiguous data: via ChunkedArray, Lazy data: via VirtualArray.# Just to demonstrate, let's make a tree... 
tree = awkward.fromiter([ {"value": 1.23, "left": 1, "right": 2}, # node 0 {"value": 3.21, "left": 3, "right": 4}, # node 1 {"value": 9.99, "left": 5, "right": 6}, # node 2 {"value": 3.14, "left": 7, "right": None}, # node 3 {"value": 2.71, "left": None, "right": 8}, # node 4 {"value": 5.55, "left": None, "right": None}, # node 5 {"value": 8.00, "left": None, "right": None}, # node 6 {"value": 9.00, "left": None, "right": None}, # node 7 {"value": 0.00, "left": None, "right": None}, # node 8 ]) left = tree.contents["left"].content right = tree.contents["right"].content left[(left < 0) | (left > 8)] = 0 right[(right < 0) | (right > 8)] = 0 tree.contents["left"].content = awkward.IndexedArray(left, tree) tree.contents["right"].content = awkward.IndexedArray(right, tree) print("Physical layout:") print("------------------------------------------------------------------") for i, x in tree.layout.items(): if x.cls == numpy.ndarray: print("{0:10s} {1}".format(repr(i), x.array)) import json print("\nLogical meaning:") print("------------------------------------------------------------------") print(json.dumps(tree[0].tolist(), indent=4)) # For those of you who were here yesterday, do you remember this? import sklearn.datasets, matplotlib.pyplot X1, y1 = sklearn.datasets.make_gaussian_quantiles( cov=2.0, n_samples=200, n_features=2, n_classes=2, random_state=1) X2, y2 = sklearn.datasets.make_gaussian_quantiles( mean=(3, 3), cov=1.5, n_samples=400, n_features=2, n_classes=2, random_state=1) X = numpy.concatenate((X1, X2)) y = numpy.concatenate((y1, -y2 + 1)) matplotlib.pyplot.scatter(X[y == 0, 0], X[y == 0, 1], c="deepskyblue", edgecolor="black"); matplotlib.pyplot.scatter(X[y == 1, 0], X[y == 1, 1], c="orange", edgecolor="black"); # We made a decision tree using Scikit-Learn... import sklearn.tree decision_tree = sklearn.tree.DecisionTreeClassifier(max_depth=8) decision_tree.fit(X, y) xx, yy = numpy.meshgrid(numpy.arange(-5, 8, 0.02), numpy.arange(-5, 8, 0.02)) Z = decision_tree.predict(numpy.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) matplotlib.pyplot.contourf(xx, yy, Z); matplotlib.pyplot.scatter(X[y == 0, 0], X[y == 0, 1], c="deepskyblue", edgecolor="black", alpha=0.2); matplotlib.pyplot.scatter(X[y == 1, 0], X[y == 1, 1], c="orange", edgecolor="black", alpha=0.2); matplotlib.pyplot.xlim(-5, 8); matplotlib.pyplot.ylim(-5, 8); # Scikit-Learn is already using columnar trees: we can just cast it. 
mask = decision_tree.tree_.children_left < 0 left = decision_tree.tree_.children_left.copy() right = decision_tree.tree_.children_right.copy() left[mask] = 0 right[mask] = 0 tree = awkward.Table() tree["feature"] = awkward.MaskedArray(mask, decision_tree.tree_.feature) tree["threshold"] = awkward.MaskedArray(mask, decision_tree.tree_.threshold) tree["left"] = awkward.MaskedArray(mask, awkward.IndexedArray(left, tree)) tree["right"] = awkward.MaskedArray(mask, awkward.IndexedArray(right, tree)) tree["value"] = decision_tree.tree_.value[:, 0, 0] - decision_tree.tree_.value[:, 0, 1] tree[0].tolist()!pip install category_encoders !wget https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip !unzip bank-additional.zip import pandas as pd df = pd.read_csv("bank-additional/bank-additional-full.csv", delimiter=";") y_col = "y" train_ratio = 0.8 val_ratio = 0.1 shuffle = True override_model = None override_params = {} override_scorer = None from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.metrics import accuracy_score from sklearn.metrics import r2_score model = None params = {} scorer = None if not override_model: if df[y_col].dtype == 'object': if len(df[y_col].astype('category').cat.categories) < 3 and df[y_col].isna().sum() == 0: m = {x: i for i,x in enumerate(df[y_col].astype('category').cat.categories)} n_col = "{}:{}".format(y_col, df[y_col].astype('category').cat.categories[-1]) df[n_col] = df[y_col].map(m) df = df.drop(columns=[y_col], axis=1) y_col = [n_col] model = LogisticRegression params = {'solver': 'lbfgs'} scorer = accuracy_score else: dummy = pd.get_dummies(df[y_col]) Y = [] for v in dummy: df["{}:{}".format(y_col,v)] = dummy[v] Y.append("{}:{}".format(y_col,v)) df.drop(columns=[y_col], axis=1) y_col = Y model = LogisticRegression params = {'solver': 'lbfgs', 'multi_class': 'multinomial'} scorer = accuracy_score else: model = LinearRegression params = {} scorer = r2_score else: model = override_model if override_params != {}: params = override_params if override_scorer != None: scorer = override_scorer from sklearn.model_selection import train_test_split cols = [x for x in df if x not in y_col] X_train = None X_val = None X_test = None y_train = None y_val = None y_test = None if train_ratio < 1.0: if cvs > 0: X_train, X_test, y_train, y_test = train_test_split(df[cols], df[y_col], train_size=train_ratio, shuffle=shuffle) elif val_ratio > 0.0: X_train, X_test, y_train, y_test = train_test_split(df[cols], df[y_col], train_size=train_ratio+val_ratio, shuffle=shuffle) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, train_size=train_ratio/(train_ratio+val_ratio), shuffle=shuffle) else: X_train, X_test, y_train, y_test = train_test_split(df[cols], df[y_col], train_size=train_ratio, shuffle=shuffle) X_val = X_test.copy() y_val = y_test.copy() else: X_val = X_train.copy() y_val = y_train.copy() X_test = X_train.copy() y_test = y_train.copy() import numpy as np from sklearn.metrics import mean_squared_error from scipy import stats def calc_p(pipeline, X, y, y_pred): for i in range(y.shape[1]): params = np.append(pipeline.named_steps['model'].intercept_[i],pipeline.named_steps['model'].coef_[i]) colX = pipeline.named_steps['drop'].transform(pipeline.named_steps['encode'].transform(X)) newX = pd.DataFrame({"Constant":np.ones(len(X))}).join(pd.DataFrame(pipeline.named_steps['transform'].transform(colX))) var_b = mean_squared_error(y, 
y_pred)*(np.linalg.pinv(np.dot(newX.T,newX)).diagonal()) sd_b = np.sqrt(var_b) ts_b = params/ sd_b return pd.Series([2*(1-stats.t.cdf(np.abs(i),(len(newX)-1))) for i in ts_b], index=["Intercept"]+[x for x in colX]) class DropFeatures(object): def __init__(self, cols) : self.cols = cols def set_params(self, cols): self.cols = cols def transform(self, x) : dat = x.copy() return dat[[i for i in x if i not in self.cols]] def fit(self, x, y=None) : return self from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler import category_encoders as ce import warnings warnings.filterwarnings("ignore") remove_cols = [] pipeline = Pipeline([('encode', ce.OneHotEncoder(use_cat_names=True)), ('drop', DropFeatures(remove_cols)), ('transform', StandardScaler()), ('model', model(**params))]) while True: pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_test) print(scorer(y_test, y_pred)) p_vals = calc_p(pipeline, X_test, y_test, pipeline.predict(X_test)) rem = [p_vals.index[i] for i in range(len(p_vals)) if p_vals.index[i] != "Intercept" and (p_vals[i] > 0.05 or p_vals.isna()[i])] if len(rem) == 0: break remove_cols += rem pipeline.set_params(drop__cols=remove_cols) coef_table = None for i in range(len(y_col)): if i == 0: coef_table = p_vals.to_frame(name="p_{}".format(i)) else: coef_table = coef_table.join(p_vals.to_frame(name="p_{}".format(i))) coef_table = coef_table.join(pd.Series(np.append(pipeline.named_steps['model'].intercept_[i], pipeline.named_steps['model'].coef_[i]), index=["Intercept"]+[x for x in pipeline.named_steps['drop'].transform(pipeline.named_steps['encode'].transform(X_test))]).to_frame(name="Coefficient_{}".format(i))) coef_table["abs(Coefficient_{})".format(i)] = coef_table["Coefficient_{}".format(i)].abs() coef_table = coef_table.sort_values(by=["p_0", "abs(Coefficient_0)"], ascending=[False, True]) with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(coef_table)jupyter 연습import tensorflow as tf tf.set_random_seed(777) # for reproducibilityx_data, y_data 준비x1_data = [73., 93., 89., 96., 73.] x2_data = [80., 88., 91., 98., 66.] x3_data = [75., 93., 90., 100., 70.] 
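# (Aside, not in the original notebook: the three feature lists above could also be
# stacked into a single [5, 3] matrix and fed through one placeholder, e.g.
#   X = tf.placeholder(tf.float32, shape=[None, 3])
#   W = tf.Variable(tf.random_normal([3, 1]))
#   hypothesis = tf.matmul(X, W) + b   # with the same bias b defined below
# which scales to any number of features; the cells below keep one placeholder and
# one weight per feature for clarity.)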
y_data = [152., 185., 180., 196., 142.]placeholder 생성x1 = tf.placeholder(tf.float32) x2 = tf.placeholder(tf.float32) x3 = tf.placeholder(tf.float32) Y = tf.placeholder(tf.float32)variables 생성w1 = tf.Variable(tf.random_normal([1]), name='weight1') w2 = tf.Variable(tf.random_normal([1]), name='weight2') w3 = tf.Variable(tf.random_normal([1]), name='weight3') b = tf.Variable(tf.random_normal([1]), name='bias')expression, cost function, trainhypothesis = x1 * w1 + x2 * w2 + x3 * w3 + b cost = tf.reduce_mean(tf.square(hypothesis - Y)) optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5) train = optimizer.minimize(cost)session 생성하고 runsess = tf.Session() sess.run(tf.global_variables_initializer()) for step in range(2001): cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict={x1: x1_data, x2: x2_data, x3: x3_data, Y: y_data}) if step % 10 == 0: print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)0 Cost: 11407.703 Prediction: [63.916138 65.81676 70.42296 78.04809 46.316566] 10 Cost: 34.956184 Prediction: [158.23204 179.21907 182.1386 199.7054 132.82286] 20 Cost: 34.673935 Prediction: [158.4955 179.57722 182.46996 200.06802 133.10493] 30 Cost: 34.496758 Prediction: [158.47446 179.59335 182.46434 200.06367 133.12605] 40 Cost: 34.320503 Prediction: [158.45264 179.6084 182.45775 200.05823 133.14636] 50 Cost: 34.14513 Prediction: [158.43083 179.6234 182.45116 200.05281 133.16661] 60 Cost: 33.970768 Prediction: [158.40913 179.63838 182.44461 200.04741 133.18681] 70 Cost: 33.79736 Prediction: [158.38745 179.65329 182.43805 200.04199 133.20692] 80 Cost: 33.624947 Prediction: [158.36586 179.66815 182.4315 200.03662 133.227 ] 90 Cost: 33.453346 Prediction: [158.34431 179.68298 182.42499 200.03123 133.24704] 100 Cost: 33.282753 Prediction: [158.32283 179.69778 182.41849 200.02588 133.267 ] 110 Cost: 33.11306 Prediction: [158.3014 179.71254 1[...]`weights`: each row corresponds to an event, each column corresponds to a point`points`: each row corresponds to a point, each column corresponds to a coefficientstart_values, points, weights = parse_lhe_weights(lhe, coefficients) test = [i for i in range(1, len(points)) if i % 10 == 0] train = [i for i in range(1, len(points)) if i % 10 != 0] errs = np.zeros((len(weights), len(points))) fits = np.zeros((len(weights), len(points))) start = time.time() for event in range(len(weights)): constants, _, _, _ = np.linalg.lstsq(scan.model(points[train]), weights[event][train]) fit = np.dot(scan.model(points), constants) mg = weights[event] errs[event] = (mg - fit) / mg * 100 fits[event] = fit print('finished {} {} dimensional fits to {} points in {} seconds'.format( len(weights), points.shape[1], len(points[train]), int(time.time() - start)) ) plt.hist(errs[:,test].ravel(), 50, histtype='step', log=True, fill=False) plt.xlabel('$(\mathrm{weight}_{\mathrm{MG}} - \mathrm{weight}_{\mathrm{fit}}) / \mathrm{weight}_{\mathrm{MG}} * 100$') plt.ylabel('event points') plt.savefig(os.path.join(outdir, 'errs.pdf'), bbox_inches='tight') x = np.concatenate([points[:, indices['cuB']] for i in weights]).ravel() y = weights.ravel() bins = [ np.linspace(x.min(), x.max(), 20), np.logspace(np.log10(y.min()), np.log10(y.max()), 20) ] plt.yscale('log') H, xedges, yedges = np.histogram2d(x, y, bins=bins) plt.pcolormesh(xedges, yedges, H.T, norm=LogNorm()) plt.xlabel('$\mathrm{c}_\mathrm{uB}$') plt.ylabel('weight') plt.colorbar(label='event points') plt.savefig(os.path.join(outdir, 'cuB_vs_weight.pdf'), bbox_inches='tight')Above is the projection from the 
8-dimensional space onto the cuB axis.x = np.concatenate([points[test][:, indices['cuB']] for i in errs[:, test]]).ravel() y = np.abs(errs[:, test]).ravel() bins = [ np.linspace(x.min(), x.max(), 20), np.logspace(np.log10(y.min()), np.log10(y.max()), 20) ] print(bins) plt.yscale('log') H, xedges, yedges = np.histogram2d(x, y, bins=bins) plt.pcolormesh(xedges, yedges, H.T, norm=LogNorm()) plt.xlabel('$\mathrm{c}_\mathrm{uB}$') plt.ylabel('$(\mathrm{weight}_{\mathrm{MG}} - \mathrm{weight}_{\mathrm{fit}}) / \mathrm{weight}_{\mathrm{MG}} * 100$') plt.colorbar(label='event points') plt.savefig(os.path.join(outdir, 'cuB_vs_err.pdf'), bbox_inches='tight')[array([ 0.009547 , 0.02252921, 0.03551142, 0.04849363, 0.06147584, 0.07445805, 0.08744026, 0.10042247, 0.11340468, 0.12638689, 0.13936911, 0.15235132, 0.16533353, 0.17831574, 0.19129795, 0.20428016, 0.21726237, 0.23024458, 0.24322679, 0.256209 ]), array([ 8.22197640e-13, 2.90178057e-12, 1.02412487e-11, 3.61444196e-11, 1.27564432e-10, 4.50212907e-10, 1.58893556e-09, 5.60782727e-09, 1.97916942e-08, 6.98507888e-08, 2.46524257e-07, 8.70057593e-07, 3.07069262e-06, 1.08373896e-05, 3.82483787e-05, 1.34989931e-04, 4.76419707e-04, 1.68142717e-03, 5.93425773e-03, 2.09437646e-02])]Note that the plot above only uses test points which were not included in the fit. The blank vertical bars correspond to the `train` points which were included in the fit.x = weights[:, test].ravel() y = np.abs(errs[:, test].ravel()) bins = [ np.logspace(np.log10(x.min()), np.log10(x.max()), 40), np.logspace(np.log10(y.min()), np.log10(y.max()), 40) ] with plt.rc_context(tweaks): plt.xscale('log') plt.yscale('log') H, xedges, yedges = np.histogram2d(x, y, bins=bins) plt.pcolormesh(xedges, yedges, H.T, norm=LogNorm()) plt.xlabel('weight') plt.ylabel('100 $\\frac{\mathrm{weight}_{\mathrm{MG}} - \mathrm{weight}_{\mathrm{fit}}}{\mathrm{weight}_{\mathrm{MG}}}$', fontsize='x-large') plt.colorbar(label='event points') #plt.title('{}d parameterization'.format(len(coefficients))) plt.savefig(os.path.join(outdir, 'weight_vs_err.pdf'), bbox_inches='tight')The only thing more confusing than a log log plot is a log log log plot.with plt.rc_context(tweaks): bins = np.logspace(np.log10(fits.min()), np.log10(fits.max()), 100) plt.xscale('log') plt.yscale('log') plt.hist(fits.ravel(), bins=bins, histtype='step', fill=False, log=True, lw=3, color='red') plt.hist(weights.ravel(), bins=bins, histtype='step', fill=False, log=True, color='blue') fit_label, = plt.plot([0, 0], color='red') # hack to show lines instead of boxes in the legend mg_label, = plt.plot([0, 0], color='blue') plt.xlabel('weight') plt.ylabel('event points') plt.legend([fit_label, mg_label], ['fit', 'MG'], loc='upper right') plt.savefig(os.path.join(outdir, 'fit_and_mg.pdf'), bbox_inches='tight') bins = np.logspace(np.log10(fits.min()), np.log10(fits.max()), 100) plt.xscale('log') plt.yscale('log') plt.hist(fits[:, test].ravel(), bins=bins, histtype='step', fill=False, log=True, lw=3, color='red') plt.hist(weights[:, test].ravel(), bins=bins, histtype='step', fill=False, log=True, color='blue') fit_label, = plt.plot([0, 0], color='red') # hack to show lines instead of boxes in the legend mg_label, = plt.plot([0, 0], color='blue') plt.xlabel('weight') plt.ylabel('event points') plt.legend([fit_label, mg_label], ['fit', 'MG'], loc='upper right') plt.savefig(os.path.join(outdir, 'fit_and_mg_only_test_points.pdf'), bbox_inches='tight') scan.fit() scan.scales(('c2G', 'c3G', 'cH', 'cHu', 'cuB', 'cuG', 'cuW', 'tc3G'), 'ttZ') sm = 
scan.cross_sections['sm']['ttZ'] scales = [sum(weights[:, i]) / sm for i in range(weights.shape[1])] x = np.concatenate([scales for i in weights]).ravel() y = weights.ravel() bins = [ np.linspace(x.min(), x.max(), 30), np.logspace(np.log10(y.min()), np.log10(y.max()), 30) ] with plt.rc_context(tweaks): plt.yscale('log') H, xedges, yedges = np.histogram2d(x, y, bins=bins) plt.pcolormesh(xedges, yedges, H.T, norm=LogNorm()) plt.xlabel('$\sigma_\mathrm{NP+SM}/\sigma_\mathrm{SM}$') plt.ylabel('weight') plt.colorbar(label='event points') #plt.xlim(xmax=12) plt.savefig(os.path.join(outdir, 'weight_vs_scale.pdf'), bbox_inches='tight')fitting ('c2G', 'c3G', 'cH', 'cHu', 'cuB', 'cuG', 'cuW', 'tc3G') ttZ using 4987 points dimensions None fitting ('c2G', 'c3G', 'cH', 'cHu', 'cuB', 'cuG', 'cuW', 'tc3G') ttH using 4987 points dimensions None fitting ('c2G', 'c3G', 'cH', 'cHu', 'cuB', 'cuG', 'cuW', 'tc3G') ttW using 4995 points dimensions NoneIn the above plot, all of the weights corresponding to each point are added to obtain the total cross section at that point. This plot only means anything if the reweighting makes sense, which remains to be seen.plt.hist(weights.ravel(), 150, histtype='step', log=True, fill=False) plt.xlabel('weight') plt.ylabel('event points') plt.savefig(os.path.join(outdir, 'errs.pdf'), bbox_inches='tight') print("weights vary between {:.0e} and {:.0e}".format(weights.min(), weights.max())) unreweighted = scan.evaluate(coefficients, points, 'ttZ') reweighted = np.vstack([sum(weights[:, i]) / sm for i in range(weights.shape[1])]) try: start_values = start_values.reshape((1, len(start_values))) except: pass plt.plot(points[:, indices['cuB']].ravel(), unreweighted.ravel(), 'o', linestyle='none', label='unreweighted') plt.plot(points[:, indices['cuB']].ravel(), reweighted.ravel(), '+', linestyle='none', label='reweighted') plt.plot(start_values[:, indices['cuB']], scan.evaluate(coefficients, start_values, 'ttZ'), marker='*', markersize=10, linestyle='none', color='red', label='reference model') plt.ylabel('$\sigma_\mathrm{NP+SM}/\sigma_\mathrm{SM}$') plt.xlabel('cuB') plt.legend(loc='upper center') plt.savefig(os.path.join(outdir, 'unreweighted_and_weighted.pdf'), bbox_inches='tight') unreweighted = scan.evaluate(coefficients, points, 'ttZ').ravel() reweighted = np.array([sum(weights[:, i]) / sm for i in range(weights.shape[1])]) reweighted_parameterized = np.array([sum(fits[:, i]) / sm for i in range(weights.shape[1])]) sort = unreweighted.argsort() index = np.array(list(range(len(sort)))) plt.plot(index, reweighted_parameterized[sort], 'o', linestyle='none', markersize=10, label='reweighted (paramaterized)', color='orange') plt.plot(index, reweighted[sort], '+', linestyle='none', markersize=10, label='reweighted') plt.plot(index, unreweighted[sort], '.', linestyle='none', markersize=10, label='unreweighted', color='red') plt.ylabel('$\sigma_\mathrm{NP+SM}/\sigma_\mathrm{SM}$') plt.xlabel('point') plt.legend(loc='upper left', title='{}d scan (reference $\sigma_\mathrm{{NP+SM}}/\sigma_\mathrm{{SM}}={:.1f}$)'.format(points.shape[1], scan.evaluate(coefficients, start_values, 'ttZ')[0])) plt.savefig(os.path.join(outdir, 'unreweighted_and_weighted_by_point.pdf'), bbox_inches='tight') print(outdir)/afs/crc.nd.edu/user/a/awoodard/www/.private/ttV/reweight_v14Colab initialization- install the pipeline in the colab runtime- download files neccessary for this example!pip3 install -U pip > /dev/null !pip3 install -U bio_embeddings[all] > /dev/null !wget 
http://data.bioembeddings.com/public/embeddings/reference/goa/protbert_reference_embeddings.h5 --output-document protbert_reference_embeddings.h5 !wget http://data.bioembeddings.com/public/embeddings/reference/goa/annotations.csv --output-document annotations.csvEmbed a sequence and find closest hit in an annotated sourceUsing the annotated source from [goPredSim](https://github.com/Rostlab/goPredSim/), we will transfer GO annotations to a user supplied sequence.Some initial steps are explained in greater detail in the `pairwise_distances_and_nearest_neighbours` notebook.from bio_embeddings.embed import ProtTransBertBFDEmbedder from bio_embeddings.extract import pairwise_distance_matrix_from_embeddings_and_annotations, get_k_nearest_neighbours # Initialize the embedder embedder = ProtTransBertBFDEmbedder() sequence = "MALLHSARVLSGVASAFHPGLAAAASARASSWWAHVEMGPPDPILGVTEAYKRDTNSKKMNLGVGAYRDDNGKPYVLPSVRKAEAQIAAKGLDKEYLPIGGLAEFCRASAELALGENSEVVKSGRFVTVQTISGTGALRIGASFLQRFFKFSRDVFLPKPSWGNHTPIFRDAGMQLQSYRYYDPKTCGFDFTGALEDISKIPEQSVLLLHACAHNPTGVDPRPEQWKEIATVVKKRNLFAFFDMAYQGFASGDGDKDAWAVRHFIEQGINVCLCQSYAKNMGLYGERVGAFTVICKDADEAKRVESQLKILIRPMYSNPPIHGARIASTILTSPDLRKQWLQEVKGMADRIIGMRTQLVSNLKKEGSTHSWQHITDQIGMFCFTGLKPEQVERLTKEFSIYMTKDGRISVAGVTSGNVGYLAHAIHQVTK" reduced_embedding = embedder.reduce_per_protein(embedder.embed(sequence)) import h5py with h5py.File("embeddings.h5", "w") as embeddings_file: embeddings_file.create_dataset("my_sequence", data=reduced_embedding) metric="euclidean" pairwise_distances = pairwise_distance_matrix_from_embeddings_and_annotations( 'embeddings.h5', 'protbert_reference_embeddings.h5', metric=metric ) # Get the indices and distances to the k-nearest neighbours, then get their identifiers k = 2 k_nn_indices, k_nn_distances = get_k_nearest_neighbours(pairwise_distances.pairwise_matrix, k) k_nn_identifiers = list(map(pairwise_distances.references.__getitem__, k_nn_indices[0])) # GoPredSim scales distances/similarities to a reliability index. # Note that the following was only asserted for metric='euclidean' or 'cosine' import numpy as np if metric == 'euclidean': k_nn_RI = [0.5/(0.5+dist) for dist in k_nn_distances[0]] elif metric == 'cosine': k_nn_RI = [1-dist for dist in k_nn_distances[0]] else: k_nn_RI = [-np.inf] * len(k_nn_distances[0]) from pandas import DataFrame, read_csv reference_annotations = read_csv("annotations.csv") k_nns = DataFrame({metric: k_nn_distances[0], "RI": k_nn_RI}, index=k_nn_identifiers) k_nn_groups = reference_annotations.join(k_nns, on="identifier").dropna().groupby(["identifier", metric, "RI"]) k_nn_groups = sorted(k_nn_groups, key=lambda x: x[0][1]) print(f"Metric used: {metric}.") print("If you use a distance metric, the smaller the value, the more similar the embeddings.") print("If you use a similarity metric, the smaller the value, the less similar the embeddings.") print("\n\n") for (protein, distance, RI), group in k_nn_groups: print(f"{protein}") print(f" {metric}: {round(distance, 3)}") print(f" RI: {round(RI, 2)}") print("The following GO annotations can be transferred from this protein:") for label in group.label.unique(): print(f" - {label}: http://amigo.geneontology.org/amigo/term/{label}") print("-----------\n")4. Working with Data in Python![](images/logo.png)Welcome to the third lab session!This lab includes some exercises about the material covered in session 4 concerning files and pandas. > **NOTE**: If your are running this lab on your own machine (i.e. 
not on our JupyterHub server), you need to make sure Pandas has already been installed. To check whether you have installed Pandas try running the cell below:import pandas as pdIf nothing much happens then you're good to go. However, if you get an `ImportError`, this means pandas has not been installed. To install pandas, open your command prompt and type ```pip install pandas```and hit enter. Once the install has finished, you should be good to go. Come back and retry running the cell above. With that done, let's jump in! Exercise 1: Reading in `.txt` files a) `with` statementsIf you change tab to the Jupyter file explorer, you'll notice a folder named `dreams`. If you take a look inside, you'll see 5 files called `dream1.txt`, `dream2.txt` ... , `dream5.txt`. These are short write-ups of dreams people have had, taken from the website [DreamBank](http://www.dreambank.net/), which has a collection of over 20,000 dreams recorded from many different people. Each file is made up of 3 lines. The first gives the name of the dreamer, the second gives a date, and the third has the dream content. Below is some code that reads in `dream1.txt`. In the cell below that, rewrite the code so that it does the exact same thing, but uses the `with` statement.text_file = open('dreams/dream1.txt') contents = text_file.read() print(contents) text_file.close() with open('dreams/dream1.txt') as text_file: contents = text_file.read() print(contents)Name: Alta Date: 19/01/1986 At work (?) was on the elevator with an older woman who wasn't sure what floor she wanted so she pushed extra buttons - I'd already gone up when I meant to go down to 2, and wasn't thrilled at having to stop so much. So I got off (she was surprised I took exception) and found myself outside, sort of, and now I have to get in a car that goes somewhere down some streets like in Oak town - the car was like an old '50s sort of thing with fins and they were coming off and needed to be stuck back down, took a while - arrived at a place that was an "elevator stop", nice old tree-lined street, the door didn't face the street - it was ivy-covered stone, very nice. Inside it was cave-like and open, but there was the elevator door. There was somebody there as well as 2 cats, and I made some comment about them having kittens (like it would be a good idea) and the guy said they were both males or something and I felt the same sort of cuteness about the exchange you usual[...]b) `readlines`Copy and paste your code from above. Replace `.read()` with `.readlines()`. This will create a list, where each element is a new line.Remember, sometimes it may look like text is going over multiple lines when really it's on just one. Using list indexing, select the 3rd element from this list (remember, list indexing starts from 0) so that you extract only the text, not the name or date. Print this text.with open('dreams/dream1.txt') as text_file: contents = text_file.readlines()[2] print(contents)At work (?) was on the elevator with an older woman who wasn't sure what floor she wanted so she pushed extra buttons - I'd already gone up when I meant to go down to 2, and wasn't thrilled at having to stop so much. 
So I got off (she was surprised I took exception) and found myself outside, sort of, and now I have to get in a car that goes somewhere down some streets like in Oak town - the car was like an old '50s sort of thing with fins and they were coming off and needed to be stuck back down, took a while - arrived at a place that was an "elevator stop", nice old tree-lined street, the door didn't face the street - it was ivy-covered stone, very nice. Inside it was cave-like and open, but there was the elevator door. There was somebody there as well as 2 cats, and I made some comment about them having kittens (like it would be a good idea) and the guy said they were both males or something and I felt the same sort of cuteness about the exchange you usually do talking about kitties.[...]c) Extracting the text from multiple filesComplete the code below to print the text from all five files.os.listdir('dreams') import os for file_name in os.listdir('dreams'): path = 'dreams/' + file_name with open(path) as text_file: print(text_file.readlines()[2])A tall many-storied building stands near a road. A construction crew is looking for a structural problem. I go into the building to the basement and I see a twisted place in a girder, a long beam. I come out and tell the foreman the building is about to collapse. We start running away and hiding in the bushes. I am next to the road and realize I must stop traffic because the building is going to fall on the road. I run onto the road yelling at the pedestrians to stop! Some ignore me and break past me, walking into the dangerous area. I see the building and actually try to visualize it falling because it isn't and I've got all these people stopped and they are annoyed at me. It finally collapses far away from the road. A person glares at me for hindering them. I shrug. Better safe than sorry. At work (?) was on the elevator with an older woman who wasn't sure what floor she wanted so she pushed extra buttons - I'd already gone up when I meant to go down to 2, and wasn't thrilled at havi[...]Exercise 2: Writing filesBelow you will see a function being imported from a file called `random_sentence.py`. (This is another use of `import` - to bring in functions written in external files). Try running the function `generate_random_sentence()` a few times to see what it does.from random_sentence import generate_random_sentence random_sentence = generate_random_sentence() print(random_sentence)The splendid pug really loved the industrious ducka) Writing many lines 1By using the function `generate_random_sentence()`, complete the code below to write 100 random sentences to a file called `random_sentences1.txt`, using the `.write()` function. Check this has worked by opening the file in the Jupyter launcher.Remember to use the `\n` character to signify a new line.n_sentences = 100 with open('random_sentences1.txt', 'w') as my_file: for i in range(n_sentences): my_file.write(generate_random_sentence() + '\n')a) Writing many lines 2Do the same thing again, but this time use a list of sentences with the `.writelines()` function.n_sentences = 100 lines = [] for i in range(n_sentences): lines.append(generate_random_sentence() + '\n') with open('random_sentences2.txt', 'w') as my_file: my_file.writelines(lines)Exercise 3: Pandas a) Creating a DataFrame from a list of listsUse the following data to create a pandas DataFrame. 
Store it in a variable called `df`.import pandas as pd column_names = ['name', 'calories', 'protein', 'fat', 'sodium', 'fiber'] data = [['100% Bran', 70, 4, 1, 130, 10], ['100% Natural Bran', 120, 3, 5, 15, 2], ['All-Bran', 70, 4, 1, 260, 9], ['All-Bran with Extra Fiber', 50, 4, 0, 140, 14], ['Almond Delight', 110, 2, 2, 200, 1]] df = pd.DataFrame(data, columns=column_names) dfb) Column informationUse the function `df.mean()` to find the mean of each column. Do the same for `.min()` and `.max()`.df.mean() df.min() df.max()c) Loading data from a csv fileBelow is some code that opens the full dataset from a file. Edit it so that the `'name'` column becomes the index column.df = pd.read_csv('cereal.csv', index_col='name') dfd) Getting row informationUse `.loc[]` to find out all information about `'Cheerios'`df.loc['Cheerios']e) Adding a healthy or unhealthy labelWe are now going to add a new column called `'healthy'` which will contain the values `True` or `False` for each cereal type. If the calories are greater than 120 or the sugar is greater than 10, we will mark it as unhealthy. Otherwise we will mark it as healthy. The code below creates a list where each element is `True` or `False`, which specifies the health status of the corresponding cereal. Add this as a new column to the dataframe.healthy = [] for cereal in df.index: row = df.loc[cereal] if row['calories'] > 120 or row['sugars'] > 10: healthy.append(False) else: healthy.append(True) print(healthy) df['healthy'] = healthy dfPrediction using Unsupervised ML It is based on iris dataset,In this section use clustering using kmeans from sklearn library after that I visualize the data, using scatter ploting.# import libraries import numpy as np import pandas as pd from sklearn.cluster import KMeans from matplotlib import pyplot import seaborn data = pd.read_csv('Iris.csv') data # remove unnecessary columns data = data.iloc[:,1:] data.info() data x = data.iloc[:,:-1].values x[:,0] y = data.iloc[:,-1].values y # convert string to int mean machine-readable form. from sklearn.preprocessing import LabelEncoder y_labal = LabelEncoder() y = y_labal.fit_transform(y) yElbow Method# Elbow method to find suitable k for clustering. sum_squer_error = [] for i in range(1,11): km = KMeans(n_clusters=i) km.fit(x) sum_squer_error.append(km.inertia_) pyplot.xlabel('K') pyplot.ylabel('ssc') pyplot.plot(range(1,11),sum_squer_error) pyplot.title('Elbow Method') pyplot.grid(True) # from elbow method K= 3. kmeans = KMeans(n_clusters=3) kmeans.fit(x) y_means = kmeans.predict(x) y_means pd.DataFrame({'precdiction':y_means,'actual':y})Visualization of Iris dataset with 3 clusters Using the elbow method we found that there is 3 optimum cluster for this iris datasetpyplot.scatter(x[:,0],x[:,2],c = y_means) pyplot.xlabel('SepalLength(cm)') pyplot.ylabel('PetalLength(cm)') pyplot.title('SepalLength v/s PetalLenfth') pyplot.scatter(data['Species'],data['SepalWidthCm'])To better understanding we use seaborn library .import seaborn seaborn.swarmplot(x = 'Species',y= 'SepalWidthCm',data=data) seaborn.swarmplot(x = 'Species',y= 'PetalLengthCm',data=data)/home/nachiket/.local/lib/python3.6/site-packages/seaborn/categorical.py:1296: UserWarning: 14.0% of the points cannot be placed; you may want to decrease the size of the markers or use stripplot. warnings.warn(msg, UserWarning)Problem statementGiven an `input_list` and a `target`, return the pair of indices in the list that holds the values which sum to the `target`. 
For example, `input_list = [1, 5, 9, 7]` and `target = 8`, the answer would be `[0, 3]` **Note**1. The best solution takes O(n) time. *This means that you cannot traverse the given list more than once.* **Hint - Think of an additional data structure that you should use here.** 2. You can assume that the list does not have any duplicates.def pair_sum_to_target(input_list, target): # TODO: Write pair sum to target index_dict = dict() # Avoid element such as 6 - 3 for index, element in enumerate(input_list): difference = target - element if difference in index_dict: return [index_dict[difference], index] index_dict[element] = index return [-1, -1] def test_function(test_case): output = pair_sum_to_target(test_case[0], test_case[1]) print(output) if sorted(output) == test_case[2]: print("Pass") else: print("Fail") test_case_1 = [[1, 5, 9, 7], 8, [0, 3]] test_function(test_case_1) test_case_2 = [[10, 5, 9, 8, 12, 1, 16, 6], 16, [0, 7]] test_function(test_case_2) test_case_3 = [[0, 1, 2, 3, -4], -4, [0, 4]] test_function(test_case_3)[0, 4] PassUnity ML Agents - 3DBallIn this example, we are going to use a evolutionary algorithm to train a dense neural network for the Unity's [3DBall environment](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md). The challenge of this environment is to move the agent in a way that doesn't drop the ball. ![Environment](3DBall-Figure1.png) Install dependencies and environmentFirst, you need to install the libraries to interact with the Unity environment:!pip3 install mlagents==0.27.0 !pip3 install gym_unityFor the environment, there are two options:- Use the pre-built version (Windows): [3DBall Windows Build](https://drive.google.com/file/d/150UDRhKwpFWaKiehBYE3Qk7xfLE2GGTN/view?usp=sharing), download and extract.- Build manually: - Install Unity and clone the ML Agents repository: [Installation Instructions](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Installation.md) - Open the repository project in Unity ("Project" folder) - Open the "3DBall" scene, and keep only one copy of the environment (delete "3DBall (1-11)", keeping only the first "3DBall"), as the interface only works with scenes with an agent. - Build the scene: [Using an Environment Executable](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Executable.md), follow the instructions in "Building the 3DBall environment"Once the environment is built, change the path in the block below to the environment path:args = {"env_path": "D:\\Github\\ml-agents\\builds\\3dball_single\\UnityEnvironment.exe"}EvaluatorOur agent will be a dense neural network, which will have the weights encoded by the individual. The class that will be our fitness function is defined in the file ["unity3dball_evaluator.py"](unity3dball_evaluator.py).import evolvepy as ep import numpy as np from matplotlib import pyplot as plt import unity3dball_evaluator as unity3dballWe create the evaluator:evaluator = ep.evaluator.ProcessEvaluator(unity3dball.Unity3DBallEvaluator, args = args)And the descriptor of our individual. Let's use a network with 2 hidden layers, each with 20 units.:import nn descriptor = nn.create_descriptor(input_size=8, output_size=2, units=[20,20])Generatorlet's start by defining our population size. 
As we are going to parallelize the evaluations, it is interesting to use a size multiple of the number of processors, using all the computational power:import multiprocessing as mp population_size = (100//mp.cpu_count())*mp.cpu_count() population_sizeOur generator will have elitism, keeping the best 5% individuals of the generation, and random predation, exchanging the worst 75% individuals for better individuals (in the other 25%):first = ep.generator.Layer() sort = ep.generator.Sort() first.next = sort concat = ep.generator.Concatenate() predation = ep.generator.RandomPredation(int(0.75*population_size)) combine = ep.generator.CombineLayer(ep.generator.selection.tournament, ep.generator.crossover.one_point) mutation = ep.generator.mutation.NumericMutationLayer(ep.generator.mutation.sum_mutation, 1.0, 0.9, (-0.5, 0.5)) filter0 = ep.generator.FilterFirsts(int(np.floor(0.95*population_size))) sort.next = predation predation.next = combine combine.next = mutation mutation.next = filter0 filter0.next = concat filter1 = ep.generator.FilterFirsts(int(np.ceil(0.05*population_size))) sort.next = filter1 filter1.next = concat generator = ep.generator.Generator(first_layer=first, last_layer=concat, descriptor=descriptor)EvolverFinally, we define our Evolver. We gonna use dynamic mutation to avoid getting stuck at a local maximum:dyn_mut = ep.callbacks.DynamicMutation([mutation.name], refinement_patience=5, exploration_patience=5, refinement_steps=5) evolver = ep.Evolver(generator, evaluator, population_size, [dyn_mut])Evaluation and resultsLet's evolve for 70 generations:hist, last_pop = evolver.evolve(70)And see the results:plt.plot(hist.max(axis=1)) plt.xlabel("Generation") plt.ylabel("Fitness") plt.title("Evolution History") plt.show()We can see that the agent manages to improve in the environment, reaching the maximum possible fitness of 100:![](3DBall-Figure2.jpg) Let's see how the best individual fared in the environment:best = last_pop[np.argmax(hist[-1])] test_evaluator = unity3dball.Unity3DBallEvaluator(show=True, args = args) test_evaluator.evaluate([best])Examplesimport pickle from google.colab import drive drive.mount('/content/drive') # Example - ploting X_train X_train = pickle.load(open('/content/drive/My Drive/pTSA_microlensing/X_train.p', 'rb')) y_train = pickle.load(open('/content/drive/My Drive/pTSA_microlensing/y_train.p', 'rb')) plot=plot_data(X_train, y_label=y_train) plot.savefig('/content/drive/My Drive/pTSA_microlensing/X_train.svg') # Save in SVG for high-quality vectorized image plot.show() # Inline in PNG for easy copy-and-pasteimport numpy as np import matplotlib.pyplot as plt P = np.array([[2,8], [7,4] ]) a = np.arange(0,1,.15) plt.scatter(a*P[0,0], a*P[1,0], color = 'Pink') plt.scatter(a*P[0,1], a*P[1,1], color = 'Green') plt.xlim(-10, 10) plt.ylim(-10,10) plt.grid() plt.show() Pa = np.array([[-2, -8], [-7, -4] ]) a = np.arange(0,1,.20) plt.scatter(a*Pa[0,0], a*Pa[1,0], color = 'Pink') plt.scatter(a*Pa[0,1], a*Pa[1,1], color = 'Green') plt.xlim(-10,10) plt.ylim(-10,10) plt.grid() plt.show() E = np.array([[4,1], [2,4] ]) a = np.arange(0,1,.20) plt.scatter(a*E[0,0], a*E[1,0], color = 'Pink') plt.scatter(a*E[0,1], a*E[1,1], color = 'Green') plt.xlim(-10,10) plt.ylim(-10,10) plt.grid() plt.show() E1 = np.array([[-4,-0], [0,-4] ]) a = np.arange(0,1,.15) plt.scatter(a*E1[0,0], a*E1[1,0], color = 'Pink') plt.scatter(a*E1[0,1], a*E1[1,1], color = 'Green') plt.xlim(-10,10) plt.ylim(-10,10) plt.grid() plt.show()Band structure of bulk bismuthfrom tb import Hamiltonian, Orbitals 
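# (Added orientation comment, inferred from the calls used below: Orbitals declares
# the LCAO basis per atom species, set_tb_params registers the two-centre
# Slater-Koster parameters for each neighbour shell, Hamiltonian assembles the
# tight-binding matrix from an xyz geometry, and set_periodic_bc /
# diagonalize_periodic_bc add periodicity and solve at each k-point.)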
from tb import set_tb_params, get_k_coords_ _ _ _ _ | \ | | __ _ _ __ ___ | \ | | ___| |_ | \| |/ _` | '_ \ / _ \| \| |/ _ \ __| | |\ | (_| | | | | (_) | |\ | __/ |_ |_| \_|\__,_|_| |_|\___/|_| \_|\___|\__| Vesion 1.0Below we set a LCAO sp3 basis set for Bi atoms.bi_orb = Orbitals('Bi') bi_orb.add_orbital("s", energy=-10.906, principal=0, orbital=0, magnetic=0, spin=0) bi_orb.add_orbital("px", energy=-0.486, principal=0, orbital=1, magnetic=-1, spin=0) bi_orb.add_orbital("py", energy=-0.486, principal=0, orbital=1, magnetic=1, spin=0) bi_orb.add_orbital("pz", energy=-0.486, principal=0, orbital=1, magnetic=0, spin=0) bi_orb.add_orbital("s", energy=-10.906, principal=0, orbital=0, magnetic=0, spin=1) bi_orb.add_orbital("px", energy=-0.486, principal=0, orbital=1, magnetic=-1, spin=1) bi_orb.add_orbital("py", energy=-0.486, principal=0, orbital=1, magnetic=1, spin=1) bi_orb.add_orbital("pz", energy=-0.486, principal=0, orbital=1, magnetic=0, spin=1)The primitive cell of crystalline bismuth has two atoms:xyz_coords = """2 Bi2 cell Bi1 0.0 0.0 0.0 Bi2 0.0 0.0 5.52321494 """ h = Hamiltonian(xyz=xyz_coords, nn_distance=4.6, so_coupling=1.5) import numpy as np def radial_dep(coords): norm_of_coords = np.linalg.norm(coords) if norm_of_coords < 3.3: return 1 elif 3.7 > norm_of_coords > 3.3: return 2 elif 5.0 > norm_of_coords > 3.7: return 3 else: return 100 # 1NN - Bi-Bi PAR1 = {'ss_sigma': -0.608, 'sp_sigma': 1.320, 'pp_sigma': 1.854, 'pp_pi': -0.600} # 2NN - Bi-Bi PAR2 = {'ss_sigma': -0.384, 'sp_sigma': 0.433, 'pp_sigma': 1.396, 'pp_pi': -0.344} # 3NN - Bi-Bi PAR3 = {'ss_sigma': 0, 'sp_sigma': 0, 'pp_sigma': 0.156, 'pp_pi': 0} set_tb_params(PARAMS_BI_BI1=PAR1, PARAMS_BI_BI2=PAR2, PARAMS_BI_BI3=PAR3) h.initialize(radial_dep) primitive_cell = [[-2.2666 , -1.30862212, 3.93223333], [ 2.2666 , -1.30862212, 3.93223333], [ 0. 
, 2.61724424, 3.93223333]] h.set_periodic_bc(primitive_cell) sym_points = ['K', 'GAMMA', 'T', 'W', 'L', 'LAMBDA'] num_points = [10, 10, 10, 10, 10] special_k_points = {'GAMMA': [0.0, 0.0, 0.0], 'K': [0.35985144675492087, -0.8002652081237402, 0.5326462926072546], 'L': [0.69305, -0.4001326040618701, 0.2663231463036273], 'LAMBDA': [0.0, 0.0, 0.39948471945544095], 'T': [0.0, 0.0, 0.7989694389108819], 'U': [0.5397771701323816, -0.31164049447834485, 0.7989694389108819], 'W': [0.3598514467549211, -0.6232809889566897, 0.7989694389108819], 'X': [0.0, -0.8002652081237402, 0.5326462926072546]} k_points = get_k_coords(sym_points, num_points, special_k_points) band_structure = [] for jj, item in enumerate(k_points): [eigenvalues, _] =\ h.diagonalize_periodic_bc(k_points[jj]) band_structure.append(eigenvalues) import matplotlib.pyplot as plt plt.figure(dpi=100) ax = plt.axes() plt.ylim((-15, 5)) ax.set_title('Band structure of the bulk bismuth') ax.set_ylabel('Energy (eV)') ax.plot(band_structure, 'k') ax.plot([0, len(band_structure)], [0, 0], '--', color='k', linewidth=0.5) plt.xticks(np.insert(np.cumsum(num_points)-1,0,0), labels=sym_points) ax.xaxis.grid() plt.show()Load Processed Vectorized Datadata = pickle.load(open('data_face_features.pickle', mode='rb')) data.keys()Splitting DataX = np.array(data['data']) y = np.array(data['label']) X.shape,y.shape X = X.reshape(-1,128) X.shape from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=0.8, random_state=0) X_train.shape,X_test.shape, y_train.shape, y_test.shapeTraining Modelsfrom sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.metrics import classification_report, accuracy_score, f1_scoreLogistic Regressionmodel_log = LogisticRegression() model_log.fit(X_train,y_train) y_pred_train = model_log.predict(X_train) y_pred_test = model_log.predict(X_test) #accuracy score acc_train = accuracy_score(y_train,y_pred_train) acc_test = accuracy_score(y_test,y_pred_test) # f1-score f1_score_train = f1_score(y_train,y_pred_train,average='macro') f1_score_test= f1_score(y_test,y_pred_test,average='macro')Logistic Regression Evaluation Metricsprint(f'accuracy score for training set = {acc_train}') print(f'accuracy score for test set = {acc_test}') print(f'f1 score for training set = {f1_score_train}') print(f'f1 score score for test set = {f1_score_test}') #function to evaluate different trained models def evaluation(model,x_train,y_train,x_test,y_test): y_pred_train = model.predict(X_train) y_pred_test = model.predict(X_test) #accuracy score acc_train = accuracy_score(y_train,y_pred_train) acc_test = accuracy_score(y_test,y_pred_test) # f1-score f1_score_train = f1_score(y_train,y_pred_train,average='macro') f1_score_test= f1_score(y_test,y_pred_test,average='macro') print(f'accuracy score for training set = {acc_train}') print(f'accuracy score for test set = {acc_test}') print(f'f1 score for training set = {f1_score_train}') print(f'f1 score score for test set = {f1_score_test}')SVMmodel_svc = SVC(probability=True) model_svc.fit(X_train,y_train) evaluation(model_svc,X_train,y_train,X_test,y_test) #svc performs better than Logaccuracy score for training set = 0.8319641523525019 accuracy score for test set = 0.7074626865671642 f1 score for training set = 0.8325899156931432 f1 score score for test set = 0.7150649550951717Random Forestmodel_rf = 
RandomForestClassifier() model_rf.fit(X_train,y_train) evaluation(model_rf,X_train,y_train,X_test,y_test) #overfitaccuracy score for training set = 1.0 accuracy score for test set = 0.6626865671641791 f1 score for training set = 1.0 f1 score score for test set = 0.6598291568556367Voting Classifiermodel_voting = VotingClassifier(estimators=[ ('log',LogisticRegression()), ('svm',SVC(probability=True)), ('rf',RandomForestClassifier()) ], voting='soft', weights=[2,3,1]) model_voting.fit(X_train,y_train) evaluation(model_voting,X_train,y_train,X_test,y_test) from sklearn.model_selection import GridSearchCV model_grid = GridSearchCV(model_voting, param_grid={ 'svm__C':[3,5,7,10], 'svm__gamma':[0.1,0.3,0.5], 'rf__n_estimators':[5,10,20], 'rf__max_depth':[3,5,7], 'voting':['soft','hard'] },scoring='accuracy',cv=3,n_jobs=1,verbose=2) model_grid.fit(X_train,y_train) model_grid.best_params_ #model_grid.best_estimator_ model_grid.best_score_ model_best_estimator = model_grid.best_estimator_Save Modelpickle.dump(model_best_estimator,open('models/machinelearning_face_person_identity2.pkl',mode='wb'))This Notebook shows how to use the Camera_Calibration_API to calibrate the camera using symmetrical circular grid patternimport sys sys.path.append("../../") from camera_calibration import Camera_Calibration_API import glob import matplotlib.pyplot as plt %matplotlib inline import os import cv2 test_img = cv2.imread("../example_images/symmetric_grid/Image__2018-02-14__10-12-45.png",0) print(test_img.shape) plt.imshow(test_img,cmap="gray") plt.title("One of the calibration images") plt.show() symmetric_circles = Camera_Calibration_API(pattern_type="symmetric_circles", pattern_rows=6, pattern_columns=5, distance_in_world_units = 10, debug_dir=None) %%time results = symmetric_circles.calibrate_camera(glob.glob("../example_images/symmetric_grid/*.png")) symmetric_circles.calibration_df symmetric_circles.visualize_calibration_boards(20,10)TensorFlow Estimators ```{admonition} AttributionThis notebook builds on Chapter 14: *Going Deeper – The Mechanics of TensorFlow* of {cite}`RaschkaMirjalili2019`.``` In this notebook, we will work with TensorFlow Estimators. The `tf.estimator` API encapsulates the underlying steps in machine learning tasks, such as training, prediction (inference), and evaluation. Estimators are more encapsulated but also more scalable when compared to the previous approaches that we have covered above. Also, the `tf.estimator` API adds support for running models on multiple platforms without requiring major code changes, which makes them more suitable for the so-called "production phase" in industry applications. TensorFlow comes with a selection of off-the-shelf estimators for common machine learning and deep learning architectures that are useful for comparison studies, for example, to quickly assess whether a certain approach is applicable to a particular dataset or problem. Besides using pre-made Estimators, we can also create an Estimator by converting a Keras model to an Estimator.import tensorflow as tf print(tf.__version__) print(tf.config.list_physical_devices('GPU'))2.7.0 [PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]Working with feature columns In machine learning and deep learning applications, we can encounter variousdifferent types of features: continuous, unordered categorical (nominal), and ordered categorical (ordinal). 
Note that while numeric data can be either continuous or discrete, in the context of the TensorFlow API, "numeric" data specifically refers to continuous data of the floating point type. Sometimes, feature sets are comprised of a mixture of different feature types. While TensorFlow Estimators were designed to handle all these different types of features, we must specify how each feature should be interpreted by the Estimator. Auto MPG dataset ```{figure} ../../img/feature_cols.png ---width: 60emname: feature_cols---Assigning types to feature columns from the Auto MPG dataset.``` To demonstrate the use of TF Estimators, we use the [Auto MPG dataset](https://archive.ics.uci.edu/ml/datasets/auto+mpg). We are going to treat five features from the Auto MPG dataset (*number of cylinders*, *displacement*, *horsepower*, *weight*, and *acceleration*) as numeric (i.e. continuous) features. The *model year* can be regarded as an ordered categorical feature. Lastly, the *manufacturing origin* can be regarded as an unordered categorical feature with three possible discrete values, 1, 2, and 3, which correspond to the US, Europe, and Japan, respectively. {numref}`feature_cols` above shows how we will treat these feature columns.import pandas as pd import numpy as np dataset_path = tf.keras.utils.get_file( "auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data" ) column_names = [ "MPG", "Cylinders", "Displacement", "Horsepower", "Weight", "Acceleration", "ModelYear", "Origin" ] # Load dataset; drop missing values df = pd.read_csv(dataset_path, names=column_names, na_values="?", comment="\t", sep=" ", skipinitialspace=True) print("Shape:", df.shape) print("No. of missing values:") print(df.isna().sum()) # For simplicity drop rows with missing values. df = df.dropna().reset_index(drop=True) df.tail()Shape: (398, 8) No. 
of missing values: MPG 0 Cylinders 0 Displacement 0 Horsepower 6 Weight 0 Acceleration 0 ModelYear 0 Origin 0 dtype: int64Splitting the dataset and standardizing numerical columns:import sklearn import sklearn.model_selection df_train, df_test = sklearn.model_selection.train_test_split(df, train_size=0.8) train_stats = df_train.describe().transpose() train_stats numeric_column_names = [ 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration' ] df_train_norm, df_test_norm = df_train.copy(), df_test.copy() for col_name in numeric_column_names: train_mean = train_stats.loc[col_name, 'mean'] train_std = train_stats.loc[col_name, 'std'] df_train_norm.loc[:, col_name] = (df_train_norm.loc[:, col_name] - train_mean) / train_std df_test_norm.loc[:, col_name] = (df_test_norm.loc[:, col_name] - train_mean) / train_std df_train_norm.tail()Numeric features In the following code, we will use TensorFlow's `feature_column` functionto transform the 5 continuous features into the feature column data structure thatTensorFlow Estimators can work with:numeric_features = [] for col_name in numeric_column_names: feature_column = tf.feature_column.numeric_column(key=col_name) numeric_features.append(feature_column) print(numeric_features)[NumericColumn(key='Cylinders', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='Displacement', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='Horsepower', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='Weight', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), NumericColumn(key='Acceleration', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None)]Bucketized features Next, let's group the rather fine-grained model year information into buckets tosimplify the learning task for the model that we are going to train later. Note that we assign `boundaries=[73, 76, 79]` which results in left-closed partitioning of the real line into 4 intervals `(-∞, 73)`, `[73, 76)`, `[76, 79)`, and `[79, +∞)`.feature_year = tf.feature_column.numeric_column(key="ModelYear") bucketized_column = tf.feature_column.bucketized_column( source_column=feature_year, boundaries=[73, 76, 79] ) # For consistency, we create list of bucketized features bucketized_features = [] bucketized_features.append(bucketized_column) print(bucketized_features)[BucketizedColumn(source_column=NumericColumn(key='ModelYear', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(73, 76, 79))]Categorical indicator features Next, we will proceed with defining a list for the unordered categorical feature,`Origin`. Here we use `categorical_column_with_vocabulary_list` in `tf.feature_column` and provide a list of all possible category names as input. ```{tip}If the list of possible categories is too large, we can use `categorical_column_with_vocabulary_list` and provide a file that contains all the categories/words so that we do not have to store a list of all possible words in memory. If the features are already associatedwith an index of categories in the range `[0, num_categories)`, then we can use the`categorical_column_with_identity` function. 
However,in this case, the feature `Origin` is given as integer values `1`, `2`, `3` (as opposed to `0`, `1`, `2`), which does not match the requirement for categorical indexing.```print(df.Origin.unique()) feature_origin = tf.feature_column.categorical_column_with_vocabulary_list( key='Origin', vocabulary_list=[1, 2, 3] )```{margin}Refer to the [official TensorFlow docs](https://www.tensorflow.org/api_docs/python/tf/feature_column) for other implemented feature columns such as hashed columns and crossed columns.```Certain Estimators, such as `DNNClassifier` and `DNNRegressor`, only accept so-called"dense columns." Therefore, the next step is to convert the existing categorical feature column to such a dense column. There are two ways to do this: using an embedding column via `embedding_column` or an indicator column via `indicator_column`. We use the latter which converts the categorical indices to one-hot encoded vectors to convert the categorical column to a dense format:indicator_column = tf.feature_column.indicator_column(feature_origin) # For consistency, we create list of nominal features categorical_indicator_features = [] categorical_indicator_features.append(indicator_column) print(categorical_indicator_features)[IndicatorColumn(categorical_column=VocabularyListCategoricalColumn(key='Origin', vocabulary_list=(1, 2, 3), dtype=tf.int64, default_value=-1, num_oov_buckets=0))]Machine learning with pre-made estimators Input functionsWe have to define an **input function** that processes the data and returns a TensorFlow dataset consisting of a tuple that contains the input features and the targets. Note that the features must be a dictionary format such that the keys match the names (or keys) of feature columns.def train_input_fn(df_train, batch_size=8): df = df_train.copy() x_train, y_train = df, df.pop('MPG') dataset = tf.data.Dataset.from_tensor_slices((dict(x_train), y_train)) # Shuffle, batch, and repeat the examples return dataset.shuffle(1000).batch(batch_size).repeat() # Inspection ds = train_input_fn(df_train_norm) batch = next(iter(ds)) print('Keys:', batch[0].keys()) print('Batch Model Years:', batch[0]['ModelYear']) print('Batch MPGs (targets):', batch[1].numpy())Keys: dict_keys(['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'ModelYear', 'Origin']) Batch Model Years: tf.Tensor([71 74 78 76 72 74 72 75], shape=(8,), dtype=int64) Batch MPGs (targets): [19. 16. 17.7 26. 13. 14. 12. 16. ]Input function for evaluation:def eval_input_fn(df_eval, batch_size=8): df = df_eval.copy() x_eval, y_eval = df, df.pop('MPG') dataset = tf.data.Dataset.from_tensor_slices((dict(x_eval), y_eval)) # Shuffle, batch, and repeat the examples return dataset.shuffle(1000).batch(batch_size).repeat() # Inspection ds = eval_input_fn(df_test_norm) batch = next(iter(ds)) print('Keys:', batch[0].keys()) print('Batch Model Years:', batch[0]['ModelYear']) print('Batch MPGs (targets):', batch[1].numpy())Keys: dict_keys(['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'ModelYear', 'Origin']) Batch Model Years: tf.Tensor([80 78 76 81 72 73 70 71], shape=(8,), dtype=int64) Batch MPGs (targets): [41.5 21.6 29. 32.4 14. 12. 22. 23. ]Initializing the Estimator Since predicting MPG valuesis a typical regression problem, we will use `tf.estimator.DNNRegressor`. 
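A note on a name used further below: the reloaded regressor and the boosted-tree cells refer to `all_feature_columns`, which is not defined anywhere in the text shown here. A minimal cell consistent with that later usage (an assumption, not taken from the original) would be:

```python
# Combine the three feature-column groups defined above into a single list;
# later cells refer to this combined list as `all_feature_columns`.
all_feature_columns = (
    numeric_features + bucketized_features + categorical_indicator_features
)
```

With this in place, `feature_columns=all_feature_columns` in those later cells matches the explicit sum passed to the regressor below.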
Wheninstantiating the regression Estimator, we will provide the list of feature columnsand specify the number of hidden units that we want to have in each hidden layerusing the argument `hidden_units`.regressor = tf.estimator.DNNRegressor( feature_columns=( numeric_features + bucketized_features + categorical_indicator_features ), hidden_units=[32, 10], model_dir='models/autompg-dnnregressor/')INFO:tensorflow:Using default config. INFO:tensorflow:Using config: {'_model_dir': 'models/autompg-dnnregressor/', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}The other argument, `model_dir`, that we have provided specifies the directoryfor saving model parameters. One of the advantages of Estimators is that theyautomatically checkpoint the model during training, so that in case the training ofthe model crashes for an unexpected reason, we can easily loadthe last saved checkpoint and continue training from there. The checkpoints will alsobe saved in the directory specified by `model_dir`. TrainingThe `.train()` method expects two arguments. The argument `input_fn` expects a callable that returns a batch of training examples. The `steps` which is the total number of SGD updates (or calls to the input function) is calculated as follows:EPOCHS = 30 BATCH_SIZE = 8 total_steps = EPOCHS * int(np.ceil(len(df_train) / BATCH_SIZE)) print('Training Steps:', total_steps) regressor.train( input_fn=lambda: train_input_fn(df_train_norm, batch_size=BATCH_SIZE), steps=total_steps )Training Steps: 1200 INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Create CheckpointSaverHook. INFO:tensorflow:Graph was finalized. INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op.```{note}Recall that `model_dir` saves the checkpoints of the model during training. 
The last model can be loaded using the `warm_start_from` argument as follows:```pythonreloaded_regressor = tf.estimator.DNNRegressor( feature_columns=all_feature_columns, hidden_units=[32, 10], warm_start_from='models/autompg-dnnregressor/', model_dir='models/autompg-dnnregressor/')``` Evaluation To evaluate performance, we use the `.evaluate` method:eval_results = regressor.evaluate( input_fn=lambda: eval_input_fn(df_test_norm, batch_size=8) ) print(eval_results) pred_res = regressor.predict(input_fn=lambda: eval_input_fn(df_test_norm, batch_size=8)) print(next(iter(pred_res))) tf.get_logger().setLevel('ERROR') boosted_tree = tf.estimator.BoostedTreesRegressor( feature_columns=all_feature_columns, n_batches_per_layer=20, n_trees=200) boosted_tree.train( input_fn=lambda:train_input_fn(df_train_norm, batch_size=BATCH_SIZE)) eval_results = boosted_tree.evaluate( input_fn=lambda:eval_input_fn(df_test_norm, batch_size=8)) print(eval_results) print('Average-Loss {:.4f}'.format(eval_results['average_loss'])){'average_loss': 7.3268905, 'label/mean': 22.655697, 'loss': 7.2563562, 'prediction/mean': 22.736225, 'global_step': 24000} Average-Loss 7.3269WeatherPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from scipy.stats import linregress # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "../output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180)Generate Cities List# List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities)Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it'sbeing processed (with the city number and city name).# OpenWeatherMap API Key #api_key = api_keys.api_key # Starting URL for Weather Map API Call url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key +"&q=" #Cities weather data frame Cities_DF = pd.DataFrame({"City":cities}) Cities_DF["Temperature(F)"] ="" Cities_DF["Humidity(%)"] ="" Cities_DF["Cloudiness(%)"] ="" Cities_DF["Wind Speed(mph)"] ="" Cities_DF["Latitude"]="" Cities_DF["Longitude"]="" Cities_DF["Date"]="" Cities_DF.head() print("Beginning Data Retrieval") print("------------------------") for index, row in Cities_DF.iterrows(): time.sleep(.8) city =row["City"] response = requests.get(url+city) city_weather = response.json() print(f"Processing Record {index} | {city}") city_weather_url =response.url #print(response.url) try: Cities_DF.loc[index,"Temperature(F)"]=city_weather['main']['temp'] Cities_DF.loc[index,"Humidity(%)"]=city_weather['main']['humidity'] 
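# (Added note: the remaining fields are read from other parts of the OpenWeatherMap
# response -- cloud cover under 'clouds' -> 'all', wind speed under 'wind' -> 'speed',
# coordinates under 'coord', and the observation timestamp under 'dt'.)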
Cities_DF.loc[index,"Cloudiness(%)"]=city_weather['clouds']['all'] Cities_DF.loc[index,"Wind Speed(mph)"]=city_weather['wind']['speed'] Cities_DF.loc[index,"Latitude"]=city_weather['coord']['lat'] Cities_DF.loc[index,"Longitude"]=city_weather["coord"]['lon'] Cities_DF.loc[index,"Date"]=city_weather["dt"] except (KeyError, IndexError): print(f"City not found... skipping.") print("------------------------") print("Data Retrieval Complete") print("------------------------") print(Cities_DF)Beginning Data Retrieval ------------------------ Processing Record 0 | vaini Processing Record 1 | saleaula City not found... skipping. Processing Record 2 | korla Processing Record 3 | belaya gora Processing Record 4 | cidreira Processing Record 5 | saint-philippe Processing Record 6 | norman wells Processing Record 7 | tsihombe City not found... skipping. Processing Record 8 | puerto narino Processing Record 9 | busselton Processing Record 10 | nikolskoye Processing Record 11 | aberdeen Processing Record 12 | sur Processing Record 13 | male Processing Record 14 | isangel Processing Record 15 | lebu Processing Record 16 | ankazoabo Processing Record 17 | illoqqortoormiut City not found... skipping. Processing Record 18 | cobija Processing Record 19 | mataura Processing Record 20 | longyearbyen Processing Record 21 | mullaitivu City not found... skipping. Processing Record 22 | havre-saint-pierre Processing Record 23 | port alfred Processing Record 24 | mar del plata Processing Record[...]Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrameCities_DF.dropna(axis=0, inplace=True) Cities_DF.to_csv('city_data_output.csv') Cities_DF.head() Cities_DF["Lat"] = pd.to_numeric(Cities_DF["Latitude"],errors='coerce') Cities_DF["Lng"] = pd.to_numeric(Cities_DF["Longitude"],errors='coerce') Cities_DF["Max Temp(%)"] = pd.to_numeric(Cities_DF["Temperature(F)"],errors='coerce') Cities_DF["Humidity"] = pd.to_numeric(Cities_DF["Humidity(%)"],errors='coerce') Cities_DF["Cloudiness(F)"] = pd.to_numeric(Cities_DF["Cloudiness(%)"],errors='coerce') Cities_DF["Wind Speed"] = pd.to_numeric(Cities_DF["Wind Speed(mph)"],errors='coerce') Cities_DF["Date"] = pd.to_numeric(Cities_DF["Date"],errors='coerce') Cities_DF.describe() # Get index names for humidity > 100% index_names = Cities_DF[Cities_DF['Humidity'] >= 100].index #drop these rows Cities_DF.drop(index_names,inplace=True) Cities_DFInspect the data and remove the cities where the humidity > 100%.----Skip this step if there are no cities that have humidity > 100%.# Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data". # Get index names for humidity > 100% index_names = Cities_DF[Cities_DF['Humidity'] >= 100].index #drop these rows clean_city_data = Cities_DF.drop(index_names,inplace=False) clean_city_data clean_city_data.dtypesPlotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot#Latitude vs Temperature Plot plt.title("City Latitude vs Temperature(F)") plt.xlabel("Latitude") plt.ylabel("Temperature(F)") plt.grid() plt.scatter(clean_city_data["Lat"],clean_city_data["Max Temp(%)"],edgecolors="black") plt.savefig(f'../output_data/temp.png') plt.show()Latitude vs Temperature Scatter Plot InsightsThe plot maps 'cities' temperature against their latitude. 
The curve shows temperature increasing near the equator and dropping further away. The plot shows highers temperatures to right of the equator due to summer in northern hemisphere Latitude vs. Humidity Plot#Latitude vs Humidity Plot plt.title("City Latitude vs Humidity") plt.xlabel("Latitude") plt.ylabel("Humidity(%)") plt.grid() plt.scatter(clean_city_data["Lat"],clean_city_data["Humidity"],edgecolors="black") plt.savefig(f'../output_data/humidity.png') plt.show()Latitude vs Humidity Scatter Plot InsightsThe plot maps 'cities' humidity against their latitude. There is no corelation between humidity and latitude Latitude vs. Cloudiness Plot#Latitude vs Cloudiness Plot plt.title("City Latitude vs Cloudiness(%)") plt.xlabel("Latitude") plt.ylabel("Cloudiness(%)") plt.grid() plt.scatter(clean_city_data["Lat"],clean_city_data["Cloudiness(F)"],edgecolors="black") plt.savefig(f'../output_data/cloudiness.png') plt.show()Latitude vs Cloudiness Scatter Plot InsightsThe plot maps 'cities' cloudiness against their latitude. Cities selected randomlyThere is no obvious corelation between cloudiness and latitude. The plots are dense around 0% and 100% cloudiness Latitude vs. Wind Speed Plot#Latitude vs Wind Speed Plot plt.title("City Latitude vs Wind Speed(mph)") plt.xlabel("Latitude") plt.ylabel("Wind Speed(mph)") plt.grid() plt.scatter(clean_city_data["Lat"],clean_city_data["Wind Speed"],edgecolors="black") plt.savefig(f'../output_data/wind.png') plt.show()Latitude vs Wind Speed Scatter Plot InsightsThe plot maps 'cities' Wind Speed against their latitude. Cities selected randomlyThere is no obvious corelation between Windspeed and latitude. The plots are dense around 0to 20 mph windspeed Linear Regression#Create Northern and Southers Hemisphere Data Frames north_df = Cities_DF.loc[Cities_DF["Lat"] > 0] south_df = Cities_DF.loc[Cities_DF["Lat"] < 0] #create Linear regression function def linearreg(x_values,y_values,EQplotx,EQploty,y_label): (slope,intercept,rvalue,pvalue,stderr) = linregress(x_values,y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.scatter(x_values,y_values) plt.plot(x_values,regress_values,"r-") print(f'The r square if {rvalue}') print(line_eq) plt.annotate(line_eq,(EQplotx,EQploty),fontsize=10,color="red") plt.xlabel('Latitude') plt.ylabel(f'{y_label}') plt.grid()Northern Hemisphere - Max Temp vs. Latitude Linear Regression#set the data sets x_values = north_df["Lat"] y_values = north_df["Max Temp(%)"] #position equation EQplotx = 0 EQploty = -40 y_label = "Temperature (F)" plt.title(f'Northern Hemishphere Latitude vs Temp') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Northern Hemisphere Latitude vs Temp.png') plt.show()The r square if -0.8687064249124591 y = -1.3x + 93.15Southern Hemisphere - Max Temp vs. Latitude Linear Regression#set the data sets x_values = south_df["Lat"] y_values = south_df["Max Temp(%)"] #position equation EQplotx = -55 EQploty = 85 y_label = "Temperature (F)" plt.title(f'Southern Hemishphere Latitude vs Temp') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Southern Hemisphere Latitude vs Temp.png') plt.show()The r square if 0.6553140854442211 y = 0.42x + 80.83Northern/Southern vs Temp Linear Regression InsightsThe plot maps Northern and Southern 'cities' Temperature against their latitude. 
Cities selected randomlyThere is strong corelation between distance from the equator temperature. The closer the equator, the higher the temperature Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression#set the data sets x_values = north_df["Lat"] y_values = north_df["Humidity"] #position equation EQplotx = 50 EQploty = 15 y_label = "Humidity (%)" plt.title(f'Northern Hemishphere Latitude vs Humidity') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Northern Hemisphere Latitude vs Humidity.png') plt.show()The r square if 0.30891476307544147 y = 0.32x + 58.96Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression#set the data sets x_values = south_df["Lat"] y_values = south_df["Humidity"] #position equation EQplotx = -20 EQploty = 30 y_label = "Humidity (%)" plt.title(f'Southern Hemishphere Latitude vs Humidity') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Southern Hemisphere Latitude vs Humidity.png') plt.show()The r square if 0.39963757688948476 y = 0.43x + 85.15Northern/Southern vs Humidity Linear Regression InsightsThe plot maps Northern and Southern 'cities' Humidity against their latitude. Cities selected randomlyThere is no corelation between latitude and humidity Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression#set the data sets x_values = north_df["Lat"] y_values = north_df["Cloudiness(F)"] #position equation EQplotx = 50 EQploty = 25 y_label = "Cloudiness (F)" plt.title(f'Northern Hemishphere Latitude vs Cloudiness') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Northern Hemisphere Latitude vs Cloudiness.png') plt.show()The r square if 0.21961126688000715 y = 0.44x + 38.7Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression#set the data sets x_values = south_df["Lat"] y_values = south_df["Cloudiness(F)"] #position equation EQplotx = -55 EQploty = 60 y_label = "Cloudiness (F)" plt.title(f'Southern Hemishphere Latitude vs Cloudiness') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Southern Hemisphere Latitude vs Cloudiness.png') plt.show()The r square if 0.19783330572734323 y = 0.55x + 72.02Northern/Southern Latitude vs Cloudiness Linear Regression InsightsThe plot maps Northern and Southern 'cities' Cloudiness against their latitude. Cities selected randomlyThere is no corelation between latitude and cloudiness Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression#set the data sets x_values = north_df["Lat"] y_values = north_df["Wind Speed"] #position equation EQplotx = 5 EQploty = 25 y_label = "Cloudiness (F)" plt.title(f'Northern Hemishphere Latitude vs Wind Speed') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Northern Hemisphere Latitude vs Wind Speed.png') plt.show()The r square if 0.2169214257234315 y = 0.06x + 5.59Southern Hemisphere - Wind Speed (mph) vs. 
Latitude Linear Regression#set the data sets x_values = south_df["Lat"] y_values = south_df["Wind Speed"] #position equation EQplotx = -15 EQploty = 25 y_label = "Cloudiness (F)" plt.title(f'Southern Hemishphere Latitude vs Wind Speed') linearreg(x_values,y_values,EQplotx,EQploty,y_label) #save file plt.savefig(f'../output_data/Southern Hemisphere Latitude vs Wind Speed.png') plt.show()The r square if -0.3070553806782465 y = -0.12x + 5.11Semantic Role Labeling(SRL)copyright 2022, , MIT License.The notebook is an implementation of the Allen Institute for AI BERT-based model. [Reference usage of the Notebook](https://demo.allennlp.org/semantic-role-labeling/MjE4NjI1Ng==)The BERT-based model is an implementation of [ and , (2019), ‘Simple BERT Models for Relation Extraction and Semantic Role Labeling’]( https://arxiv.org/abs/1904.05255) Intalling Allen NLP!pip install allennlp==2.1.0 allennlp-models==2.1.0Collecting allennlp==2.1.0 Downloading allennlp-2.1.0-py3-none-any.whl (585 kB)  |████████████████████████████████| 585 kB 4.8 MB/s [?25hCollecting allennlp-models==2.1.0 Downloading allennlp_models-2.1.0-py3-none-any.whl (407 kB)  |████████████████████████████████| 407 kB 50.7 MB/s [?25hRequirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from allennlp==2.1.0) (3.1.0) Collecting sentencepiece Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)  |████████████████████████████████| 1.2 MB 40.2 MB/s [?25hCollecting overrides==3.1.0 Downloading overrides-3.1.0.tar.gz (11 kB) Collecting jsonpickle Downloading jsonpickle-2.1.0-py2.py3-none-any.whl (38 kB) Collecting jsonnet>=0.10.0 Downloading jsonnet-0.18.0.tar.gz (592 kB)  |████████████████████████████████| 592 kB 45.9 MB/s [?25hCollecting transformers<4.4,>=4.1 Downloading transformers-4.3.3-py3-none-any.whl (1.9 MB)[...]Importing the tagging predictorfrom allennlp.predictors.predictor import Predictor import allennlp_models.tagging import json predictor = Predictor.from_path("https://storage.googleapis.com/allennlp-public-models/structured-prediction-srl-bert.2020.12.15.tar.gz")[nltk_data] Downloading package punkt to /root/nltk_data... [nltk_data] Unzipping tokenizers/punkt.zip. [nltk_data] Downloading package wordnet to /root/nltk_data... [nltk_data] Unzipping corpora/wordnet.zip.Defining two display optionsdef head(prediction): # Iterating through the json to display excerpt of the prediciton for i in prediction['verbs']: print('Verb:',i['verb'],i['description']) def full(prediction): #print the full prediction print(json.dumps(prediction, indent = 1, sort_keys=True))Sample 1: Did Bob really think he could prepare a meal for 50 people in only a few hours?prediction=predictor.predict( sentence="Did Bob really think he could prepare a meal for 50 people in only a few hours?" 
) head(prediction) full(prediction){ "verbs": [ { "description": "[V: Did] Bob really think he could prepare a meal for 50 people in only a few hours ?", "tags": [ "B-V", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "Did" }, { "description": "Did [ARG0: Bob] [ARGM-ADV: really] [V: think] [ARG1: he could prepare a meal for 50 people in only a few hours] ?", "tags": [ "O", "B-ARG0", "B-ARGM-ADV", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "O" ], "verb": "think" }, { "description": "Did Bob really think he [V: could] [ARG1: prepare a meal for 50 people in only a few hours] ?", "tags": [ "O", "O", "O", "O", "O", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", [...]Sample 2: Mrs. and Mr. Tomaso went to Europe for vacation and visited Paris and first went to visit the Eiffel Tower.prediction=predictor.predict( sentence="Mrs. and Mr. Tomaso went to Europe for vacation and visited Paris and first went to visit the Eiffel Tower." ) head(prediction) full(prediction){ "verbs": [ { "description": "[ARG0: Mrs. and Mr. Tomaso] [V: went] [ARG4: to Europe] [ARGM-PRP: for vacation] and visited Paris and first went to visit the Eiffel Tower .", "tags": [ "B-ARG0", "I-ARG0", "I-ARG0", "I-ARG0", "B-V", "B-ARG4", "I-ARG4", "B-ARGM-PRP", "I-ARGM-PRP", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "went" }, { "description": "[ARG0: Mrs. and Mr. Tomaso] went to Europe for vacation and [V: visited] [ARG1: Paris] and first went to visit the Eiffel Tower .", "tags": [ "B-ARG0", "I-ARG0", "I-ARG0", "I-ARG0", "O", "O", "O", "O", "O", "O", "B-V", "B-ARG1", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "visited" }, { "description": "[ARG0: Mrs. and Mr. Tomaso] went to Europe for vacation and visited Paris and [ARGM-TMP: first] [V: w[...]Sample 3:John wanted to drink tea, Mary likes to drink coffee but Karim drank some cool water and Faiza would like to drink tomato juice.prediction=predictor.predict( sentence="John wanted to drink tea, Mary likes to drink coffee but Karim drank some cool water and Faiza would like to drink tomato juice." ) head(prediction) full(prediction){ "verbs": [ { "description": "[ARG0: John] [V: wanted] [ARG1: to drink tea] , Mary likes to drink coffee but Karim drank some cool water and Faiza would like to drink tomato juice .", "tags": [ "B-ARG0", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "wanted" }, { "description": "[ARG0: John] wanted to [V: drink] [ARG1: tea] , Mary likes to drink coffee but Karim drank some cool water and Faiza would like to drink tomato juice .", "tags": [ "B-ARG0", "O", "O", "B-V", "B-ARG1", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "drink" }, { "description": "John wanted to drink t[...]Sample 4: Alice, whose husband went jogging every Sunday, liked to go to a dancing class in the meantime.prediction=predictor.predict( sentence="Alice, whose husband went jogging every Sunday, liked to go to a dancing class in the meantime." 
) head(prediction) full(prediction){ "verbs": [ { "description": "Alice , [ARG0: whose husband] [V: went] [ARG1: jogging] [ARGM-TMP: every Sunday] , liked to go to a dancing class in the meantime .", "tags": [ "O", "O", "B-ARG0", "I-ARG0", "B-V", "B-ARG1", "B-ARGM-TMP", "I-ARGM-TMP", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "went" }, { "description": "Alice , [ARG0: whose husband] went [V: jogging] [ARGM-TMP: every Sunday] , liked to go to a dancing class in the meantime .", "tags": [ "O", "O", "B-ARG0", "I-ARG0", "O", "B-V", "B-ARGM-TMP", "I-ARGM-TMP", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O" ], "verb": "jogging" }, { "description": "[ARG0: Alice , whose husband went jogging every Sunday] , [V: liked] [ARG1: to go to a dancing class in the meantime] .", "tags": [ "B-ARG0"[...]Sample 5: The bright sun, the blue sky, the warm sand, the palm trees, everything round off.prediction=predictor.predict( sentence="The bright sun, the blue sky, the warm sand, the palm trees, everything round off." ) head(prediction) full(prediction) prediction=predictor.predict( sentence="The bright sun, the blue sky, the warm sand, the palm trees, everything rounds off." ) head(prediction) full(prediction){ "verbs": [ { "description": "[ARG1: The bright sun , the blue sky , the warm sand , the palm trees , everything] [V: rounds] [ARGM-PRD: off] .", "tags": [ "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "B-V", "B-ARGM-PRD", "O" ], "verb": "rounds" } ], "words": [ "The", "bright", "sun", ",", "the", "blue", "sky", ",", "the", "warm", "sand", ",", "the", "palm", "trees", ",", "everything", "rounds", "off", "." ] }Sample 6 Ice pucksprediction=predictor.predict( sentence="Now, ice pucks guys!" ) head(prediction) full(prediction) prediction=predictor.predict( sentence="Though the customer seemed unhappy, she was, in fact satisfied but thinking of something else at the time, which gave a false impression?" ) head(prediction)Verb: seemed Though the customer [V: seemed] [ARG1: unhappy] , she was , in fact satisfied but thinking of something else at the time , which gave a false impression ? Verb: was [ARGM-ADV: Though the customer seemed unhappy] , [ARG1: she] [V: was] , [ARGM-DIS: in fact] [ARG2: satisfied but thinking of something else at the time , which gave a false impression] ? Verb: thinking [ARGM-ADV: Though the customer seemed unhappy] , [ARG0: she] was , in fact satisfied but [V: thinking] [ARG1: of something else] [ARGM-TMP: at the time] , [ARGM-ADV: which gave a false impression] ? 
Verb: gave Though the customer seemed unhappy , she was , in fact satisfied but thinking of [ARG0: something else] at the time , [R-ARG0: which] [V: gave] [ARG1: a false impression] ?This is a jupyter notebook demonstrating usage of [garage](https://github.com/rlworkgroup/garage) in a jupyter notebook.In particular, it demonstrates the example `trpo_gym_tf_cartpole.py` file already available in [garage/examples/tf/](https://github.com/rlworkgroup/garage/blob/master/examples/tf/) Install pre-requisites%%shell echo "abcd" > mujoco_fake_key git clone --depth 1 https://github.com/rlworkgroup/garage/ cd garage bash scripts/setup_colab.sh --mjkey ../mujoco_fake_key --no-modify-bashrc > /dev/null raise Exception("Please restart your runtime so that the installed dependencies for 'garage' can be loaded, and then resume running the notebook")--------- Prepare for the training# The contents of this cell and the one after are mostly copied from garage/examples/... # Note that these need to be run twice if for the first time on a colab.research.google.com instance # 1st time is to create the "personal config from template" # 2nd time is the charm from garage.np.baselines import LinearFeatureBaseline from garage.envs import normalize # from garage.envs.box2d import CartpoleEnv from garage.experiment import run_experiment from garage.tf.algos import TRPO from garage.tf.envs import TfEnv #from garage.tf.policies import GaussianMLPPolicy from garage.tf.policies import CategoricalMLPPolicy import gym from garage.experiment import LocalRunner from garage.logger import logger, StdOutput # garage version of CartPole environment, has Discrete action space instead of Box #env = TfEnv(normalize(CartpoleEnv())) #policy = GaussianMLPPolicy( # name="policy", env_spec=env.spec, hidden_sizes=(32, 32)) # gym version of CartPole. Check note above # env = TfEnv(normalize(gym.make("CartPole-v0"))) # garage updated method of getting env env = TfEnv(env_name='CartPole-v1') policy = CategoricalMLPPolicy( name="policy", env_spec=env.spec, hidden_sizes=(32, 32)) # create baseline baseline = LinearFeatureBaseline(env_spec=env.spec) # specify an algorithm algo = TRPO( env_spec=env.spec, policy=policy, baseline=baseline, # Use these settings for garage version of env # max_path_length=100, # n_itr=100, # Use these for gym version max_path_length=200, n_itr=20, discount=0.99, max_kl_step=0.01 )Train the algorithm# start a tensorflow session so that we can keep it open after training and use the trained network to see it performing import tensorflow as tf sess = tf.InteractiveSession() # initialize sess.run(tf.compat.v1.global_variables_initializer()) # log to stdout logger.add_output(StdOutput()) # train the algo runner = LocalRunner() runner.setup(algo=algo, env=env) # use n_epochs = 100 for practical example, n_epochs = 10 for quick demo, n_epochs = 1 for smoke testing runner.train(n_epochs=10, batch_size=10000, plot=False)2019-04-01 05:46:28 | epoch #0 | Obtaining samples... 2019-04-01 05:46:28 | epoch #0 | Obtaining samples for iteration 0...Visualize a video of the algorithm playing%%shell # Prepare display for seeing a video of the policy in action in the jupyter notebook # Note that this doesn't require a runtime restart # https://stackoverflow.com/a/51183488/4126114 apt-get install python-opengl ffmpeg xvfb pip install pyvirtualdisplay # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() # bugfix? 
# Set an "id" field since missing for some reason (uncovered in monitor wrapper below) env.spec.id = 1 # wrap the gym environment for recording a video of the policy performance # https://kyso.io/eoin/openai-gym-jupyter?utm_campaign=News&utm_medium=Community&utm_source=DataCamp.com import gym from gym import wrappers env = wrappers.Monitor(env, "./gym-results", force=True) obs = env.reset() for i in range(1000): #action = env.action_space.sample() action, _ = policy.get_action(obs) obs, reward, done, info = env.step(action) if done: break print("done at step %i"%i) env.close() # Display the video in the jupyter notebook # Click the play button below to watch the video import io import base64 from IPython.display import HTML video = io.open('./gym-results/openaigym.video.%s.video000000.mp4' % env.file_infix, 'r+b').read() encoded = base64.b64encode(video) vid_ascii = encoded.decode('ascii') HTML(data=''' ''' .format(vid_ascii))Jansen Linkage ![3-jansen-linkage.png](3-jansen-linkage.png) Modelimport math import numpy as np Vec = lambda x,y: np.array((x, y)) def intersect(p0, r0, p1, r1): """ Bourke's algorithm (http://paulbourke.net/geometry/circlesphere) to find intersect points of circle0 (p0, r0) and circle1 (p1, r1) """ p10 = p1 - p0 d = np.linalg.norm(p10) if (d > r0 + r1) or (d < abs(r1 - r0)) or ((d == 0) and (r0 == r1)): return None a = (r0**2 - r1**2 + d**2) / (2 * d) h = np.sqrt(r0**2 - a**2) p2 = p0 + (a / d) * p10 r = Vec(-p10[1], p10[0]) * (h / d) return (p2 - r, p2 + r) def link_loc(name, joints, links): p1_index, p2_index = name.split("_")[1:] p1 = joints[int(p1_index)] p2 = joints[int(p2_index)] a = math.degrees(math.atan2(p1[1] - p2[1], p1[0] - p2[0])) return (np.array((links[name]["lev"], *p1)), a) def linkage(alpha, x, y, links): """For a given angle return the 2d location of each joint""" p0 = Vec(0, 0) p1 = Vec(x, y) p2 = p1 + links["link_1_2"]["len"] * Vec(np.cos(np.deg2rad(alpha)), np.sin(np.deg2rad(alpha))) p3 = intersect(p0, links["link_0_3"]["len"], p2, links["link_2_3"]["len"])[1] p4 = intersect(p0, links["link_4_0"]["len"], p3, links["link_3_4"]["len"])[1] p5 = intersect(p0, links["link_0_5"]["len"], p2, links["link_2_5"]["len"])[0] p6 = intersect(p4, links["link_4_6"]["len"], p5, links["link_5_6"]["len"])[0] p7 = intersect(p5, links["link_7_5"]["len"], p6, links["link_6_7"]["len"])[1] return (p0, p1, p2, p3, p4, p5, p6, p7) height = 2 x = 38.0 y = 7.8 links = {} links["link_1_2"] = {"len": 15.0, "lev": 3 * height, "col": "Blue4"} links["link_2_3"] = {"len": 50.0, "lev": 4 * height, "col": "DarkGreen"} links["link_3_4"] = {"len": 55.8, "lev": 3 * height, "col": "Red"} links["link_4_0"] = {"len": 40.1, "lev": 1 * height, "col": "Red"} links["link_0_3"] = {"len": 41.5, "lev": 2 * height, "col": "Red"} links["link_4_6"] = {"len": 39.4, "lev": 2 * height, "col": "Purple"} links["link_0_5"] = {"len": 39.3, "lev": 3 * height, "col": "OliveDrab"} links["link_2_5"] = {"len": 61.9, "lev": 1 * height, "col": "Orange"} links["link_5_6"] = {"len": 36.7, "lev": 0 * height, "col": "RoyalBlue"} links["link_6_7"] = {"len": 65.7, "lev": 1 * height, "col": "RoyalBlue"} links["link_7_5"] = {"len": 49.0, "lev": 2 * height, "col": "RoyalBlue"} link_list = list(links.keys())Visualisationimport matplotlib.pyplot as plt import matplotlib.gridspec as gridspec %matplotlib inline def c(a,b): return links[f"link_{a}_{b}"]["col"].replace("Blue4", "blue") def plot(ax, joints): p0, p1, p2, p3, p4, p5, p6, p7 = joints lines = ( (p1, p2, c(1,2)), (p2, p5, c(2,5)), (p2, p3, c(2,3)), (p0, p3, 
c(0,3)), (p4, p0, c(4,0)), (p3, p4, c(3,4)), (p4, p6, c(4,6)), (p0, p5, c(0,5)), (p5, p6, c(5,6)), (p7, p5, c(7,5)), (p6, p7, c(6,7)) ) ax.scatter((p0[0], p1[0]), (p0[1], p1[1])) for a, b, col in lines: ax.plot((a[0], b[0]), (a[1], b[1]), color=col) fig = plt.figure(constrained_layout=True) fig.set_size_inches(15, 5) spec2 = gridspec.GridSpec(ncols=6, nrows=2, figure=fig) for i, alpha in enumerate(range(0,360, 30)): joints = linkage(alpha, x, y, links) ax = fig.add_subplot(spec2[i//6, i%6]) ax.set_xlim(-70, 60) ax.set_ylim(-90, 50) plot(ax, joints)Assembly Partsdef make_link(length, width=2, height=1): link = ( cq.Workplane("YZ").rect(length + 4, width + 2) .pushPoints(((-length/2, 0), (length/2, 0))).circle(1) .extrude(height).edges("|X").fillet(1.99) ) link.faces(">X").wires(cq.NearestToPointSelector((0, length/2))).tag("mate") return link parts = {name: make_link(links[name]["len"], height=(2 * height if name == "1.2" else height)) for name in link_list}Define Assemblydef create_leg(x, y): L = lambda *args: cq.Location(cq.Vector(*args)) C = lambda *args: cq.Color(*args) leg = MAssembly(cq.Workplane("YZ").polyline([(0,0), (x, 0),(x,y)]), name="base", color=C("Gray")) for i, name in enumerate(link_list): leg.add(parts[name], name=name, color=C(links[name]["col"]), loc=L(0, 0, i*10 - 50)) return leg leg = create_leg(x, y) show(leg, axes=False)Define Matesleg = create_leg(x, y) for name in link_list: leg.mate(f"{name}?mate", name=name, origin=True) show(leg, render_mates=True, axes=False)Relocate and assemblerelocate(leg) show(leg, render_mates=True) alpha = 0 joints = linkage(alpha, x, y, links) for name in link_list: v, a = link_loc(name, joints, links) abs_loc = cq.Location(cq.Workplane("YZ").plane.rotated((0,0,a)), cq.Vector(*v)) # calculate the absolute location ... loc = abs_loc * leg.mates[name].mate.loc.inverse # ... and center the mate of the link first leg.assemble(name, loc) d = show(leg, render_mates=True)Animationanimation = Animation(d.root_group) alphas = {name: [] for name in link_list} positions = {name: [] for name in link_list} for alpha in range(0, -375, -15): for name in link_list: p, a = link_loc(name, linkage(alpha, x, y, links), links) alphas[name].append(a) positions[name].append(p) time = np.linspace(0, 4, 25) for name in link_list: animation.add_track(f"base/{name}", "t", time, [p - positions[name][0] for p in positions[name]]) animation.add_track(f"base/{name}", "rz", time, [a - alphas[name][0] for a in alphas[name]]) animation.animate(speed=2)_prepared by _# A jupyter notebook is composed by one or more cells. # This notebook is prepared for jupyter notebooks, and the menu items and command buttons may differ in jupyter lab. # There are two main cells: Code and Markdown. # A code cell is used to write and execute your codes. # A markdown cell is used to write text descriptions, notes, formulas or include graphics and images. # On a markdown cell, you can format your content by using Markdown, HTML, or LaTeX code. # During our tutorial, you are expected to write only python codes. # Interested readers may also use markdown cells, but it is not necesary to complete our tutorial. # # We explain basic usage of cells in Jupyter notebooks here # # This is the first cell in this notebook. 
# You can write Python code here, # and then EXECUTE/RUN it by # 1) pressing CTRL+ENTER or SHIFT+ENTER # 2) clicking "Run" on the menu # here are a few lines of python code print("hello world") str="*" for i in range(10): print(str) str+="*" # after executing this cell, the outcomes will immediately appear after this cell # you can change the range above and re-run this cell

# This is the second cell in this notebook. # # By using menu item "Insert", you can add a new cell before or after the active cell. # When a cell is selected, you may delete it by using menu item "Edit". # # As you may notice, there are other editing options under "Edit", # for example, copy/cut-paste cells and split-merge cells.

# This is the third cell.

This is a markdown-type cell. The type of any cell is shown on the toolbar under the menu bar (right side), and you can change the type of a cell from this pulldown menu. You can write Markdown, HTML, and LaTeX code in this cell. By double clicking on this cell, you can see its source; by executing this cell, you see the rendered content.

This is the fourth cell. This is also a markdown cell. LaTeX is used to show mathematical expressions, formulas, etc. For example, $ x^2 + y^2 = \frac{4}{9} $, $ \sum_{i=1}^n (i+2)^{3} $, or $ \left( \begin{array}{rrr} 1 & 0 & -1 \\ 2 & -2 & 0 \\ 3 & -1 & -2 \end{array} \right) $. By double clicking on this cell, you can see the source; by executing/running this cell, you can see the rendered content.

Tips

Showing line numbers: View $\rightarrow$ Toggle Line Numbers

Command mode: CTRL+SHIFT+P

Magic Commands

Here we list a few built-in magic commands for Jupyter notebooks that we will use during this tutorial. These commands can be executed in code-type cells.

Write the content of a code cell into an external file: %%writefile FILENAME.py
This command should be placed on the first line of the cell, and then the cell should be executed. Example:

%%writefile first.py print("hello world") str="*" for i in range(5): print(str) str+="*"

Execute an external script without loading its content into the cell: %run FILENAME.py
Example:

%run first.py

Load an external script into a cell: %load FILENAME.py
Once this command is executed, the content of the cell is replaced with the content of the file.
(The previous content is deleted.)Besides, this command is placed to the first line, and then commented out.Example:%load first.pyLending club loan Data Analysis and Predictions# Loading the required Libraries import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn from statistics import mode from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from sklearn.impute import SimpleImputer from sklearn.preprocessing import RobustScaler, FunctionTransformer from sklearn.ensemble import RandomForestClassifier, VotingClassifier, GradientBoostingClassifier from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import (roc_auc_score, confusion_matrix, accuracy_score, roc_curve, precision_recall_curve, f1_score,auc) from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline from sklearn.metrics import confusion_matrix from imblearn.pipeline import make_pipeline as imb_make_pipeline from imblearn.over_sampling import RandomOverSampler, SMOTE from imblearn.under_sampling import RandomUnderSampler from imblearn.ensemble import BalancedBaggingClassifier, EasyEnsemble import xgboost as xgb from scipy.stats import boxcox import math from google.colab import drive import os5. Model Evaluation And ValidationMachine Learning Algorithm FittingIn this part, now that we have cleaned the dataset and encoded the variable with dummy encoding to process categorical data, let's dive in the first machine learning algorithm that we will try to use on the data set. But before, let's see the result of our work on the dataset.1. Working on entire Data 2. Train Test Split the Data3. Using Models like Logistic Regression, SVM, KNN, Neural Networks for Model Validation.drive.mount('/content/drive', force_remount=True) %cd /content/drive/My\ Drive/ os.chdir('/content/drive/My Drive/Colab Notebooks/Lending Club') path = 'data' loan_data = pd.read_csv(path+'/loan_clean_data.csv', low_memory=False) loan_data.head() loan_data.info() RangeIndex: 119819 entries, 0 to 119818 Data columns (total 95 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_rate 119819 non-null float64 1 emp_length 119819 non-null int64 2 annual_inc 119819 non-null float64 3 loan_status 119819 non-null int64 4 dti 119819 non-null float64 5 delinq_2yrs 119819 non-null float64 6 inq_last_6mths 119819 non-null float64 7 pub_rec 119819 non-null float64 8 revol_bal 119819 non-null int64 9 revol_util 119819 non-null float64 10 total_acc 119819 non-null float64 11 total_rec_int[...]5.1 Evaluation Benchmark And Error MetricsWe talked about the need of a conservative evaluation of the default rate. We must also keep in mind that there is a strong imbalance with the target category of loan repayment in the dataset, because about 6 out of 7 loans are repaid. Meaning that we could lend money all the time (always predicting that the borrower would repay) and be right about 85.71% of the time that the loan would be repaid, but that would mean that the model would not be profitable. Say we lend 1000 at 10% interest, we would expect a return of 100 on each loan. 
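The next sentences complete this arithmetic; as a quick sanity check, here is a minimal sketch using the illustrative numbers from this paragraph (these are not values computed from the dataset):

```python
# Illustrative profit calculation: 7 loans of 1000 at 10% interest,
# 6 repaid and 1 full default (numbers taken from the paragraph above).
principal = 1000
interest_rate = 0.10
n_loans = 7
n_repaid = 6

interest_earned = n_repaid * principal * interest_rate    # 600
principal_lost = (n_loans - n_repaid) * principal          # 1000
net = interest_earned - principal_lost                     # -400
print(f"Net result over {n_loans} loans: {net}")
```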
But if we run the experiment 7 times, we would earn 600 (6 x 100) and lose 1000 (the defaulter), we are left with a 400 loss. Hardly a profitable enterprise. The benchmark needs to encompass the weight of the defaulter and the optimization between the true positive rate (good borrowers) and the false positive rate (bad borrowers). This implies that we need to ensure a viable machine learning model and predict a higher percentage of potential defaulters in order to avoid lending to them. The benchmark must beat the 85.71% average loan repayment. Although “money is left on the table”, a conservative investor would prefer a steady return on her investment than suffer the 1 in 7 loss.In the markdown cell above, we talked about the class imbalance and that it could lead to an unprofitable model. If we were to lend to all borrowers, we would most likely lose 1 in 7 times and take a larger loss in that one time than in the 6 others' interest. In the code below, we check the true positive and false positive rate.predictions = pd.Series(np.ones(loan_data.shape[0])) false_positive_filter = (predictions == 1) & (loan_data['loan_status'] == 0) false_positive = len(predictions[false_positive_filter]) true_positive_filter = (predictions == 1) & (loan_data['loan_status'] == 1) true_positive = len(predictions[true_positive_filter]) false_negative_filter = (predictions == 0) & (loan_data['loan_status'] == 1) false_negative = len(predictions[false_negative_filter]) true_negative_filter = (predictions == 0) & (loan_data['loan_status'] == 0) true_negative = len(predictions[true_negative_filter]) true_positive_rate = true_positive / (true_positive + false_negative) false_positive_rate = false_positive / (false_positive + true_negative) print(float(true_positive_rate)) print(float(false_positive_rate)) accuracy = float(true_positive + true_negative)/float(true_positive + false_positive+ false_negative + true_negative) accuracy precision = float(true_positive)/float(true_positive + false_positive) precision # Data to plot labels = 'False Positive', 'True Positive' sizes = [1-precision, precision] colors = ['lightcoral', 'lightblue'] # Plot plt.figure(figsize=(4,4)) plt.pie(sizes, colors=colors, autopct='%1.2f%%', shadow=False, startangle=0) plt.title('Precision Of Lending', fontsize=12) plt.legend(labels, loc='lower left', fontsize=10) plt.axis('equal') plt.show()5.2 Predictions on the Entire Dataimport time start_time =time.time() from sklearn.linear_model import LogisticRegression lr = LogisticRegression() cols = loan_data.columns train_cols = cols.drop('loan_status') features = loan_data[train_cols] target = loan_data['loan_status'] lr.fit(features, target) predictions = lr.predict(features) score_lr_complete = lr.score(features,target) print('Model Accuracy:%.4f' %(score_lr_complete)) end_time = time.time() print('Total time taken: %s seconds'%(end_time - start_time)) predictions = pd.Series(predictions) false_positive_filter = (predictions == 1) & (loan_data['loan_status'] == 0) false_positive = len(predictions[false_positive_filter]) true_positive_filter = (predictions == 1) & (loan_data['loan_status'] == 1) true_positive = len(predictions[true_positive_filter]) false_negative_filter = (predictions == 0) & (loan_data['loan_status'] == 1) false_negative = len(predictions[false_negative_filter]) true_negative_filter = (predictions == 0) & (loan_data['loan_status'] == 0) true_negative = len(predictions[true_negative_filter]) true_positive_rate = float(true_positive)/float((true_positive + false_negative)) 
false_positive_rate = float(false_positive)/float((false_positive + true_negative)) print(float(true_positive_rate)) print(float(false_positive_rate)) precision = float(true_positive)/float(true_positive + false_positive) precision # Data to plot labels = 'False Positive', 'True Positive' sizes = [1-precision, precision] colors = ['lightcoral', 'lightblue'] # Plot plt.figure(figsize=(4,4)) plt.pie(sizes, colors=colors, autopct='%1.2f%%', shadow=False, startangle=0) plt.title('Precision With Simple Logistic Regression', fontsize=12) plt.legend(labels, loc='lower left', fontsize=10) plt.axis('equal') plt.show()5.3 Train/ Test Split Data Setloan_data.shape traindata, testdata = train_test_split(loan_data, stratify=loan_data['loan_status'],test_size=.3, random_state=17, shuffle=True) testdata.reset_index(drop=True, inplace=True) traindata.reset_index(drop=True, inplace=True) sc = StandardScaler() y_default = traindata[traindata['loan_status'] == 0] n_paid = traindata[traindata['loan_status'] == 1].sample(n=len(y_default), random_state=17) ##chosing equal amount of 1's data = y_default.append(n_paid) Xbal = data.drop('loan_status', axis=1) ybal = data['loan_status'] Xte = testdata.drop('loan_status', axis=1) yte = testdata['loan_status'] Xbal.shape Xte.shape numerical = Xbal.columns[(Xbal.dtypes == 'float64') | (Xbal.dtypes == 'int64')].tolist() Xbal[numerical] = sc.fit_transform(Xbal[numerical]) numerical = Xte.columns[(Xte.dtypes == 'float64') | (Xte.dtypes == 'int64')].tolist() Xte[numerical] = sc.fit_transform(Xte[numerical])Compariosn between Random Forest And Logistic Regression Using 3 CV Score Approachfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB s1 = time.time() models = {'RF': RandomForestClassifier(n_estimators=500), 'LR': LogisticRegression(C=1)} balset = {} for i in models.keys(): scores = cross_val_score(models[i], Xbal - np.min(Xbal) + 1, ybal, scoring='roc_auc', cv=3) balset[i] = scores print(i, scores, np.mean(scores)) e1 = time.time() print('Total time taken: %s seconds' %(e1 - s1))RF [0.97568959 0.97659018 0.97780398] 0.9766945798263308 LR [0.96004765 0.96125735 0.96060553] 0.9606368433774736 Total time taken: 138.2263994216919 seconds6. 
Implementing Indiviual Modelsrf = RandomForestClassifier(n_estimators=500, min_samples_leaf=500, class_weight='balanced',random_state=3) lr = LogisticRegression(class_weight='balanced', random_state=3, penalty='l2', C=1) selection_param = 0.00036.1 Logistic Regressions2 = time.time() lr.fit(Xbal, ybal) predictions_lr = lr.predict(Xte) predictions_lr = pd.Series(predictions_lr) score_lr = lr.score(Xte,yte) print('Model Accuracy:%.4f' %(score_lr)) e2 = time.time() print('Total time taken: %s seconds' %(e2 -s2)) fp = len(predictions_lr[(predictions_lr == 1) & (yte.values==0)]) tp = len(predictions_lr[(predictions_lr == 1) & (yte.values==1)]) fn = len(predictions_lr[(predictions_lr == 0) & (yte.values==1)]) tn = len(predictions_lr[(predictions_lr == 0) & (yte.values==0)]) tpr = tp / (tp + fn) fpr = fp / (fp + tn) #print(lr.coef_) print('TPR: ',tpr) print('FPR: ',fpr) fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_lr) print('CONFUSION MATRIX:') cm = confusion_matrix(yte,predictions_lr) print('TN: ',cm[0,0], ' FP: ',cm[0,1]) print('FN: ',cm[1,0], ' TP: ',cm[1,1]) plt.figure() lw = 2 plt.plot(fprpoints, tprpoints, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic - Logistic Regression') plt.legend(loc="lower right") plt.show() cm = confusion_matrix(yte, predictions_lr).T cm = cm.astype('float')/cm.sum(axis=0) ax = sns.heatmap(cm, annot=True, cmap='magma'); ax.set_xlabel('Actual Value') ax.set_ylabel("Our Model's Prediction") ax.axis('equal')6.2 Random Forests3 = time.time() rf.fit(Xbal, ybal) predictions_rf = rf.predict(Xte) predictions_rf = pd.Series(predictions_rf) score_rf = rf.score(Xte,yte) print('Model Accuracy:%.4f' %(score_rf)) e3 = time.time() print('Total time taken: %s seconds' %(e3 -s3)) fp = len(predictions_rf[(predictions_rf == 1) & (yte.values==0)]) tp = len(predictions_rf[(predictions_rf == 1) & (yte.values==1)]) fn = len(predictions_rf[(predictions_rf == 0) & (yte.values==1)]) tn = len(predictions_rf[(predictions_rf == 0) & (yte.values==0)]) tpr = tp / (tp + fn) fpr = fp / (fp + tn) #print(lr.coef_) print('TPR: ',tpr) print('FPR: ',fpr) fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_rf) print('CONFUSION MATRIX:') cm1 = confusion_matrix(yte,predictions_rf) print('TN: ',cm1[0,0], ' FP: ',cm1[0,1]) print('FN: ',cm1[1,0], ' TP: ',cm1[1,1]) plt.figure() lw = 2 plt.plot(fprpoints, tprpoints, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic - Random Forest') plt.legend(loc="lower right") plt.show() cm1 = confusion_matrix(yte, predictions_rf).T cm1 = cm1.astype('float')/cm1.sum(axis=0) ax = sns.heatmap(cm1, annot=True, cmap='magma'); ax.set_xlabel('Actual Value') ax.set_ylabel("Our Model's Prediction") ax.axis('equal')6.3. 
K Nearest Neighbors (KNN)s4 = time.time() data_knn = KNeighborsClassifier(n_neighbors = 5, metric='minkowski',n_jobs =-1, algorithm = 'auto') data_knn.fit(Xbal, ybal) predictions_knn = data_knn.predict(Xte) predictions_knn = pd.Series(predictions_knn) score_knn = data_knn.score(Xte,yte) print('Model Accuracy:%.4f' %(score_knn)) e4 = time.time() print('Total time taken: %s seconds' %(e4 -s4)) fp = len(predictions_knn[(predictions_knn == 1) & (yte.values==0)]) tp = len(predictions_knn[(predictions_knn == 1) & (yte.values==1)]) fn = len(predictions_knn[(predictions_knn == 0) & (yte.values==1)]) tn = len(predictions_knn[(predictions_knn == 0) & (yte.values==0)]) tpr = tp / (tp + fn) fpr = fp / (fp + tn) #print(lr.coef_) print('TPR: ',tpr) print('FPR: ',fpr) fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_knn) print('CONFUSION MATRIX:') cm3 = confusion_matrix(yte,predictions_knn) print('TN: ',cm3[0,0], ' FP: ',cm3[0,1]) print('FN: ',cm3[1,0], ' TP: ',cm3[1,1]) plt.figure() lw = 2 plt.plot(fprpoints, tprpoints, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic - KNN ') plt.legend(loc="lower right") plt.show() cm3 = confusion_matrix(yte, predictions_knn).T cm3 = cm3.astype('float')/cm3.sum(axis=0) ax = sns.heatmap(cm3, annot=True, cmap='magma'); ax.set_xlabel('Actual Value') ax.set_ylabel("Our Model's Prediction") ax.axis('equal')6.4 SVMs5 = time.time() Svm = sklearn.svm.SVC() Svm.fit(Xbal, ybal) predictions_svm = Svm.predict(Xte) predictions_svm = pd.Series(predictions_svm) score_Svm = Svm.score(Xte,yte) print('Model Accuracy:%.4f' %(score_Svm)) e5 = time.time() print('Total time taken: %s seconds' %(e5 -s5)) fp = len(predictions_svm[(predictions_svm == 1) & (yte.values==0)]) tp = len(predictions_svm[(predictions_svm == 1) & (yte.values==1)]) fn = len(predictions_svm[(predictions_svm == 0) & (yte.values==1)]) tn = len(predictions_svm[(predictions_svm == 0) & (yte.values==0)]) tpr = tp / (tp + fn) fpr = fp / (fp + tn) #print(lr.coef_) print('TPR: ',tpr) print('FPR: ',fpr) fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_svm) print('CONFUSION MATRIX:') cm2 = confusion_matrix(yte,predictions_svm) print('TN: ',cm2[0,0], ' FP: ',cm2[0,1]) print('FN: ',cm2[1,0], ' TP: ',cm2[1,1]) plt.figure() lw = 2 plt.plot(fprpoints, tprpoints, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic - SVM ') plt.legend(loc="lower right") plt.show() cm2 = confusion_matrix(yte, predictions_svm).T cm2 = cm2.astype('float')/cm2.sum(axis=0) ax = sns.heatmap(cm2, annot=True, cmap='magma'); ax.set_xlabel('Actual Value') ax.set_ylabel("Our Model's Prediction") ax.axis('equal')6.5 XGB Classifierfrom xgboost import XGBClassifier s7 = time.time() model = XGBClassifier() model.fit(Xbal, ybal) predictions_xgb = model.predict(Xte) prediction_xgb = [round(value) for value in predictions_xgb] prediction_xgb = np.array(prediction_xgb) accuracy = accuracy_score(yte, prediction_xgb) print("Model Accuracy: %.4f" % (accuracy)) e7 = time.time() print('Total time taken: %s seconds' %(e7 -s7)) fp = 
len(prediction_xgb[(prediction_xgb == 1) & (yte.values==0)]) tp = len(prediction_xgb[(prediction_xgb == 1) & (yte.values==1)]) fn = len(prediction_xgb[(prediction_xgb == 0) & (yte.values==1)]) tn = len(prediction_xgb[(prediction_xgb == 0) & (yte.values==0)]) tpr = tp / (tp + fn) fpr = fp / (fp + tn) #print(lr.coef_) print('TPR: ',tpr) print('FPR: ',fpr) fprpoints, tprpoints, thresholds = roc_curve(yte, prediction_xgb) print('CONFUSION MATRIX:') cm4 = confusion_matrix(yte,predictions_xgb) print('TN: ',cm4[0,0], ' FP: ',cm4[0,1]) print('FN: ',cm4[1,0], ' TP: ',cm4[1,1]) plt.figure() lw = 2 plt.plot(fprpoints, tprpoints, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic - XG Boost ') plt.legend(loc="lower right") plt.show() cm4 = confusion_matrix(yte, prediction_xgb).T cm4 = cm4.astype('float')/cm4.sum(axis=0) ax = sns.heatmap(cm4, annot=True, cmap='magma'); ax.set_xlabel('Actual Value') ax.set_ylabel("Our Model's Prediction") ax.axis('equal')6.6 Neural Network 6.6.1 Converting the matrix Features into tensorflow variable Shape. Also defing the parameters requried for the model.import tensorflow.compat.v1 as tf tf.disable_v2_behavior() Xbal.shape ybal.shape Ybal= ybal Ybal =Ybal.values.flatten() Ybal =np.reshape(Ybal,(38122,1)) Ybal.shape Xte.shape yte.shape Yte =yte.values.flatten() Yte =np.reshape(Yte,(35946,1)) Yte.shape learning_rate = 0.001 training_epochs = 1000 batch_size = 128 n_classes = Ybal.shape[1] n_samples = 74068 n_inputs = Xte.shape[1] n_classes n_samples n_inputs n_hidden_1 = 15 n_hidden_2 = 156.6.2 Creating the Modeldef multilayer_network(X,weights,biases,keep_prob): ''' X: Placeholder for data inputs weights: dictionary of weights biases: dictionary of bias values ''' #first hidden layer with sigmoid activation # sigmoid(X*W+b) with tf.name_scope('layer1'): layer_1 = tf.add(tf.matmul(X,weights['h1']),biases['h1']) layer_1 = tf.nn.sigmoid(layer_1) layer_1 = tf.nn.dropout(layer_1,keep_prob) #second hidden layer with tf.name_scope('layer2'): layer_2 = tf.add(tf.matmul(layer_1,weights['h2']),biases['h2']) layer_2 = tf.nn.sigmoid(layer_2) layer_2 = tf.nn.dropout(layer_2,keep_prob) #output layer with tf.name_scope('output_layer'): out_layer = tf.matmul(layer_2,weights['out']) + biases['out'] tf.summary.histogram("output_for_the_layer",out_layer) return out_layer # defining the weights and biases dictionary with tf.name_scope("weights"): weights = { 'h1': tf.Variable(tf.random_normal([n_inputs,n_hidden_1]),name='W_input'), 'h2': tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2]),name='W_layer1'), 'out': tf.Variable(tf.random_normal([n_hidden_2,n_classes]),name='W_layer2') } tf.summary.histogram("weights1",weights['h1']) tf.summary.histogram("weights2",weights['h2']) tf.summary.histogram("weights_out",weights['out']) with tf.name_scope("biases"): biases = { 'h1': tf.Variable(tf.random_normal([n_hidden_1]),name='b_input'), 'h2': tf.Variable(tf.random_normal([n_hidden_2]),name='b_layer1'), 'out': tf.Variable(tf.random_normal([n_classes]),name='b_layer2') } tf.summary.histogram("bias_input",biases['h1']) tf.summary.histogram("bias_layer1",biases['h2']) tf.summary.histogram("bias_layer2",biases['out']) keep_prob = tf.placeholder("float") with tf.name_scope("inputs"): X = tf.placeholder(tf.float32,[None,n_inputs],name='x_inputs') Y = 
tf.placeholder(tf.float32,[None,n_classes],name='y_inputs') #obtaining predictions of the model predictions = multilayer_network(X,weights,biases,keep_prob) #cost function(loss) and optimizer function with tf.name_scope('loss'): cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=predictions,labels=Y)) tf.summary.scalar('loss',cost) with tf.name_scope('train'): optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) predicted = tf.nn.sigmoid(predictions) correct_prediction = tf.equal(tf.round(predicted), Y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) tf.summary.scalar("Accuracy",accuracy)6.6.3 Evaluating the Model#initializing all variables init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) s6 = time.time() acc_list= [] loss_list = [] pred = [] merged = tf.summary.merge_all() writer = tf.summary.FileWriter("logs/",sess.graph) #for loop for epoch in range(training_epochs): avg_cost = 0.0 total_batch = int(len(Xbal)/ batch_size) x_batches = np.array_split(Xbal, total_batch) y_batches = np.array_split(Ybal, total_batch) for i in range(total_batch): batch_x, batch_y = x_batches[i], y_batches[i] _,c,p = sess.run([optimizer,cost,predictions], feed_dict={ X: batch_x, Y: batch_y, keep_prob: 0.8 }) avg_cost += c / total_batch print("Epoch:", '%04d' % (epoch+1), "Loss=", "{:.9f}".format(avg_cost)) acc,res,predicts = sess.run([accuracy,merged,predictions], feed_dict={X: Xte, Y: Yte,keep_prob:1.0}) writer.add_summary(res,epoch) print('Accuracy:', acc) acc_list.append(acc) loss_list.append(avg_cost) pred.append(predicts) print ('---------------') e6 = time.time() print("Model has completed {} epochs of training".format(training_epochs)) print('Total Time Taken: %s' %(e6 - s6))Epoch: 0001 Loss= 0.912925831 Accuracy: 0.73654926 --------------- Epoch: 0002 Loss= 0.874621128 Accuracy: 0.76943195 --------------- Epoch: 0003 Loss= 0.848194211 Accuracy: 0.7867635 --------------- Epoch: 0004 Loss= 0.801817735 Accuracy: 0.7980304 --------------- Epoch: 0005 Loss= 0.755378474 Accuracy: 0.80768377 --------------- Epoch: 0006 Loss= 0.717488711 Accuracy: 0.81533414 --------------- Epoch: 0007 Loss= 0.682190257 Accuracy: 0.8217048 --------------- Epoch: 0008 Loss= 0.648587555 Accuracy: 0.82782507 --------------- Epoch: 0009 Loss= 0.617160872 Accuracy: 0.8338619 --------------- Epoch: 0010 Loss= 0.578967276 Accuracy: 0.8412897 --------------- Epoch: 0011 Loss= 0.548006670 Accuracy: 0.8454626 --------------- Epoch: 0012 Loss= 0.516618253 Accuracy: 0.8496634 --------------- Epoch: 0013 Loss= 0.489548435 Accuracy: 0.8538363 --------------- Epoch: 0014 Loss= 0.465203773 Accuracy: 0.85647917 --------------- Epoch: 0015 Loss= 0.442583144 Accuracy: 0.856507 --------------- Epoch[...]6.6.4 Plotting both the Loss and Accuracy Functionx = np.arange(1,1001) y = acc_list plt.figure(figsize =(10,10)) plt.plot(x,y, linewidth = 3.0) plt.grid() plt.xlabel('---- No. of Epochs ------- ', fontsize=20) plt.ylabel('---- Accuray ------',fontsize = 20) plt.title('Epochs vs Accuracy', fontsize =20) plt.show() x = np.arange(1,1001) y = loss_list plt.figure(figsize =(10,10)) plt.grid() plt.plot(x,y, linewidth = 3.0) plt.xlabel('---- No. 
of Epochs ------- ', fontsize=20) plt.ylabel('---- Loss ------',fontsize = 20) plt.title('Epochs vs Loss', fontsize =20) plt.show()Comparision of the ROC Curves for DIfferent Models and Results Interpretationfprpoints, tprpoints, thresholds = roc_curve(yte, predictions_lr) lw = 2 plt.plot(fprpoints, tprpoints, color='darkorange', lw=lw, label='ROC curve - LR (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') fp = len(predictions_rf[(predictions_rf == 1) & (yte.values==0)]) tp = len(predictions_rf[(predictions_rf == 1) & (yte.values==1)]) fn = len(predictions_rf[(predictions_rf == 0) & (yte.values==1)]) tn = len(predictions_rf[(predictions_rf == 0) & (yte.values==0)]) fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_rf) lw = 2 plt.plot(fprpoints, tprpoints, color='green', lw=lw, label='ROC curve - RF (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_knn) lw = 2 plt.plot(fprpoints, tprpoints, color='purple', lw=lw, label='ROC curve - KNN (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.legend(loc="lower right") fprpoints, tprpoints, thresholds = roc_curve(yte, prediction_xgb) lw = 2 plt.plot(fprpoints, tprpoints, color ='red',lw=lw, label='ROC curve -XGB (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic - For Different Models') fprpoints, tprpoints, thresholds = roc_curve(yte, predictions_svm) lw = 2 plt.plot(fprpoints, tprpoints, lw=lw, label='ROC curve - SVM (area = %0.2f)' % auc(fprpoints,tprpoints)) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend(loc="lower right") plt.show()From the Above ROC plot we can see that Random Forest has outperformed all the other models. Logisitic Regression has also done quite Well, when compared with SVM We see that SVM has taken more time to compute the Classification of Loans either Charged or Paid off. XGB has performed the worst among all the models and it would be not recommed to use for this Data Set. A Neural Network Model was trained for 1000 epochs, the accuracy which it got was 0.7980, it outperformed all the models. The computation time for Neural Network was the most expensive compared to all the other models.Dark matter spatial and spectral models Introduction Gammapy has some convenience methods for dark matter analyses in `~gammapy.astro.darkmatter`. These include J-Factor computation and calculation the expected gamma flux for a number of annihilation channels. They are presented in this notebook. The basic concepts of indirect dark matter searches, however, are not explained. So this is aimed at people who already know what the want to do. 
A good introduction to indirect dark matter searches is given for example in https://arxiv.org/pdf/1012.4515.pdf (Chapter 1 and 5) SetupAs always, we start with some setup for the notebook, and with imports.from gammapy.astro.darkmatter import ( profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel, ) from gammapy.maps import WcsGeom, WcsNDMap from astropy.coordinates import SkyCoord from matplotlib.colors import LogNorm from regions import CircleSkyRegion import astropy.units as u import numpy as np %matplotlib inline import matplotlib.pyplot as pltProfilesThe following dark matter profiles are currently implemented. Each model can be scaled to a given density at a certain distance. These parameters are controlled by ``profiles.DMProfile.LOCAL_DENSITY`` and ``profiles.DMProfile.DISTANCE_GC``profiles.DMProfile.__subclasses__() for profile in profiles.DMProfile.__subclasses__(): p = profile() p.scale_to_local_density() radii = np.logspace(-3, 2, 100) * u.kpc plt.plot(radii, p(radii), label=p.__class__.__name__) plt.loglog() plt.axvline(8.5, linestyle="dashed", color="black", label="local density") plt.legend() print("LOCAL_DENSITY:", profiles.DMProfile.LOCAL_DENSITY) print("DISTANCE_GC:", profiles.DMProfile.DISTANCE_GC)J FactorsThere are utilities to compute J-Factor maps that can serve as a basis to compute J-Factors for certain regions. In the following we compute a J-Factor map for the Galactic Centre regionprofile = profiles.NFWProfile() # Adopt standard values used in HESS profiles.DMProfile.DISTANCE_GC = 8.5 * u.kpc profiles.DMProfile.LOCAL_DENSITY = 0.39 * u.Unit("GeV / cm3") profile.scale_to_local_density() position = SkyCoord(0.0, 0.0, frame="galactic", unit="deg") geom = WcsGeom.create(binsz=0.05, skydir=position, width=3.0, frame="galactic") jfactory = JFactory( geom=geom, profile=profile, distance=profiles.DMProfile.DISTANCE_GC ) jfact = jfactory.compute_jfactor() jfact_map = WcsNDMap(geom=geom, data=jfact.value, unit=jfact.unit) ax = jfact_map.plot(cmap="viridis", norm=LogNorm(), add_cbar=True) plt.title(f"J-Factor [{jfact_map.unit}]") # 1 deg circle usually used in H.E.S.S. analyses sky_reg = CircleSkyRegion(center=position, radius=1 * u.deg) pix_reg = sky_reg.to_pixel(wcs=geom.wcs) pix_reg.plot(ax=ax, facecolor="none", edgecolor="red", label="1 deg circle") plt.legend() # NOTE: https://arxiv.org/abs/1607.08142 quote 2.67e21 without the +/- 0.3 deg band around the plane total_jfact = pix_reg.to_mask().multiply(jfact).sum() print( "J-factor in 1 deg circle around GC assuming a " f"{profile.__class__.__name__} is {total_jfact:.3g}" )Gamma-ray spectra at productionThe gamma-ray spectrum per annihilation is a further ingredient for a dark matter analysis. The following annihilation channels are supported. 
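Both ingredients enter the expected annihilation flux: the J-factor carries the astrophysics and the per-annihilation spectrum dN/dE carries the particle physics. Up to normalization conventions (for example, an extra factor of two for non-self-conjugate dark matter), a standard reference form is

$$ \frac{\mathrm{d}\Phi_\gamma}{\mathrm{d}E_\gamma} = \frac{\langle\sigma v\rangle}{8\pi\, m_{\mathrm{DM}}^{2}}\,\frac{\mathrm{d}N_\gamma}{\mathrm{d}E_\gamma}\; J, \qquad J = \int_{\Delta\Omega}\mathrm{d}\Omega \int_{\mathrm{l.o.s.}} \rho^{2}(r)\,\mathrm{d}l . $$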
For more info see https://arxiv.org/pdf/1012.4515.pdffluxes = PrimaryFlux(mDM="1 TeV", channel="eL") print(fluxes.allowed_channels) fig, axes = plt.subplots(4, 1, figsize=(6, 16)) mDMs = [0.01, 0.1, 1, 10] * u.TeV for mDM, ax in zip(mDMs, axes): fluxes.mDM = mDM ax.set_title(rf"m$_{{\mathrm{{DM}}}}$ = {mDM}") ax.set_yscale("log") ax.set_ylabel("dN/dE") for channel in ["tau", "mu", "b", "Z"]: fluxes.channel = channel fluxes.table_model.plot( energy_bounds=[mDM / 100, mDM], ax=ax, label=channel, yunits=u.Unit("1/GeV"), ) axes[0].legend() plt.subplots_adjust(hspace=0.5)Flux mapsFinally, flux maps can be produced like this:channel = "Z" massDM = 10 * u.TeV diff_flux = DarkMatterAnnihilationSpectralModel(mass=massDM, channel=channel) int_flux = ( jfact * diff_flux.integral(energy_min=0.1 * u.TeV, energy_max=10 * u.TeV) ).to("cm-2 s-1") flux_map = WcsNDMap(geom=geom, data=int_flux.value, unit="cm-2 s-1") ax = flux_map.plot(cmap="viridis", norm=LogNorm(), add_cbar=True) plt.title( f"Flux [{int_flux.unit}]\n m$_{{DM}}$={fluxes.mDM.to('TeV')}, channel={fluxes.channel}" );Specifying the prediction targetdf.columns y = df.SalePrice y.describe() y.shapeCreating the features* LotArea* YearBuilt* 1stFlrSF* 2ndFlrSF* FullBath* BedroomAbvGr* TotRmsAbvGrdfeatures = 'LotArea YearBuilt 1stFlrSF 2ndFlrSF FullBath BedroomAbvGr TotRmsAbvGrd'.split() features X = df[features] print('Imprimindo as 5 primeiras linhas de X') X.head() print('Descrição ou Estatística de X') X.describe() X.shapeCreating a modelfrom sklearn.tree import DecisionTreeRegressor iowa_model = DecisionTreeRegressor(random_state=1) iowa_model.fit(X, y)Making predictionspredictions = iowa_model.predict(X.head()) print('Predições:\n') for p in predictions: print(p)Predições: 208500.0 181500.0 223500.0 140000.0 250000.0Comparing the resultsy.head()Validating the modelfrom sklearn.model_selection import train_test_split train_X, val_X, train_y, val_y = train_test_split(X, y, random_state = 1) iowa_model = DecisionTreeRegressor(random_state = 1) iowa_model.fit(train_X, train_y) print(f'{train_X.shape}\t{train_y.shape}\t\t{val_X.shape}\t{val_y.shape}') val_predictions = iowa_model.predict(val_X) print(val_predictions[0:5]) print(y.head().tolist())[186500. 184000. 130000. 92000. 164500.] [208500, 181500, 223500, 140000, 250000]Calculating the mean absolute errorfrom sklearn.metrics import mean_absolute_error val_mae = mean_absolute_error(val_predictions, val_y) print(val_mae)29652.931506849316In the statistical description of the target column, which holds the home sale prices (y.describe()), the mean sale price is 180,921. The mean absolute error is 29,652, which is roughly 16% of the mean sale price. In theory, the model would therefore be predicting with about 84% accuracy. 
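As a quick sanity check of that 16% figure, here is a small sketch using the rounded numbers printed above:

mean_price = 180_921   # approximate mean SalePrice from y.describe()
val_mae = 29_652       # validation MAE printed above
print(f"MAE / mean price = {val_mae / mean_price:.1%}")  # -> about 16.4%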
Comparando os diferentes tamanhos de nósdef get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y): model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=1) model.fit(train_X, train_y) preds_val = model.predict(val_X) mae = mean_absolute_error(val_y, preds_val) return mae nodes = [5, 25, 50, 100, 250] for max_leaf_nodes in nodes: my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y) print('Max leaf nodes: {} \t\t Mean Absolute Error: {:.0f}'.format(max_leaf_nodes, my_mae)) best_tree_size = 100 final_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=1) final_model.fit(X, y) predictions = final_model.predict(X.head()) print('Predições: \n') for p in predictions: print(f'{p:.0f}') # Comparando y.head()count number of cells or sites with significant contextual effects# analyses = ['single cell', 'population', 'probewise pop', 'pop ceiling'] # 4 sounds analyses = ['single cell', 'population'] nsounds = [4, 10]# 10 sounds for analysis, ns in itt.product(analyses, nsounds): print(f'\n{analysis} {ns} sounds') all_id_df= pivoted.loc[(pivoted.nsounds == ns) & (pivoted.analysis == analysis), ['id', "integral (d'*ms)"]] # count of cells or sites with significant instances all_id_count = len(all_id_df.id.unique()) good_id_df= all_id_df.loc[(all_id_df["integral (d'*ms)"] > 0), ['id']] good_id_count = len(good_id_df.id.unique()) print(f'{good_id_count} of {all_id_count} total modulated neurons/PCs. {good_id_count/all_id_count*100:.2f}%') # total count of significant instances total_instances = all_id_df["integral (d'*ms)"].shape[0] significant_instances = np.sum(all_id_df["integral (d'*ms)"] > 0) print(f"{significant_instances} of {total_instances} total significant instances, {significant_instances/total_instances*100:.2f}%") # count of significant instances per neuron or PC, corrected for multiple comparisons signif_prop_per_neuron = all_id_df.groupby('id').agg(corr_signif_prop=("integral (d'*ms)",corrected_proportion)) modulated_neurons = signif_prop_per_neuron.loc[signif_prop_per_neuron.corr_signif_prop > 0] print(f"modulated neurons/PCs have {np.mean(modulated_neurons.values)*100 :.2f}% significant instances on average") # count of significant neurons per site if analysis == 'single cell': signif_prop_per_neuron = signif_prop_per_neuron.reset_index() signif_prop_per_neuron['site'] = signif_prop_per_neuron['id'].apply(lambda x: x[:7]) gp = signif_prop_per_neuron.groupby('site').agg( signif_neu_prop=('corr_signif_prop', lambda x: np.sum(x>0)/np.size(x))) print(f"sites have {np.mean(gp.values)*100 :.2f}% of modulated neurons on average")single cell 4 sounds 375 of 862 total modulated neurons/PCs. 43.50% 1463 of 34480 total significant instances, 4.24% modulated neurons/PCs have 9.75% significant instances on average sites have 42.27% of modulated neurons on average single cell 10 sounds 199 of 247 total modulated neurons/PCs. 80.57% 6676 of 135850 total significant instances, 4.91% modulated neurons/PCs have 6.10% significant instances on average sites have 78.95% of modulated neurons on average population 4 sounds 35 of 35 total modulated neurons/PCs. 100.00% 428 of 1400 total significant instances, 30.57% modulated neurons/PCs have 30.57% significant instances on average population 10 sounds 6 of 6 total modulated neurons/PCs. 
100.00% 1002 of 3300 total significant instances, 30.36% modulated neurons/PCs have 30.36% significant instances on averageExercise 3-1: Train a classifier for MNIST and reach 97% accuracy on the test set. - KNeighborsClassifier works very well for this task; you only need to find suitable hyperparameter values.%matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_openml # Download the MNIST dataset mnist = fetch_openml('mnist_784') mnist mnist_data = mnist['data'] mnist_label = mnist['target'].astype(np.uint8) len(mnist_data), len(mnist_label) X_train, y_train, X_test, y_test = mnist_data[:60000], mnist_label[:60000], mnist_data[60000:], mnist_label[60000:] y_train[:10] # It is always a good habit to clean/shuffle the data shuffled_index = np.random.permutation(len(X_train)) X_train, y_train = X_train[shuffled_index], y_train[shuffled_index] some_digit = X_train[10] some_digit_label = y_train[10] plt.imshow(some_digit.reshape(28, 28), cmap=matplotlib.cm.binary) some_digit_label from sklearn.model_selection import cross_val_predict, cross_val_score from sklearn.metrics import accuracy_score from sklearn.neighbors import KNeighborsClassifier knn_clf = KNeighborsClassifier() knn_clf #cross_val_score(knn_clf, X_train, y_train, cv=3, scoring='accuracy', n_jobs=-1) knn_clf.fit(X_train, y_train) y_pred = knn_clf.predict(X_test) accuracy_score(y_test, y_pred)Using grid search to find the best parametersfrom sklearn.model_selection import GridSearchCV param_grid=[{ 'weights': ['uniform', 'distance'], 'n_neighbors': [2, 3, 4, 5, 6, 7, 8, 9], 'algorithm': ['ball_tree', 'kd_tree', 'brute'] }] grid_search = GridSearchCV(knn_clf, param_grid, cv=3, verbose=3, n_jobs=-1) grid_search.fit(X_train, y_train) grid_search.best_score_, grid_search.best_params_ y_pred = grid_search.predict(X_test) accuracy_score(y_test, y_pred)Exploring the model further# First, instantiate the best model found by the grid search knn_clf = KNeighborsClassifier(**grid_search.best_params_)Confusion matrix 1. Compute the confusion matrixy_train_pred = cross_val_predict(knn_clf, X_train, y_train, cv=3) from sklearn.metrics import confusion_matrix conf_mx = confusion_matrix(y_train, y_train_pred) conf_mx2. Visualize the confusion matrixrow_sums = conf_mx.sum(axis=1, keepdims=True) norm_conf_mx = conf_mx/row_sums norm_conf_mx np.fill_diagonal(norm_conf_mx, 0) # fill the diagonal with 0 plt.figure(figsize=(8, 8)) plt.matshow(norm_conf_mx, cmap= plt.cm.gray) plt.show()Computing Precision/Recall 1. Compute Precision and Recallfrom sklearn.metrics import precision_score, recall_score y_pred = knn_clf.predict(X_test) precision_score(y_test, y_pred, average='macro'), recall_score(y_test, y_pred, average='macro')Data Transformation by Z-Score In statistics, a z-score tells us how many standard deviations away a value is from the mean: Z = (X - a)/b, where 'X' is a single raw data value, 'a' is the mean of the data, and 'b' is the standard deviation.import matplotlib.pyplot as plt import seaborn as sns from google.colab import files import io # Add scipy.stats for z-scores of an array/dataset import scipy.stats as stats data = files.upload() import pandas as pd # Load the dataset df = pd.read_csv(io.StringIO(data['Emotion Dataset.csv'].decode('utf-8'))) df.head # remove the label column from the dataset new_df = df del new_df["labels"] new_df.headApplying Z-scores to the datasetnew_df.apply(stats.zscore)https://towardsdatascience.com/data-science-in-finance-56a4d99279f7 Moving Average Strategy Test - The moving average strategy suggests **buying** a stock when the **shorter SMA crosses above the longer SMA** and **selling** it when the **shorter SMA crosses below the longer one**. The picture below shows the **buy signal** (SMA_20, the **red line, crosses above the blue line**, SMA_50) and the exit point as the reverse. 
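As a minimal illustration of that rule (a sketch only, assuming a DataFrame that already holds the SMA_20 and SMA_50 columns computed later in this notebook), the crossover points can also be located in a vectorized way:

import numpy as np
import pandas as pd

def crossover_signals(df: pd.DataFrame) -> pd.Series:
    # +1 on the bar where SMA_20 crosses above SMA_50 (buy signal),
    # -1 where it crosses back below (sell signal), 0 otherwise
    above = df["SMA_20"] > df["SMA_50"]
    crossed_up = above & ~above.shift(1, fill_value=False)
    crossed_down = ~above & above.shift(1, fill_value=False)
    return pd.Series(np.where(crossed_up, 1, np.where(crossed_down, -1, 0)), index=df.index)

This mirrors the rule described above; the notebook itself implements the same logic with an explicit loop in the sections that follow.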
![Finance_Moving_Ave](image/1.JPG) 1- Data extraction and preparation - Historical stock price data can be obtained from various sources, but the simplest is **Yahoo Finance**. To do this, we need to **import the yfinance library** and other related libraries that will be used for data manipulation with pandas. In this work, we will extract data for a specific stock symbol from Yahoo Finance and carry out the moving average strategy test. Let's first define the strategy. A moving average can be a simple or exponential average of the stock price over a specific window, such as 20, 50 or 200 periods. Depending on your preferred time interval, this can be minutes, days, weeks or even months. The relative position of the moving averages can serve as a buy or sell signal.import numpy as np import pandas as pd import yfinance as yf import datetime as dt import matplotlib.pyplot as plt import mplfinance as mpf- First of all, we need to define the time range over which we will examine the strategy. To use the **yfinance library**, the **date should be in the format YYYY-MM-DD**. Choose the start date as you like. You can do the same for the end date, but I prefer the **end date to be today**. We can define it as below:# define time range start = '2016-01-01' # format: YYYY-MM-DD end = dt.datetime.now() # today- In the next step, store your favorite stock symbol ticker string in a stock variable. We will use it with the yfinance library to download the data as a data frame:# favorite stock symbol stock='AMD' df = yf.download(stock,start, end, interval='1d') df.head()[*********************100%***********************] 1 of 1 completed2. Moving Average Calculation - Let's define the short and long simple moving average (SMA) window lengths and store them in a list named SMAs.# Let's calculate the Simple Moving Average (SMA) short_sma= 20 long_sma = 50 SMAs=[short_sma, long_sma]- We could calculate the **moving average** one by one for **each SMA using the rolling and mean functions**, but to keep it flexible for more than two SMAs, we may use a **for loop** such as:for i in SMAs: df["SMA_"+str(i)] = df.iloc[:,4].rolling(window=i).mean() df.tail(3)3. 
SMAs Relative Position Recognition - if **shorter SMA is higher than longer SMA**, we are in the **up-trend** area and appropriate to have a position and sell it in the opposite condition.position=0 # 1 means we have already entered poistion, 0 means not already entered counter=0 percentChange=[] # empty list to collect %changes for i in df.index: SMA_short=df['SMA_20'] SMA_long =df['SMA_50'] close=df['Adj Close'][i] if(SMA_short[i] > SMA_long[i]): #line 9 print('Up trend') if(position==0): buyP=close #buy price position=1 # turn position print("Buy at the price"+str(buyP)) elif(SMA_short[i] < SMA_long[i]): print('Down trend') if(position==1): # have a poistion in down trend position=0 # selling position sellP=close # sell price print("Sell at the price"+str(sellP)) perc=(sellP/buyP-1)*100 percentChange.append(perc) #line 23 if(counter==df["Adj Close"].count()-1 and position==1): position=0 sellP=close print("Sell at the price"+str(sellP)) perc=(sellP/buyP-1)*100 percentChange.append(perc) counter+=1 print(percentChange)Up trend Buy at the price2.7200000286102295 Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up trend Up[...]- This is a for loop block of code that will iterate over data set we previously read into df variable. After defining close price, short and long SMAs from the data frame, it will go into two different branches using if statement based on short and long SMA values (line 9). If **short SMA is above long SMA, it prints out the trend direction (‘Up trend’)** and if there is not existing position in the portfolio, it will buy a position (signal) based on the close price of that day. If short SMA reads smaller values than long SMA, it is in Down trend. If we have already a position, we should sell at the adjusted close price. Using append function in pandas (line 23), we will store the results to a percentChange variable list as defined in the previous line. The last part of block code, starting line24, is to examine whether there is an open position we are counting down to end of the data frame. 4. Statistics - After defining some variables such as gains and losses with the quantity of them, we may use for loop again over individual values in perecntChange to find out statistical information. Positive values in the list refer to the gain and will be added up in gains variable. The negative values are losses and will be stored in losses variable. 
We can print out the total return as percent and round the value up to 2 decimal places.gains=0 numGains=0 losses=0 numLosses=0 totReturn=1 for i in percentChange: if(i>0): gains+=i numGains+=1 else: losses+=i numLosses+=1 totReturn = totReturn*((i/100)+1) totReturn=round((totReturn-1)*100,2) print("This statistics is from "+str(df.index[0])+" up to now with "+str(numGains+numLosses)+" trades:") print("SMAs used: "+str(SMAs)) print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%" )This statistics is from 2015-12-31 00:00:00 up to now with 11 trades: SMAs used: [20, 50] Total return over 11 trades: 1088.41%- To calculate average gain and loss, we may simply use if statement to see if the number of gains is bigger than zero or not. If so, the average gain can be calculated by gains divided by a number of gains. If the number of losses is a positive value, it will calculate average losses as well. Maximum gain and loss can also be an attractive point of the trades strategy.if (numGains>0): avgGain=gains/numGains maxReturn= str(max(percentChange)) else: avgGain=0 maxReturn='unknown' if(numLosses>0): avgLoss=losses/numLosses maxLoss=str(min(percentChange)) ratioRR=str(-avgGain/avgLoss) # risk-reward ratio else: avgLoss=0 maxLoss='unknown' ratioRR='inf' print("Average Gain: "+ str(avgGain)) print("Average Loss: "+ str(avgLoss)) print("Max Return: "+ maxReturn) print("Max Loss: "+ maxLoss) print("Gain/loss ratio: "+ ratioRR)Average Gain: 54.99325001538664 Average Loss: -8.162503859101655 Max Return: 132.3529450435539 Max Loss: -16.20305914577973 Gain/loss ratio: 6.737301563914858- For this example, the average gain for a single stock share is 62 dollars, the average loss is almost 8 dollars. The maximum return is 153 and the maximum loss is 16 dollars.- Batting average is calculated by dividing the number of gains over the total number of trades. This can be a sign of accuracy to enter for a position ranges between 0 and 1. 1 is the most accurate bat.if(numGains>0 or numLosses>0): batAvg=numGains/(numGains+numLosses) else: batAvg=0 print("Batting Avg: "+ str(batAvg))Batting Avg: 0.63636363636363645. Plotting - Although you can use seaborn or matplotlib libraries to plot the stock price, the mplfinance library is perfectly designed specifically for the stock price plot. Its function accept various attributes such as figure size, price plotting type (line, candlestick, …) and moving average.mpf.plot(df, type = 'ohlc',figratio=(16,6), mav=(short_sma,long_sma), volume=True, title= str(stock), style='default')Exploring the World Happiness Report Data Cleaning, Data Aggregation and Data VisualizationWorld Happiness Report is an annual report created by the UN Sustainable Development Solutions Network with the intent of guiding policy. The report assigns each country a happiness score based on the answers to a poll question that asks respondents to rank their life on a scale of 0 - 10.It also includes estimates of factors that may contribute to each country's happiness, including economic production, social support, life expectancy, freedom, absence of corruption, and generosity, to provide context for the score. Although these factors aren't actually used in the calculation of the happiness score, they can help illustrate why a country received a certain score. 
QuestionsThe following are the list of questions we need to find out in the dataset.* How can aggregating the data give us more insight into happiness scores?* How did world happiness change from 2015 to 2017?* Which factors contribute the most to the happiness score? Data Aggregation DatasetYou can download the dataset [here](https://www.kaggle.com/unsdsn/world-happiness)The data set is a CSV file called [`World_Happiness_2015.csv`](https://www.kaggle.com/unsdsn/world-happiness). Below are descriptions for columns:|**Column**|**Description**||---|---||**`Country`**|Name of the country.||**`Region`**|Name of the region the country belongs to.||**`Happiness Rank`**|The rank of the country, as determined by its happiness score.||**`Happiness Score`**|A score assigned to each country based on the answers to a poll question that asks respondents to rate their happiness on a scale of 0-10.||**`Family`**|The estimated extent to which family contributes to the happiness score.||**`Freedom`**|The estimated extent to which freedom contributes to the happiness score.||**`Generosity`**|The estimated extent to which generosity contributes to the happiness score.| Dataset Inspection%matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') happiness2015 = pd.read_csv('https://dsserver-prod-resources-1.s3.amazonaws.com/343/World_Happiness_2015.csv?versionId=jQYOlRzzMPFpIhNjLxwFNWDSOAsdpjnN') happiness2015.head() happiness2015.tail() happiness2015.info() RangeIndex: 158 entries, 0 to 157 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 158 non-null object 1 Region 158 non-null object 2 Happiness Rank 158 non-null int64 3 Happiness Score 158 non-null float64 4 Standard Error 158 non-null float64 5 Economy (GDP per Capita) 158 non-null float64 6 Family 158 non-null float64 7 Health (Life Expectancy) 158 non-null float64 8 Freedom 158 non-null float64 9 Trust (Government Corruption) 158 non-null float64 10 Generosity 158 non-null float64 11 Dystopia Residual 158 non-null float64 dtypes: float64(9), int64(1), object(2) memory usage: 1[...]Mean Happiness Score by regionhappiness2015.groupby('Region')['Happiness Score'].mean().sort_values() happiness2015.pivot_table(values='Happiness Score', index='Region', margins=True).plot(kind='barh', title='Mean Happiness Scores by Region', xlim=(0,10), legend=False)Did world happiness increase, decrease, or stay about the same from 2015 to 2017?happiness2016 = pd.read_csv('https://dsserver-prod-resources-1.s3.amazonaws.com/344/World_Happiness_2016.csv?versionId=g2kGflhRQVfDoiUtXaBqfKoLx_BrgtJs') happiness2016.head() happiness2017 = pd.read_csv('https://dsserver-prod-resources-1.s3.amazonaws.com/344/World_Happiness_2017.csv?versionId=VojSXR_mvHTzv41jGPwSvMFGZiHuKXCq') happiness2017.head() for i,df in enumerate([happiness2015,happiness2016,happiness2017]): df['Year']=2015+i happiness2017.rename(columns={'Happiness.Score': 'Happiness Score'}, inplace=True) combined = pd.concat([happiness2015, happiness2016, happiness2017]) combined.pivot_table(index='Year', values='Happiness Score').plot(kind='barh', title='Mean Happiness Scores by Year', xlim=(0,10)) plt.show()mean world happiness score stayed approximately the same from 2015 to 2017. 
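The same aggregation can be cross-checked with groupby instead of pivot_table; a small sketch, assuming the combined dataframe built above:

yearly = combined.groupby('Year')['Happiness Score'].agg(['mean', 'median', 'count'])
print(yearly.round(3))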
Exploring the factors that contribute happiness.> the World Happiness Report assigns each country a happiness score based on a poll question that asks respondents to rank their life on a scale of 0 - 10.The factors incluce:* Economy (GDP per Capita)* Family* Health (Life Expectancy)* Freedom* Trust (Government Corruption)* Generosity Which of the factors above contribute the most to the happiness score?happiness2015=happiness2015.rename({'Economy (GDP per Capita)': 'Economy', 'Health (Life Expectancy)': 'Health', 'Trust (Government Corruption)': 'Trust' }, axis=1) happiness2015.columns factors_impact=happiness2015[['Economy', 'Family', 'Health', 'Freedom', 'Trust', 'Generosity']].applymap(lambda x: 'High' if x > 1 else 'low') factors_impact> We can see from the results that, according to our definition, the `Economy` and `Family` columns had a high impact on the happiness scores of the first five countries.factors_impact.apply(pd.value_counts)> Now, we can easily see that the `Family` and `Economy` columns contain the most `'High'` values!main_cols = ['Country', 'Region', 'Happiness Rank', 'Happiness Score'] factors = ['Economy', 'Family', 'Health', 'Freedom', 'Trust', 'Generosity', 'Dystopia Residual'] melt=pd.melt(happiness2015, id_vars=main_cols,value_vars=factors) melt['Percentage']=round((melt.value/melt['Happiness Score'])*100,2) melt.head() pv_melt=melt.pivot_table(index='variable', values='value') pv_melt.plot(kind='pie', y='value', legend=False) plt.show()World Development Indicatorsworking with the additional economic data from the World Bank. Column descriptionBelow are descriptions for the columns:|**Column Name**|**Column Description**||---|---||`ShortName`|Name of the country||`Region`|The region the country belongs to||`IncomeGroup`|The income group the country belongs to, based on Gross National Income (GNI) per capita||`CurrencyUnit`|Name of country's currency||`SourceOfMostRecentIncomeAndExpenditureData`|The name of the survey used to collect the income and expenditure data||`SpecialNotes`|Contains any miscellaneous notes about the data|world_dev = pd.read_csv("https://dsserver-prod-resources-1.s3.amazonaws.com/346/World_dev.csv?versionId=1G.IH6N10dXmFAG1mSTdtdLax4d9DPtC") col_renaming = {'SourceOfMostRecentIncomeAndExpenditureData': 'IESurvey'} world_dev.head() merged=pd.merge(left=happiness2015, right=world_dev,how='left', left_on='Country', right_on='ShortName') merged=merged.rename(col_renaming, axis=1) merged.head() merged['Currency'] = merged.CurrencyUnit.str.split().str.get(-1) merged.Currency.head()National Accountsmerged_national_accounts = merged[merged.SpecialNotes.str.contains(r"[Nn]ational accounts", na=False)] merged_national_accounts.head() merged["IncomeGroup"]=merged.IncomeGroup.str.upper().str.strip().str.replace(" INCOME","").str.strip().str.replace(":","") pv_incomes=merged.pivot_table(index="IncomeGroup", values="Happiness Score") pv_incomes.plot(kind='bar', rot=30,ylim=(0,10)) happiness2015.columns happiness2016.columns happiness2017.columns happiness2017.columns = happiness2017.columns.str.replace('.', ' ').str.replace('\s+', ' ').str.strip().str.upper() happiness2015.columns = happiness2015.columns.str.replace('(', '').str.replace(')', '').str.strip().str.upper() happiness2016.columns = happiness2016.columns.str.replace('(', '').str.replace(')', '').str.strip().str.upper() combined = pd.concat([happiness2015, happiness2016, happiness2017], ignore_index=True) combined.isnull().sum() happiness2015 = 
pd.read_csv("https://dsserver-prod-resources-1.s3.amazonaws.com/347/wh_2015.csv?versionId=jfEXotZkNMvPapsuIuIiUy9PD8d.yQpp") happiness2016 = pd.read_csv("https://dsserver-prod-resources-1.s3.amazonaws.com/347/wh_2016.csv?versionId=YTFx.axVvCayn262yITV7dTcN5VWrd_8") happiness2017 = pd.read_csv("https://dsserver-prod-resources-1.s3.amazonaws.com/347/wh_2017.csv?versionId=rpTqH_8SLrRgSWwCJvfx472qYfpLK5Qm") happiness2017.columns = happiness2017.columns.str.replace('.', ' ').str.replace('\s+', ' ').str.strip().str.upper() happiness2015.columns = happiness2015.columns.str.replace('(', '').str.replace(')', '').str.strip().str.upper() happiness2016.columns = happiness2016.columns.str.replace('(', '').str.replace(')', '').str.strip().str.upper() combined = pd.concat([happiness2015, happiness2016, happiness2017], ignore_index=True) combined.isnull().sum() combined_updated = combined.set_index('YEAR') sns.heatmap(combined_updated.isnull(), cbar=False)**We can make the following observations:*** No values are missing in the COUNTRY column.* There are some rows in the 2015, 2016, and 2017 data with missing values in all columns EXCEPT the COUNTRY column.* Some columns only have data populated for one year.* It looks like the REGION data is missing for the year 2017.Since the regions are fixed values - the region a country was assigned to in 2015 or 2016 won't change - we should be able to assign the 2015 or 2016 region to the 2017 row.combined = pd.merge(left=combined, right=pd.read_csv("https://raw.githubusercontent.com/VictorOmondi1997/exploring-whr/master/data/regions.csv"), on="COUNTRY", how="left") combined = combined.drop("REGION_x", axis=1) combined.isna().sum()check for duplicate valuesSince we should only have one country for each year, we can be a little more thorough by defining rows with ONLY the same country and year as duplicates.combined[combined.duplicated(["COUNTRY", 'YEAR'])]Since the dataframe is empty, we can tell that there are no rows with exactly the same country AND year.combined["COUNTRY"]=combined["COUNTRY"].str.upper() combined[combined.duplicated(['COUNTRY', 'YEAR'])] combined = combined.drop_duplicates(["COUNTRY", "YEAR"])Missing valuesSince missing values make up more than half of the following columns and we don't need them to accomplish our end goal, we'll drop them:* STANDARD ERROR* LOWER CONFIDENCE INTERVAL* UPPER CONFIDENCE INTERVAL* WHISKER HIGH* WHISKER LOWcombined=combined.drop(['LOWER CONFIDENCE INTERVAL', 'STANDARD ERROR', 'UPPER CONFIDENCE INTERVAL', 'WHISKER HIGH', 'WHISKER LOW'], axis=1) combined.isna().sum() sorted_happiness = combined.set_index('REGION_y').sort_values(['REGION_y', 'HAPPINESS SCORE']) sns.heatmap(sorted_happiness.isnull(), cbar=False)From the visualization above, we can also identify that only three regions contain missing values:* Sub-Saharan Africa* Middle East and Northern Africa* Latin America and CarribbeanThe Sub-Saharan Africa region contains the most missing values, accounting for about 9 percent of that regions's values. 
>* Only about 4 percent of the values in each column are missing.* Dropping rows with missing values won't cause us to lose information in other columns.combined.pivot_table(index='REGION_y', values='HAPPINESS SCORE', margins=True)We can see that the world mean happiness score, 5.370728, is over 1 point higher than the mean happiness score for the Sub-Saharan Africa region, 4.150957.Also, if we think about the reasons why a country may not have participated in the happiness survey - war, natural disaster, etc - many of them would likely result in a lower happiness score than even the region's mean. We'll conclude that the mean for the whole world wouldn't be a good estimate for them.As a result, we'll decide that of these two options, it's better to drop the rows with missing values.combined = combined.dropna() combined.isna().sum()Copyright 2019 The TensorFlow Authors.#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.単語埋め込み (Word embeddings) View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [ メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-ja)にご連絡ください。 このチュートリアルでは、単語埋め込みを紹介します。このチュートリアルには、小さいデータセットを使って単語埋め込みを最初から学習させ、その埋め込みベクトルを [Embedding Projector](http://projector.tensorflow.org) (下図参照)を使って可視化するためのプログラムがすべて含まれています。 テキストを数値で表す機械学習モデルは、ベクトル(数値の配列)を入力として受け取ります。テキストを扱う際、最初に決めなければならないのは、文字列を機械学習モデルに入力する前に、数値に変換する(あるいはテキストを「ベクトル化」する)ための戦略です。このセクションでは、これを行う3つの戦略を見てみます。 ワンホット・エンコーディング最初のアイデアとして、ボキャブラリの中の単語それぞれを「ワンホット」エンコードするというのがあります。 "The cat sat on the mat" という文を考えてみましょう。この文に含まれるボキャブラリ(ユニークな単語)は、 (cat, mat, on, sat, the) です。それぞれの単語を表現するため、ボキャブラリの長さに等しいゼロベクトルを作り、その単語に対応するインデックスの場所に 1 を立てます。これを下図で示します。 文をエンコードしたベクトルを作成するには、その後、それぞれの単語のワンホット・ベクトルをつなげればよいのです。Key point: この手法は非効率です。ワンホット・エンコードされたベクトルは疎(つまり、ほとんどのインデックスではゼロ)です。ボキャブラリに 10,000 の単語があると考えてみましょう。単語をすべてワンホット・エンコードするということは、要素の 99.99% がゼロであるベクトルを作ることになります。 それぞれの単語をユニークな数値としてエンコードする2つ目のアプローチとして、それぞれの単語をユニークな数値でエンコードするというのがあります。上記の例をとれば、"cat" に 1、"mat" に 2、というふうに番号を割り当てることができます。そうすれば、 "The cat sat on the mat" という文は、 [5, 1, 4, 3, 5, 2] という密なベクトルで表すことができます。この手法は効率的です。疎なベクトルの代わりに、密な(すべての要素が入っている)ベクトルが得られます。しかしながら、このアプローチには 2つの欠点があります。* 整数エンコーディングは勝手に決めたものです(単語間のいかなる関係性も含んでいません)。* 整数エンコーディングはモデルにとっては解釈しにくいものです。たとえば、線形分類器はそれぞれの特徴量について単一の重みしか学習しません。したがって、2つの単語が似かよっていることと、それらのエンコーディングが似かよっていることの間には、なんの関係もありません。この特徴と重みの組み合わせには意味がありません。 単語埋め込み単語埋め込みを使うと、似たような単語が似たようにエンコードされる、効率的で密な表現が得られます。重要なのは、このエンコーディングを手動で行う必要がないということです。埋め込みは浮動小数点数の密なベクトルです(そのベクトルの長さはあなたが指定するパラメータです)。埋め込みベクトルの値は指定するものではなく、学習されるパラメータです(モデルが密結合レイヤーの重みを学習するように、訓練をとおしてモデルが学習する重みです)。一般的には、(小さいデータセットの場合の)8次元の埋め込みベクトルから、大きなデータセットを扱う 1024次元のものまで見られます。高次元の埋め込みは単語間の細かな関係を取得できますが、学習にはよりたくさんのデータが必要です。上図は単語埋め込みを図示したものです。それぞれの単語が 
4次元の浮動小数点数のベクトルで表されています。埋め込みは「参照テーブル」と考えることもできます。重みが学習された後では、テーブルを参照して、それぞれの単語を対応する密ベクトルにエンコードできます。 設定!pip install tf-nightly import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds tfds.disable_progress_bar()Embedding レイヤーを使うKeras では単語埋め込みを使うのも簡単です。[Embedding](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding) レイヤーを見てみましょう。 Embedding レイヤーは、(特定の単語を示す)整数のインデックスに(その埋め込みである)密なベクトルを対応させる参照テーブルとして理解することができます。埋め込みの次元数(あるいはその幅)は、取り組んでいる問題に適した値を実験して求めるパラメータです。これは、Dense レイヤーの中のニューロンの数を実験で求めるのとまったくおなじです。embedding_layer = layers.Embedding(1000, 5)Embedding レイヤーを作成するとき、埋め込みの重みは(ほかのレイヤーとおなじように)ランダムに初期化されます。訓練を通じて、これらの重みはバックプロパゲーションによって徐々に調整されます。いったん訓練が行われると、学習された単語埋め込みは、(モデルを訓練した特定の問題のために学習された結果)単語の間の類似性をおおまかにコード化しています。Embedding レイヤーに整数を渡すと、結果はそれぞれの整数が埋め込みテーブルのベクトルに置き換えられます。result = embedding_layer(tf.constant([1,2,3])) result.numpy()テキストあるいはシーケンスの問題では、入力として、Embedding レイヤーは shape が `(samples, sequence_length)` の2次元整数テンソルを取ります。ここで、各エントリは整数のシーケンスです。このレイヤーは、可変長のシーケンスを埋め込みベクトルにすることができます。上記のバッチでは、 `(32, 10)` (長さ10のシーケンス32個のバッチ)や、 `(64, 15)` (長さ15のシーケンス64個のバッチ)を埋め込みレイヤーに投入可能です。返されたテンソルは入力より 1つ軸が多くなっており、埋め込みベクトルはその最後の新しい軸に沿って並べられます。`(2, 3)` の入力バッチを渡すと、出力は `(2, 3, N)` となります。result = embedding_layer(tf.constant([[0,1,2],[3,4,5]])) result.shapeシーケンスのバッチを入力されると、Embedding レイヤーは shape が `(samples, sequence_length, embedding_dimensionality)` の3次元浮動小数点数テンソルを返します。この可変長のシーケンスを、固定長の表現に変換するには、さまざまな標準的なアプローチが存在します。Dense レイヤーに渡す前に、RNNやアテンション、プーリングレイヤーを使うことができます。ここでは、一番単純なのでプーリングを使用します。[RNN を使ったテキスト分類](https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/text/text_classification_rnn.ipynb) は次のステップとしてよいチュートリアルでしょう。 埋め込みを最初から学習する IMDB の映画レビューの感情分析器を訓練しようと思います。そのプロセスを通じて、埋め込みを最初から学習します。ここでは、前処理済みのデータセットを使用します。テキストデータセットを最初からロードする方法については、[テキスト読み込みのチュートリアル](../load_data/text.ipynb)を参照してください。(train_data, test_data), info = tfds.load( 'imdb_reviews/subwords8k', split = (tfds.Split.TRAIN, tfds.Split.TEST), with_info=True, as_supervised=True)エンコーダー(`tfds.features.text.SubwordTextEncoder`)を取得し、すこしボキャブラリを見てみましょう。ボキャブラリ中の "\_" は空白を表しています。ボキャブラリの中にどんなふうに("\_")で終わる単語全体と、長い単語を構成する単語の一部が含まれているかに注目してください。encoder = info.features['text'].encoder encoder.subwords[:20]映画のレビューはそれぞれ長さが異なっているはずです。`padded_batch` メソッドを使ってレビューの長さを標準化します。train_data train_batches = train_data.shuffle(1000).padded_batch(10, padded_shapes=([None],[])) test_batches = test_data.shuffle(1000).padded_batch(10, padded_shapes=([None],[]))Note: **TensorFlow 2.2** から、padded_shapes は必須ではなくなりました。デフォルトではすべての軸をバッチ中で最も長いものに合わせてパディングします。train_batches = train_data.shuffle(1000).padded_batch(10) test_batches = test_data.shuffle(1000).padded_batch(10)インポートした状態では、レビューのテキストは整数エンコードされています(それぞれの整数がボキャブラリ中の特定の単語あるいは部分単語を表しています)。あとの方のゼロに注目してください。これは、バッチが一番長いサンプルに合わせてパディングされた結果です。train_batch, train_labels = next(iter(train_batches)) train_batch.numpy()単純なモデルの構築[Keras Sequential API](../../guide/keras) を使ってモデルを定義することにします。今回の場合、モデルは「連続した Bag of Words」スタイルのモデルです。* 次のレイヤーは Embedding レイヤーです。このレイヤーは整数エンコードされた語彙を受け取り、それぞれの単語のインデックスに対応する埋め込みベクトルをみつけて取り出します。これらのベクトルはモデルの訓練により学習されます。このベクトルは出力配列に次元を追加します。その結果次元は `(batch, sequence, embedding)` となります。* 次に、GlobalAveragePooling1D レイヤーが、それぞれのサンプルについて、シーケンスの次元で平均を取り、固定長の出力ベクトルを返します。これにより、モデルは可変長の入力を最も簡単な方法で扱えるようになります。* この固定長のベクトルは、16個の隠れユニットを持つ全結合(Dense)レイヤーに接続されます。* 最後のレイヤーは、1個の出力ノードを持つ Dense レイヤーです。シグモイド活性化関数を使うことで、値は 0 と 1 の間の値を取り、レビューがポジティブ(好意的)であるかどうかの確率(または確信度)を表します。Caution: 
このモデルはマスキングを使用していません。このため、ゼロパディングが入力の一部として扱われ、結果としてパディングの長さが出力に影響を与える可能性があります。これを修正するには[マスキングとパディングのガイド](../../guide/keras/masking_and_padding)を参照してください。embedding_dim=16 model = keras.Sequential([ layers.Embedding(encoder.vocab_size, embedding_dim), layers.GlobalAveragePooling1D(), layers.Dense(16, activation='relu'), layers.Dense(1) ]) model.summary()モデルのコンパイルと訓練model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy']) history = model.fit( train_batches, epochs=10, validation_data=test_batches, validation_steps=20)このアプローチにより、モデルの評価時の正解率は 88% 前後に達します(モデルは過学習しており、訓練時の正解率の方が際立って高いことに注意してください)。import matplotlib.pyplot as plt history_dict = history.history acc = history_dict['accuracy'] val_acc = history_dict['val_accuracy'] loss=history_dict['loss'] val_loss=history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(12,9)) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() plt.figure(figsize=(12,9)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.ylim((0.5,1)) plt.show()学習した埋め込みの取得次に、訓練によって学習された単語埋め込みを取得してみます。これは、shape が `(vocab_size, embedding-dimension)` の行列になります。e = model.layers[0] weights = e.get_weights()[0] print(weights.shape) # shape: (vocab_size, embedding_dim)この重みをディスクに出力します。[Embedding Projector](http://projector.tensorflow.org) を使うため、タブ区切り形式の2つのファイルをアップロードします。(埋め込みを含む)ベクトルのファイルと、(単語を含む)メタデータファイルです。import io encoder = info.features['text'].encoder out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for num, word in enumerate(encoder.subwords): vec = weights[num+1] # 0 はパディングのためスキップ out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in vec]) + "\n") out_v.close() out_m.close()このチュートリアルを [Colaboratory](https://colab.research.google.com) で実行している場合には、下記のコードを使ってこれらのファイルをローカルマシンにダウンロードすることができます(あるいは、ファイルブラウザを使います。*表示 -> 目次 -> ファイル* )。try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv')Example of handling bad moleculesIf you give a bad conformer, of course xTB will not be able to calculate any properties.This is an example of how ppqm handles that%load_ext autoreload %autoreload 2 %matplotlib inline import logging import sys import pandas as pd from rdkit import Chem from rdkit.Chem.Draw import MolsToGridImage try: import ppqm except ModuleNotFoundError: import pathlib cwd = pathlib.Path().resolve().parent sys.path.append(str(cwd)) import ppqm from ppqm import jupyter as ppqm_jupyterSet logging levellogging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger("ppqm").setLevel(logging.INFO) logging.getLogger("xtb").setLevel(logging.DEBUG) show_progress = FalseDefine a molecule you likesmiles = "Cc1cc(NCCO)nc(-c2ccc(Br)cc2)n1" # CHEMBL1956589 molobj = Chem.MolFromSmiles(smiles) molobjGet a conformer, and make it physically impossible- Get a conformer from rdkit- Change bond length to make it badmolobj = ppqm.tasks.generate_conformers(molobj, n_conformers=1) molobj coordinates = ppqm.chembridge.get_coordinates(molobj) atoms = ppqm.chembridge.get_atoms(molobj) atoms coordinates[0, :] = 0.0 coordinates[1, :] = 0.0 
ppqm.chembridge.molobj_set_coordinates(molobj, coordinates) ppqm_jupyter.show_molobj(molobj)Try to optimize this bad conformerxtb = ppqm.XtbCalculator(scr="_tmp_directory_", n_cores=1, cmd="xtb") calculation = { "gfn": 2, "alpb": "water", "opt": None, } results = xtb.calculate(molobj, calculation) resultsComparisonpalette= sns.color_palette("tab10") sns.set_style("whitegrid") sns.set_context("talk") fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(20, 6)) sns.lineplot( data=majority_df[majority_df['type']=='false_alarm_rate'], x="C", y="val", hue="model", style='model', ax=axs[0], markers=True, dashes=False ) axs[0].set_ylabel('(%)') axs[0].set_title('False Alarm Rate') sns.lineplot( data=majority_df[majority_df['type']=='anom_miss_rate'], x="C", y="val", hue="model", style='model', ax=axs[1], markers=True, dashes=False ) axs[1].set_ylabel('(%)') axs[1].set_title('Anomaly Miss Rate') handles, labels = axs[0].get_legend_handles_labels() fig.legend(handles[1:], ['lgbm','rf','xgboost'], loc='lower left', bbox_to_anchor=(0.32, 1, 0.3, 0.4), ncol=3, frameon=True, mode='None',) axs[0].get_legend().remove() axs[1].get_legend().remove() plt.suptitle('Majority Filtering') palette= sns.color_palette("tab10") sns.set_style("whitegrid") sns.set_context("talk") fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(20, 6)) sns.lineplot( data=consecutive_df[consecutive_df['type']=='false_alarm_rate'], x="C", y="val", hue="model", style='model', ax=axs[0], markers=True, dashes=False ) axs[0].set_ylabel('(%)') axs[0].set_title('False Alarm Rate') sns.lineplot( data=consecutive_df[consecutive_df['type']=='anom_miss_rate'], x="C", y="val", hue="model", style='model', ax=axs[1], markers=True, dashes=False ) axs[1].set_ylabel('(%)') axs[1].set_title('Anomaly Miss Rate') handles, labels = axs[0].get_legend_handles_labels() fig.legend(handles[1:], ['lgbm','rf','xgboost'], loc='lower left', bbox_to_anchor=(0.32, 1, 0.3, 0.4), ncol=3, frameon=True, mode='None',) axs[0].get_legend().remove() axs[1].get_legend().remove() plt.suptitle('Consecutive Filtering')Timing Comparison#TODO: sorting? 
MODELS = ['lgbm','rf','xgboost'] time_dict = {} for MODEL in MODELS: if FS: MODEL = MODEL + '-fs' try: with open(model_config['results_dir'] / '{}_time_dict.json'.format(MODEL)) as f: temp_dict = json.load(f) time_dict[MODEL] = temp_dict[MODEL] except: print("{} does not exist".format(MODEL)) time_dict dataframe = pd.DataFrame.from_dict(time_dict).transpose()#,orient='index',columns=['a','b','c','d','e']) dataframe.round(2) dataframe.index.name = 'model' dataframe.reset_index(inplace=True) dataframe.rename(columns={'training_time':'Training Time','inference_time':'Inference Time'},inplace=True) dataframe dataframe = pd.melt(dataframe, id_vars=['model'],value_vars=['Training Time','Inference Time'],value_name='time') dataframe.loc[dataframe['variable'] == 'Training Time','scaled_time'] = dataframe.loc[dataframe['variable'] == 'Training Time']['time'] / min(dataframe.loc[dataframe['variable'] == 'Training Time']['time']) dataframe.loc[dataframe['variable'] == 'Inference Time','scaled_time'] = dataframe.loc[dataframe['variable'] == 'Inference Time']['time'] / min(dataframe.loc[dataframe['variable'] == 'Inference Time']['time']) dataframe.sort_values('scaled_time',inplace=True) palette= sns.color_palette("tab10") sns.set_style("whitegrid") sns.set_context("talk") g = sns.catplot(x='variable',y='scaled_time', palette = palette[3:6], kind = 'bar', #ax = axs[1], height=6, aspect=2.7, hue = 'model', order =['Training Time','Inference Time'], data=dataframe) plt.title('Training and Inference Time Comparison',pad=30,fontdict={'fontsize': 20,'fontweight':'bold'}) plt.ylabel('Relative Time') plt.xlabel('') plt.yticks([]) #plt.xtickslabels(['Training Time','Inference Time']) ax = g.facet_axis(0,0) for p in ax.patches: ax.text(p.get_x() + 0.05, p.get_height() * 1.05, '{0:.1f}x'.format(p.get_height()), #Used to format it K representation color='black', rotation='horizontal', size='large')OLD TIMING COMPARISONwith open(model_config['results_dir'] / 'time_dict_fs.json') as f: time_dict = json.load(f) dataframe = pd.DataFrame.from_dict(time_dict).transpose()#,orient='index',columns=['a','b','c','d','e']) dataframe.round(2) dataframe.index.name = 'model' dataframe.reset_index(inplace=True) dataframe.rename(columns={'training_time':'Training Time','inference_time':'Inference Time'},inplace=True) dataframe #added this due to thread thingy dataframe.loc[dataframe['model'] == 'rf-fs','Training Time']= dataframe[dataframe['model'] == 'rf-fs']['Training Time'].values[0]*5 dataframe.loc[dataframe['model'] == 'rf-fs','Inference Time']= dataframe[dataframe['model'] == 'rf-fs']['Inference Time'].values[0]*3 dataframe = pd.melt(dataframe, id_vars=['model'],value_vars=['Training Time','Inference Time'],value_name='time') dataframe.loc[dataframe['variable'] == 'Training Time','scaled_time'] = dataframe.loc[dataframe['variable'] == 'Training Time']['time'] / min(dataframe.loc[dataframe['variable'] == 'Training Time']['time']) dataframe.loc[dataframe['variable'] == 'Inference Time','scaled_time'] = dataframe.loc[dataframe['variable'] == 'Inference Time']['time'] / min(dataframe.loc[dataframe['variable'] == 'Inference Time']['time']) dataframe.sort_values('scaled_time',inplace=True) palette= sns.color_palette("tab10") sns.set_style("whitegrid") sns.set_context("talk") g = sns.catplot(x='variable',y='scaled_time', palette = palette[3:6], kind = 'bar', #ax = axs[1], height=6, aspect=2.7, hue = 'model', order =['Training Time','Inference Time'], data=dataframe) plt.title('Training and Inference Time 
Comparison',pad=30,fontdict={'fontsize': 20,'fontweight':'bold'}) plt.ylabel('Relative Time') plt.xlabel('') plt.yticks([]) #plt.xtickslabels(['Training Time','Inference Time']) ax = g.facet_axis(0,0) for p in ax.patches: ax.text(p.get_x() + 0.05, p.get_height() * 1.05, '{0:.1f}x'.format(p.get_height()), #Used to format it K representation color='black', rotation='horizontal', size='large') #comingfrom when we run random forest with out nthread palette= sns.color_palette("tab10") sns.set_style("whitegrid") sns.set_context("talk") g = sns.catplot(x='variable',y='time', palette = palette[4:7], kind = 'bar', #ax = axs[1], height=6, aspect=2.7, hue = 'model', data=dataframe) plt.title('Training and Inference Time Comparison') plt.ylabel('Relative Time') plt.xlabel('') plt.yticks([]) ax = g.facet_axis(0,0) for p in ax.patches: ax.text(p.get_x() + 0.05, p.get_height() * 1.05, '{0:.1f}x'.format(p.get_height()), #Used to format it K representation color='black', rotation='horizontal', size='large')Single Plot for Papersns.barplot(x='anomaly',y='f1-score', palette = palette, dodge=False, data=f1_score_df[f1_score_df['model'] == 'rf']) plt.title('F1-Scores for Window Size '+str(model_config['window_size'])) sns.barplot(y='false_alarm_rate',x='model',hue='model', #style='model', palette = palette, dodge=False, data=alarm_score_df,ax=axs[1],capsize=.1) axs[1].set_title('False Alarm Rate', pad=20) axs[1].set_xlabel("") axs[1].set_ylabel('')31 - Covid-19 case study - GOME-2 anomaly map >> Copernicus Sentinel-5P TROPOMI Carbonmonoxide (CO) A precursor satellite mission, Sentinel-5P aims to fill in the data gap and provide data continuity between the retirement of the Envisat satellite and NASA's Aura mission and the launch of Sentinel-5. The Copernicus Sentinel-5P mission is being used to closely monitor the changes in air quality and was launched in October 2017.Sentinel-5p Pre-Ops data are disseminated in the `netCDF` format and can be downloaded via the [Copernicus Open Access Hub](https://scihub.copernicus.eu/).Sentinel-5p carries the `TROPOMI` instrument, which is a spectrometer in the UV-VIS-NIR-SWIR spectral range. `TROPOMI` provides measurements on:* `Ozone`* `NO``2`* `SO``2`* `Formaldehyde`* `Aerosol`* `Carbonmonoxide`* `Methane`* `Clouds` Module outline:* [1 - Load and browse Sentinel-5P data](load_s5p)* [2 - Plotting example - Sentinel-5P data](plotting_s5p) Load required libraries%matplotlib inline import os import xarray as xr import numpy as np import netCDF4 as nc import matplotlib.pyplot as plt from matplotlib.colors import LogNorm import cartopy.crs as ccrs from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER from matplotlib.axes import Axes from cartopy.mpl.geoaxes import GeoAxes GeoAxes._pcolormesh_patched = Axes.pcolormesh import cartopy.feature as cfeature import geopandas as gpd import warnings warnings.simplefilter(action = "ignore", category = RuntimeWarning)Load and browse Sentinel-5P data Open one individual Sentinel-5P netCDF file with `NetCDF4` The dataset object contains information about the general data structure of the dataset. 
You can see that the variables of `Sentinel-5P` data are organised in groups, which is analogous to directories in a filesystem.s5p_file = nc.Dataset('./eodata/sentinel5p/co/2019/08/19/S5P_OFFL_L2__CO_____20190819T164807_20190819T182937_09581_01_010302_20190825T161022.nc', 'r') s5p_file.groupsIf you select the `/PRODUCT` group, you get more information on what variables the dataset object contain.s5p_file.groups['PRODUCT']You see that the object contains the following variables:* `scanline`* `ground_pixel`* `time`* `corner`* `delta_time`* `time_utc`* `ga_value`* `latitude`* `longitude`* `carbonmonoxide_total_column`* `carbonmonoxie_total_column_precision` You can specify one variable of interest and get more detailed information about the variable. E.g. `carbonmonoxide_total_column` is the atmosphere mole content of carbon monoxide, has the unit mol m-2, and is a 3D variable.You can do this for the available variables, but also for the dimensions latitude and longitude.You can see e.g. that the `latitude` coordinates range between -85.9 S and 61.9 S and the `longitude` coordinates range between -124.3 W to 101.9 E.co = s5p_file.groups['PRODUCT'].variables['carbonmonoxide_total_column'] lon = s5p_file.groups['PRODUCT'].variables['longitude'][:][0,:,:] lat = s5p_file.groups['PRODUCT'].variables['latitude'][:][0,:,:] co, lon, latYou can retrieve the array values of the variable object by selecting the `time` dimension and `data`. You can have a look at the `minimum` and `maximum` data value to get an idea of the data range. You see that the data contain negative values. Let's mask the negative values and all values equal to the `_FillValue` and set it to `NaN`. `_FillValue` is used for not significant data. Thus, you want to mask those.co_data = co[0,:,:].data print(co_data.min(), co_data.max()) co_data[co_data <= 0.] = co._FillValue co_data[co_data == co._FillValue] = np.nanPlotting example - Sentinel-5P data Plot `Dataset` NetCDF library object with `matplotlib` and `cartopy` The retrieved data array from the Dataset NetCDF object is of type `numpy array` and you can plot it with matplotlib's `pcolormesh` function. Due to the nature of the `CO` data values, we apply a logarithmic scale to the color bar with `LogNorm` from `matplotlib.colors`, which facilitates the visualisation of the data.Let's create a function [visualize_pcolormesh](./functions.ipynbvisualize_pcolormesh), where we can specify projection, extent, conversion_factor, color_scale, unit, title and if the plot shall have a global extent.def visualize_pcolormesh(data_array, longitude, latitude, projection, color_scale, unit, long_name, vmin, vmax, lonmin, lonmax, latmin, latmax, log=True, set_global=True): """ Visualizes a numpy array with matplotlib's 'pcolormesh' function. Parameters: data_array: any numpy MaskedArray, e.g. loaded with the NetCDF library and the Dataset function longitude: numpy Array holding longitude information latitude: numpy Array holding latitude information projection: a projection provided by the cartopy library, e.g. 
ccrs.PlateCarree() color_scale (str): string taken from matplotlib's color ramp reference unit (str): the unit of the parameter, taken from the NetCDF file if possible long_name (str): long name of the parameter, taken from the NetCDF file if possible vmin (int): minimum number on visualisation legend vmax (int): maximum number on visualisation legend lonmin,lonmax,latmin,latmax: geographic extent of the plot log (logical): set True, if the values shall be represented in a logarithmic scale set_global (logical): set True, if the plot shall have a global coverage """ fig=plt.figure(figsize=(20, 10)) ax = plt.axes(projection=projection) # define the coordinate system that the grid lons and grid lats are on if(log): img = plt.pcolormesh(longitude, latitude, np.squeeze(data_array), norm=LogNorm(), cmap=plt.get_cmap(color_scale), transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax) else: img = plt.pcolormesh(longitude, latitude, data_array, cmap=plt.get_cmap(color_scale), transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax) ax.add_feature(cfeature.BORDERS, edgecolor='black', linewidth=1) ax.add_feature(cfeature.COASTLINE, edgecolor='black', linewidth=1) if (projection==ccrs.PlateCarree()): ax.set_extent([lonmin, lonmax, latmin, latmax], projection) gl = ax.gridlines(draw_labels=True, linestyle='--') gl.xlabels_top=False gl.ylabels_right=False gl.xformatter=LONGITUDE_FORMATTER gl.yformatter=LATITUDE_FORMATTER gl.xlabel_style={'size':14} gl.ylabel_style={'size':14} if(set_global): ax.set_global() ax.gridlines() cbar = fig.colorbar(img, ax=ax, orientation='horizontal', fraction=0.04, pad=0.1) cbar.set_label(unit, fontsize=16) cbar.ax.tick_params(labelsize=14) ax.set_title(long_name, fontsize=20, pad=20.0) return fig, axYou can retrieve unit and title information from the load `Dataset`, where the information is stored as attributes. You can now plot the data.unit = co.units long_name = co.long_name visualize_pcolormesh(co_data, lon, lat, ccrs.Mollweide(), 'viridis', unit, long_name, 0.01,1, lon.min(), lon.max(), lat.min(), lat.max(), log=True, set_global=True)You can zoom into a region by specifying a `bounding box` of interest. Let's set the extent to South America, with: `[-100, 0, -80, 40]`. The above plotting function [visualize_pcolormesh](./functions.ipynbvisualize_pcolormesh) allows for setting a specific bounding box. You simply have to set the `set_global` key to False. It is best to adjust the projection to `PlateCarree()`, as this will be more appropriate for a regional subset.lonmin=-100 lonmax=0 latmin=-80 latmax=40 visualize_pcolormesh(co_data, lon, lat, ccrs.PlateCarree(), 'viridis', unit, long_name, 0.01,1, lonmin, lonmax, latmin, latmax, log=True, set_global=False)Load multiple Sentinel-5p data files with `xarray` and `open_mfdataset` The plots above showed the extent of one Sentinel-5P ground track. You can load multiple ground tracks into a single `xarray` and the `DataArrays` will be concatenated at the `scanline` dimension. This allows you to have a larger region of interest (ROI).s5p_mf_19 = xr.open_mfdataset('./eodata/sentinel5p/co/2019/08/19/*.nc', concat_dim='scanline', combine='nested', group='PRODUCT') s5p_mf_19From the `Dataset` object `s5p_mf_19`, you can choose the data variable of interest, e.g. `carbonmonoxide_total_column`. It has three dimensions (`3D`), but the time dimension consisting only of one dimension entry. If you want to reduce it by the dimension time, you can simply select the first dimension and reduce it to a `2D` object. 
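An equivalent and slightly more explicit way to drop the length-one time dimension is xarray's isel (a sketch, assuming the s5p_mf_19 dataset loaded above); the positional indexing used in the next cell does the same thing:

co_19 = s5p_mf_19['carbonmonoxide_total_column'].isel(time=0)  # 2D: (scanline, ground_pixel)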
You can again use the function [visualize_pcolormesh](./functions.ipynbvisualize_pcolormesh) to visualize the data.co_19 = s5p_mf_19.carbonmonoxide_total_column[0,:,:] lat_19 = co_19.latitude lon_19 = co_19.longitude unit = co_19.units long_name = co_19.long_name visualize_pcolormesh(co_19, lon_19, lat_19, ccrs.PlateCarree(), 'viridis', unit, long_name, 0.01, 1.0, lonmin, lonmax, latmin, latmax, log=True, set_global=False)This notebook is used to cleanly organize single-molecule tracking data by scanning through JSON parameter files used to generate figures and moving all XML TrackMate files into a specified folder, re-creating the JSON parameter file if needed. Optionally, it can also scan through the raw data file folder and move/organize all relevant raw data files.# User-defined parameters # str or list of str: settings_files = ['../analysis_settings/Fig2F_HaloTagControls.json', '../analysis_settings/Fig3F_IRE1-HaloTag.json', '../analysis_settings/Fig4A_Tm_Tg_DTT.json', '../analysis_settings/Fig4B_IRE1mutants.json', '../analysis_settings/FigS1B_GST_dimer.json', '../analysis_settings/FigS2_ERstress_HaloControls.json', '../analysis_settings/FigS3D_IRE1-HaloTag_clones.json', '../analysis_settings/FigS4_Trajectory_density.json'] # Specify which data to organize (TrackMate outputs, raw data, or both) #organize_tracks = True organize_raw = True # Specify source directories # Reference directory to the relative paths in the settings JSON file(s) base_dir_tracks = r'C:\Vlad\Work_sys_drive\Paper_data\Archive\processed' # Directory in which all raw image files are found (subdirectories OK) base_dir_raw = r'C:\Vlad\Work_sys_drive\Paper_data\raw\raw_unzipped' #Specify target directories (absolute or relative to the notebook file) final_dir_tracks = r'C:\Vlad\Work_sys_drive\Paper_code\Belyy_et_al_2021\data\processed' final_dir_json = r'C:\Vlad\Work_sys_drive\Paper_code\Belyy_et_al_2021\analysis_settings\auto_gen' final_dir_raw = r'C:\Vlad\Work_sys_drive\Paper_code\Belyy_et_al_2021\data\raw' image_file_extension = 'tif' # for searching through image files # Load modules import json, os, copy, shutil, glob from pathlib import Path # Uncomment the following two lines for debugging %load_ext autoreload %autoreload 2 # Add source code directory (src) to path to enable user module import module_dir = '../src' os.sys.path.insert(0, module_dir) # Import user modules from source code directory #import parse_trackmate as pt import correlation_analysis as corr # Read and organize data if type(settings_files) is not list: settings_files = [settings_files] raw_files_copied = [] # to avoid duplicating raw files for file in settings_files: print('Working on file: '+str(file)) conditions, params = corr.read_analysis_params(file, print_summary=False) new_json = copy.deepcopy(params['raw_json']) origin_paths = [] dest_paths = [] for condition, paths in params['raw_json']['conditions'].items(): if type(paths) is not list: paths = [paths] new_paths_rel = [] for curr_path in paths: path = Path(curr_path) origin = (Path(base_dir_tracks) / path).resolve() origin_paths.append(origin) #Keep folder and parent folder dest_subfolder = origin.relative_to(origin.parents[1]) dest = (Path(final_dir_tracks) / dest_subfolder).resolve() dest_paths.append(dest) #Determine relative path to put in the new JSON file json_relpath = Path(os.path.relpath(dest,final_dir_json)) new_paths_rel.append(json_relpath.as_posix()) new_json['conditions'][condition] = new_paths_rel # Move the xml and raw files without overwriting num_dirs = 
len(origin_paths) dir_counter = 1 for origin, dest in zip(origin_paths, dest_paths): print('Working on directory '+str(dir_counter)+' out of '+str(num_dirs)) print(str(origin)) dir_counter = dir_counter + 1 if not os.path.exists(dest): os.makedirs(dest) xml_files_in_origin = glob.glob(os.path.join(origin, '**/*.xml'), recursive=True) if not xml_files_in_origin: print("WARNING: no valid xml files found in this directory!") else: print(str(len(xml_files_in_origin))+' xml files found here.') for file_origin in xml_files_in_origin: name = os.path.split(file_origin)[1] file_dest = os.path.join(dest, name) # Move xml track files if not os.path.exists(file_dest): shutil.copy(file_origin, dest) # Locate and move raw tif files if needed if organize_raw: # get source file name name_core = name[3:-4] # remove channel and extension search_string = '*'+name_core+'.'+image_file_extension # Look for matching tif file source_path = list(Path(base_dir_raw).rglob(search_string)) if len(source_path) > 1: print('Too many matches found! Problematic files:') print(name_core) for x in source_path: print(x) if len(source_path) == 0: print('No matches found! Problematic file:') print(name_core) continue source_path = source_path[0] if source_path in raw_files_copied: continue # this file had already been moved raw_files_copied.append(source_path) #Organize folder and parent folder to match those of the xml file dest_subfolder = dest.relative_to(dest.parents[1]) dest_folder = os.path.join(final_dir_raw, dest_subfolder) dest_folder = os.path.abspath(dest_folder) if not os.path.exists(dest_folder): os.makedirs(dest_folder) dest_file = os.path.join(dest_folder, os.path.split(source_path)[1]) # Move files to new folder without overwriting if not os.path.exists(dest_file): shutil.copy(source_path, dest_file) # write new JSON settings file if not os.path.exists(final_dir_json): os.makedirs(final_dir_json) json_file_name = os.path.split(file)[1] json_file_path = os.path.join(final_dir_json, json_file_name) with open(json_file_path, 'w', encoding='utf-8') as f: json.dump(new_json, f, ensure_ascii=False, indent=4) #print(params['raw_json']) #print(origin_paths) #print(dest_paths) #print(new_json['conditions']) print('Job done')![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) Use pretrained `explain_document` Pipeline Stages * DocumentAssembler * SentenceDetector * Tokenizer * Lemmatizer * Stemmer * Part of Speech * SpellChecker (Norvig)import sys import time #Spark ML and SQL from pyspark.ml import Pipeline, PipelineModel from pyspark.sql.functions import array_contains from pyspark.sql import SparkSession from pyspark.sql.types import StructType, StructField, IntegerType, StringType #Spark NLP import sparknlp from sparknlp.pretrained import PretrainedPipeline from sparknlp.annotator import * from sparknlp.common import RegexRule from sparknlp.base import DocumentAssembler, FinisherLet's create a Spark Session for our appspark = sparknlp.start() print("Spark NLP version: ", sparknlp.version()) print("Apache Spark version: ", spark.version)Spark NLP version: 2.6.0 Apache Spark version: 2.4.4This is our testing document, we'll use it to exemplify all different pipeline stages.testDoc = [ "Frenchg author who helped pioner the science-fiction genre. \ Verne wrate about space, aisr, and underwater travel befdaore \ navigable aircrast and practical submarines were invented, \ and before any means of space travel had been devised. 
" ] pipeline = PretrainedPipeline('explain_document_ml', lang='en')explain_document_ml download started this may take some time. Approx size to download 9.4 MB [OK!]We are not interested in handling big datasets, let's switch to LightPipelines for speed.result = pipeline.annotate(testDoc)Let's analyze these results - first let's see what sentences we detected[content['sentence'] for content in result]Now let's see how those sentences were tokenized[content['token'] for content in result]Notice some spelling errors? the pipeline takes care of that as well[content['spell'] for content in result]Now let's see the lemmas[content['lemmas'] for content in result]Let's check the stems, any difference with the lemmas shown bebore?[content['lemmas'] for content in result][content['stems'] for content in result]Now it's the turn on Part Of Speech(POS)pos = [content['pos'] for content in result] token = [content['token'] for content in result] # let's put token and tag together list(zip(token[0], pos[0]))Use pretrained `match_chunk` Pipeline for Individual Noun Phrase * DocumentAssembler* SentenceDetector* Tokenizer* Part of speech* chunkerPipeline:* The pipeline uses regex `?*+`* which states that whenever the chunk finds an optional determiner * (DT) followed by any number of adjectives (JJ) and then a noun (NN) then the Noun Phrase(NP) chunk should be formed.pipeline = PretrainedPipeline('match_chunks', lang='en') result = pipeline.annotate("The book has many chapters") # single noun phrase result['chunk'] result = pipeline.annotate("the little yellow dog barked at the cat") #multiple noune phrases result['chunk'] result**Handling Imbalanced Dataset**from imblearn.over_sampling import RandomOverSampler # used oversampling os = RandomOverSampler() X, y = os.fit_resample(X, y) from collections import Counter Counter(y) # checking value counts of labels X=pd.DataFrame(X,columns=['Messages']) # after using overfit the X data was in numpy so to make in a proper dataframe we used pandas DataFrame X.info() X.head()**Data Preprocessing**Removing the punctuation cleaning the text using stemming or lemmatization in which stemming is used to remove the suffix and lemmatization means to have a proper meaning word.import nltk import re from nltk.corpus import stopwords import string from nltk.stem import WordNetLemmatizer from nltk.stem import PorterStemmer lemmatize=WordNetLemmatizer() stemming=PorterStemmer() # nltk.download('stopwords') # nltk.download('wordnet') corpus=[] for i in range(len(X)): url=re.sub(r'https?://\S+|www\.\S+',' ',X['Messages'][i]) # fixing url html=re.sub(r'<.*?>',' ',url) #fixing html word=re.sub(r'\W',' ',html) # removing special characters remove_pun="".join([char for char in word if char not in string.punctuation]) # removing punctuation lower=remove_pun.lower() # lowercase letters tokens=lower.split() # tokenization # lemmatize=[lemmatize.lemmatize(word) for word in tokens if word not in set(stopwords.words('english'))] # lemmatization S_stem=[stemming.stem(word) for word in tokens if word not in set(stopwords.words('english'))] #stemming replace=' '.join(S_stem) corpus.append(replace) print(corpus) len(corpus) len(y)**Train-Test split**# train-test split for training the model and to avoid data lekage from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(corpus,y,test_size=0.25,random_state=42,stratify=y) Counter(y_train),Counter(y_test) X_train[0:5]**Using Term Frequency Inverse Document Frequency**TF = No of repeated words in 
sentences / No of words in a sentenceIDF= No of sentences / Word containing in the number of sentence# As our data is in text data so to convert the data into numerical is really important for creating our machine learning model # We are using Term Frequency Inverse Document Frequency from sklearn.feature_extraction.text import TfidfVectorizer tfidf=TfidfVectorizer(lowercase=True,ngram_range=(1, 2), max_features = 5000) # Use unigram & bi-gram # applying tfidf on X_train and X_test X_train_tfidf = tfidf.fit_transform(X_train).toarray() X_test_tfidf = tfidf.transform(X_test).toarray() X_train_tfidf.shape,X_test_tfidf.shape**Using Support Vector Classifier**# using Support Vector Classifier from sklearn.svm import SVC support_vector_classifier=SVC()**Evaluating the model**from sklearn.metrics import accuracy_score,confusion_matrix,classification_report support_vector_classifier.fit(X_train_tfidf,y_train) predictions_svc_training = support_vector_classifier.predict(X_train_tfidf) accuracy_svc_training=accuracy_score(y_train,predictions_svc_training) predictions_svc_testing = support_vector_classifier.predict(X_test_tfidf) accuracy_svc_testing=accuracy_score(y_test,predictions_svc_testing) print('SVC on training: ',accuracy_svc_training) print('SVC on testing: ',accuracy_svc_testing) confusion_matrix_svc=confusion_matrix(y_test,predictions_svc_testing) print(confusion_matrix_svc) classification_report_svc=classification_report(y_test,predictions_svc_testing) print(classification_report_svc)[[1205 2] [ 2 1204]] precision recall f1-score support 0 1.00 1.00 1.00 1207 1 1.00 1.00 1.00 1206 accuracy 1.00 2413 macro avg 1.00 1.00 1.00 2413 weighted avg 1.00 1.00 1.00 2413Using ***StratifiedKFold*** for cross validation# from sklearn.model_selection import StratifiedKFold # from sklearn.model_selection import cross_val_score # skfold=StratifiedKFold(n_splits=5) # score=cross_val_score(support_vector_classifier,X_train_tfidf,y_train,cv=skfold) # score**Using Naive Bayes**from sklearn.naive_bayes import MultinomialNB naive_bayes_classifier = MultinomialNB() naive_bayes_classifier.fit(X_train_tfidf,y_train) predictions_nb_training = naive_bayes_classifier.predict(X_train_tfidf) accuracy_nb_training=accuracy_score(y_train,predictions_nb_training) predictions_nb_testing = naive_bayes_classifier.predict(X_test_tfidf) accuracy_nb_testing=accuracy_score(y_test,predictions_nb_testing) print('Naive Bayes on training: ',accuracy_nb_training) print('Naive Bayes on testing: ',accuracy_nb_testing) confusion_matrix_nb=confusion_matrix(y_test,predictions_nb_testing) print(confusion_matrix_nb) classification_report_nb=classification_report(y_test,predictions_nb_testing) print(classification_report_nb)[[1139 68] [ 21 1185]] precision recall f1-score support 0 0.98 0.94 0.96 1207 1 0.95 0.98 0.96 1206 accuracy 0.96 2413 macro avg 0.96 0.96 0.96 2413 weighted avg 0.96 0.96 0.96 2413**Using Logistic Regression**from sklearn.linear_model import LogisticRegression rfc=LogisticRegression() rfc.fit(X_train_tfidf,y_train) predictions_rfc_training = rfc.predict(X_train_tfidf) accuracy_rfc_training=accuracy_score(y_train,predictions_rfc_training) predictions_rfc_testing = rfc.predict(X_test_tfidf) accuracy_rfc_testing=accuracy_score(y_test,predictions_rfc_testing) print('Logistic Regression on training: ',accuracy_rfc_training) print('Logistic Regression classifier on testing: ',accuracy_rfc_testing) confusion_matrix_lr=confusion_matrix(y_test,predictions_rfc_testing) print(confusion_matrix_lr) 
classification_report_lr=classification_report(y_test,predictions_rfc_testing) print(classification_report_lr) from sklearn.ensemble import RandomForestClassifier random_fc=RandomForestClassifier() random_fc.fit(X_train_tfidf,y_train) predictions_randomfc_training = random_fc.predict(X_train_tfidf) accuracy_randomfc_training=accuracy_score(y_train,predictions_randomfc_training) predictions_randomfc_testing = random_fc.predict(X_test_tfidf) accuracy_randomfc_testing=accuracy_score(y_test,predictions_randomfc_testing) print('Random forest classifier on training: ',accuracy_randomfc_training) print('Random forest classifier on testing: ',accuracy_randomfc_testing) confusion_matrix_random_fc=confusion_matrix(y_test,predictions_randomfc_testing) print(confusion_matrix_random_fc) classification_report_random_fc=classification_report(y_test,predictions_randomfc_testing) print(classification_report_random_fc)[[1205 2] [ 1 1205]] precision recall f1-score support 0 1.00 1.00 1.00 1207 1 1.00 1.00 1.00 1206 accuracy 1.00 2413 macro avg 1.00 1.00 1.00 2413 weighted avg 1.00 1.00 1.00 2413**Checking on new data whether it is spam or ham**text=input() clean_data=tfidf.transform([text]).toarray() predict=support_vector_classifier.predict(clean_data)[0] if predict == 0: print('Spam') else: print('Ham') import pickle file=open('Tfidf.pkl','wb') pickle.dump(tfidf,file) file1=open('svc.pkl','wb') pickle.dump(support_vector_classifier,file1)Hopfield Network for Binocular Stereo This demo applies a Hopfield network (mean field theory) to binocular stereo# First initialize this notebook, load libraries for matrix manipulation and plotting %matplotlib inline import matplotlib.pyplot as plt import numpy as np def imshowPatch(im): plt.imshow(im, cmap='gray', interpolation='none') plt.axis('off')Hand-wiring the constraints in a neural net. How does one "sculpt the energy landscape"? One can use a form of Hebbian learning to dig holes (i.e. stable points or attractors) in the energy landscape indicating things to be remembered. Alternatively, one can study the nature of the problem to be solved and hand-wire the network to represent the constraints (i.e. reason out, or make an educated guess as to what the weights should be). We will follow the second approach to solve the correspondence problem. This problem crops up in a number of domains in pattern theory and recognition, and occurs whenever the features in two patterns need to be matched up, but one pattern is an unknown distortion of the other. Imagine, for example, the image of a face in memory, and you want to test whether an incoming stimulus is from the same person. Both patterns have more or less the same features, but they don't superimpose because they are from different views or expressions. So you want to try to morph one on to the other, but before you do that, you might want to decide which features go with which--i.e. pupils to pupils, nostrils to nostrils, etc.. Establishing correspondences between two similar but not quite identical patterns has also been a central, and challenging problem in both stereo vision and motion processing.In the next few sections, we will show how the weights in a network can be set up to represent constraints. Then we will look at three ways of letting the network evolve: asynchronous, synchronous, and partially asychronous updating. The first case exactly satisfies the assumptions required for Hopfield's energy function. This setup of the stereo problem is based on one of the earliest stereo algorithms from 1976. 
It is highly simplified. Establishing correspondences: An example of constraint satisfaction Introduction to stereo and the correspondence problem If you cross your eyes so that you can perceptually fuse the two random patterns below, you may be able to see a small square floating in front of the random background. Crossing your eyes means that the left image goes to the right eye, and the right to the left. (Some people are better at looking at the left image with the left eye, and right with the right eye. For this type of human, the images below should be exchanged.)This is an example of a random dot stereogram originally developed by in the 1960's. This is an example of a random dot stereogram originally developed by in the 1960's.# Generate random dot stereogram, 32 x 32 # Make a randome image, the value of pixel is [0,4] left = np.random.randint(0, 5, size=(32,32)) / 4.0 # Shift a patch two pixels to right xoffset = 1; r = [10, 10, 10, 10] # [x, y, w, h] patch = left[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] right = left.copy() r[0] = r[0] + xoffset; right[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = patch # Shift the patch right[r[1]:r[1]+r[3], r[0]-xoffset:r[0]] = np.random.randint(0, 5, size=(r[3], xoffset)) / 4.0 # Fill the hole with random value plt.subplot(121); imshowPatch(left) plt.subplot(122); imshowPatch(right)It is made by taking a small square sub-patch in one image, shifting it by a pixel or two, to make the second image. Since the subpatch is shifted, it leaves a sub-column of pixels unspecified. These get assigned new random values. The stereogram mimics what happens in the real world when you look at an object that stands out in depth from a background--the left eye's view is slightly different than the right eye's view. The pixels for an object in front are shifted with respect to the background pixels in one eye's view compared to the other. There is a disparity between the two eyes. The distances between two points in the left eye and the distance of the images of the same two points in the right eye are, in general different, and depend on the relative depth of the two points in the world. To see depth in a random dot stereogram, the human visual system effectively solves a correspondence problem. The fundamental problem is to figure out which of the pixels in the left eye belong to which ones in the right. This is a non-trivial computational problem when so may of the features (i.e. the pixel intensities) look the same--there is considerable potential for false matches. A small minority don't have matching pairs (i.e. the ones that got filled in the vertical slot left after shifting the sub-patch). We'll get to this in a moment. Human perception solves the stereo correspondence, so let us see if we can devise a neural network style algorithm to solve it. Display a pair of images It is not easy to fuse the left and right image without a stereo device (it requires placing the images side by side and crossing your eyes. We can check out our images another way. The visual system also solves a correspondence problem over time. We can illustrate this using animation. When the animation begins you can find the central patch almost magically appears to oscillate and float above the background. When the animation stops, the central square patch disappears again into the camouflage. 
Animation: Execute below cell to see animation# Alternating the two images to show it's relly moving import time from IPython.display import display, clear_output f, ax = plt.subplots() t = 0.8 for i in range(8): clear_output(wait=True); ax.imshow(left, cmap='gray', interpolation='none') display(f); time.sleep(t) clear_output(wait=True); ax.imshow(right, cmap='gray', interpolation='none') display(f); time.sleep(t) clear_output()Two-state neural network implementation using Marr and Poggio (1976) constraints Simplify the problem to one dimension We will apply the constraints proposed by Marr and Poggio (1976) to try to solve the correspondence problem for just the middle rows of left and right images:leftMiddle = np.matrix(left[16,]) rightMiddle = np.matrix(right[16,]) imshowPatch(np.concatenate((leftMiddle, rightMiddle)))We've displayed the left eye's view above the right eye's, so you can try to spot which pixels have been shifted. Because the patch was shifted horizontally, we haven't lost the essence of the correspondence problem by reducing it to one dimension. You can see the ambiguity in the correspondence problem. Following Marr and Poggio, we will try to solve the correspondence (i.e. which pairs of pixels in the two images belong together) using three constraints: - compatibility- uniqueness- smoothnessWe will see what these constraints mean as we move along. The challenge is to design a network that enforces these constraints. The compatibility constraint The compatibility constraint says that similar features in each eye should match, in other words like pixels (same graylevel) in each image should match. We can represent compatibility by constructing a "compatibility matrix" which has a 1 at each location where there is a possible match (i.e. in graylevel color), and zeros elsewhere. Here's the plot of the compatibility of the middle row for the left and right eye's stereo pictures, where white represents 1 (compatible), and black 0 (not compatible).leftRepeat = np.repeat(leftMiddle, 32, axis=0) rightRepeat = np.repeat(rightMiddle.T, 32, axis=1) compatibility = (leftRepeat == rightRepeat) imshowPatch(compatibility)The uniqueness and smoothness constraints But many of these possible matches seem unlikely. A given point in one eye shouldn't have lots of matches in the other eye--a point on a surface in the world typically maps to at most one point in each eye. We would like to find a line through the compatibility matrix to indicate unique matches. So we have to discourage more than one unit from being on in any given row or column (i.e. enforce a uniqueness constraint). Also when we think about how features along surfaces typically change as a function of position, we realize that surfaces to a first approximation are smooth--thus, nearby points on a surface project to nearby points in the two eyes. For convenience, let's assume an even stronger constraint in which nearby points have constant disparities. (Recall that disparity refers to the relative shift of corresponding points in the two images.) This means to we should encourage elements that have nearest neighbor support along the diagonals to be on (to encourage regions that have constant disparity). This latter constraint is called a "surface smoothness constraint", which refers to the underlying assumption that changes in depth usually change gradually--sudden changes in disparity are rare. 
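For reference, one common way of writing the energy that Hopfield showed is non-increasing under asynchronous updates of two-state units with symmetric weights is $E = -\frac{1}{2}\sum_{i \neq j} T_{ij} V_i V_j - \sum_i I_i V_i$, where $T_{ij}$ are the connection weights and $I_i$ are the per-unit biases. In the network set up below, the weights encode the uniqueness and smoothness constraints, and the biases are proportional to the compatibility matrix.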
We will follow Marr and Poggio and set up a threshold logic unit at each location of the compatibility matrix, and wire each up to reflect the above constraints. We are going to have to worry about the boundaries. There are several ways of doing this. One is to have a "free" boundary in which the connection weights at the boundaries are actually different (to compensate for a lack of neighbors in the other directions). Although not biologically realistic, a second way is to use a toroidal geometry, restricting indices by the following modulus function: myMod[x_] := Mod[x-1,size]+1. This option is computationally convenient and makes it possible to comply with the restriction of symmetric connections everywhere. To encourage uniqueness, let's make the vertical and horizontal connection weights all equal and negative with a weight inhib. To encourage smoothness, we'll make the diagonal support positive with mutally excitatory weights excit. The network will have biases for each unit (which are equivalent to appropriate thresholds for each unit) proportional to the original compatibility matrix. These biases correspond to the Ui's in the Hopfield net and serve to prevent the network from loosing this strong constraint from the data as the iterations progress. Note that we won't set up the network calculations using a matrix and matrix multiplications as you will do in the Hopfield memory examples. This is because most of our connections are zero and we don't want to waste time multiplying zero elements and adding them up. So the code below will look messier. Hopfield Net: Asynchronous updating--all sites visited randomly, at random times(Animation) To do random asynchronous updating, you simply pick a site at random and update it, and then pick another, and so on.excitWeight = 2 inhibWeight = -1 theta = 13 k = 8 V = compatibility.copy() def r(ind): return ind % 32 for iter in range(10000): p = np.random.randint(32, size=(1,2)) x0 = p[0,0]; y0 = p[0,1] # sum over the same row inhib = 0 for x in range(-4, 5): inhib = inhib + V[y0, r(x0+x)] # sum over the same col for y in range(-4, 5): inhib = inhib + V[r(y0+y), x0] inhib = inhib - 2 * V[y0, x0] # sum over diagonal excit = 0 for off in range(-4, 5): excit = excit + V[r(y0+off), r(x0+off)] excit = excit - V[y0,x0] V[y0, x0] = (inhibWeight * inhib + excitWeight * excit + k * V[y0,x0] > theta) plt.subplot(121); imshowPatch(-compatibility) plt.subplot(122); imshowPatch(-V)Load preprocessed data If you'd like to play around with this notebook, start by downloading the skipgram dataset from here:https://www.dropbox.com/s/nd1zxh538o6psal/skipgram_full.npzWARNING: it's a 1Gb download, so it may take a while!import numpy as np codes = np.load("../data/skipgram_full.npz")['coded'] # Remove duplicate skipgrams codes = codes[codes[:, 0] != codes[:, 1]] code2token = np.load("../data/skipgram_full.npz")['c2t'].tolist() token2code = np.load("../data/skipgram_full.npz")['t2c'].tolist() # First column is the first token code # second column is the 2nd token code # third column is the skip gram count # fourth is PMI * 1e6 codes train_x = codes[:, :2].copy().astype(np.int64) train_y = codes[:, 3].astype(np.float32) / 1e6 train_y train_y.max() top_codes = np.argsort(train_y)[-10:] [[code2token[c[0]], code2token[c[1]]] for c in codes[top_codes, :2]] n_user = np.max(train_x[:, :2]) + 1 n_item = np.max(train_x[:, :2]) + 1 n_userDefine the MF Modelimport torch from torch import nn import torch.nn.functional as F def l2_regularize(array): loss = torch.sum(array ** 2.0) 
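# l2_regularize returns the sum of squared entries of the given parameter tensor;
# MF.loss below computes (but, as written, does not add to the total) L2 priors from it.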
return loss class MF(nn.Module): itr = 0 def __init__(self, n_user, n_item, k=18, c_vector=1.0, c_bias=1.0, writer=None): super(MF, self).__init__() self.writer = writer self.k = k self.n_user = n_user self.n_item = n_item self.c_bias = c_bias self.c_vector = c_vector self.user = nn.Embedding(n_user, k) self.item = nn.Embedding(n_item, k) self.user.weight.data.normal_(0, 1.0 / n_user) self.item.weight.data.normal_(0, 1.0 / n_item) # We've added new terms here: self.bias_user = nn.Embedding(n_user, 1) self.bias_item = nn.Embedding(n_item, 1) self.bias = nn.Parameter(torch.ones(1)) def __call__(self, train_x): user_id = train_x[:, 0] item_id = train_x[:, 1] vector_user = self.user(user_id) vector_item = self.item(item_id) bias_user = self.bias_user(user_id).squeeze() bias_item = self.bias_item(item_id).squeeze() biases = (self.bias + bias_user + bias_item) ui_interaction = torch.sum(vector_user * vector_item, dim=1) prediction = ui_interaction + biases return prediction def loss(self, prediction, target): loss_mse = F.mse_loss(prediction, target.squeeze()) prior_bias_user = l2_regularize(self.bias_user.weight) * self.c_bias prior_bias_item = l2_regularize(self.bias_item.weight) * self.c_bias prior_user = l2_regularize(self.user.weight) * self.c_vector prior_item = l2_regularize(self.item.weight) * self.c_vector total = loss_mse #+ prior_user + prior_item for name, var in locals().items(): if type(var) is torch.Tensor and var.nelement() == 1 and self.writer is not None: self.writer.add_scalar(name, var, self.itr) return totalTrain modelfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator from ignite.metrics import Loss from tensorboardX import SummaryWriter from ignite.metrics import MeanSquaredError from loader import Loader from datetime import datetimeHyperparameterslr = 1e-3 k = 128 c_bias = 1e-9 c_vector = 1e-9 log_dir = 'runs/simple_mf_05_word2vec_' + str(datetime.now()).replace(' ', '_') print(log_dir) writer = SummaryWriter(log_dir=log_dir) model = MF(n_user, n_item, k=k, c_bias=c_bias, c_vector=c_vector, writer=writer) optimizer = torch.optim.Adam(model.parameters(), lr=lr) trainer = create_supervised_trainer(model, optimizer, model.loss) metrics = {'accuracy': MeanSquaredError()} train_loader = Loader(train_x, train_y, batchsize=1024) def log_training_loss(engine, log_interval=400): epoch = engine.state.epoch itr = engine.state.iteration fmt = "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" msg = fmt.format(epoch, itr, len(train_loader), engine.state.output) model.itr = itr if itr % log_interval == 0: print(msg) trainer.add_event_handler(event_name=Events.ITERATION_COMPLETED, handler=log_training_loss) model model.load_state_dict(torch.load("model_05_word2vec"))Run modeltrainer.run(train_loader, max_epochs=25) torch.save(model.state_dict(), "model_05_word2vec")Save the embeddingslabel_token = ['|' + code2token[c] for c in range(n_user)] writer.add_embedding(model.user.weight) # writer.add_embedding(model.item.weight, metadata=label_token)Introspect the model Evaluate what urban dictionary thinks are similar words.vectors_raw = model.user.weight.data.numpy() vectors = vectors_raw / np.sqrt((vectors_raw**2.0).sum(axis=1)[:, None]) (vectors[0]**2.0).sum() def find_closest(token, n=10): code = token2code[token] vector = vectors[code] similarity = np.sum(vector[None, :] * vectors, axis=1) closest = np.argsort(similarity)[::-1] for code in closest[1:n]: print(code2token[code], similarity[code]) find_closest('dude') find_closest('netflix') find_closest('lol') 
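# Note: the rows of `vectors` were normalized to unit length above, so the dot
# products computed inside find_closest are cosine similarities between word embeddings.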
find_closest('hipster') find_closest('crunk') find_closest('bromance') find_closest('barbie') find_closest('relationship') find_closest('pope') find_closest('trump') find_closest('selfie')selfies 0.6768813 instagram 0.58078086 photo 0.5547765 pic 0.5447346 snapchat 0.54272944 upload 0.52603865 photographer 0.5154379 caption 0.49573278 tweet 0.47855204Subtract and add word vectorsdef add_subtract(center, minus, plus, n=10): vector = (vectors[token2code[center]] - vectors[token2code[minus]] + vectors[token2code[plus]]) similarity = np.sum(vector[None, :] * vectors, axis=1) closest = np.argsort(similarity)[::-1] for code in closest[2:n]: print(code2token[code]) add_subtract('burrito', 'mexican', 'italian') add_subtract('drunk', 'beer', 'weed')drunk ganja shrooms chronic paranoid pothead kush fuckedManipulate position of jointsThis notebook tries to change the output of the openpose network to move any joint to a wanted postion%load_ext autoreload %autoreload 2 # imports import logging import os import sys import cv2 import numpy as np from matplotlib import pyplot as plt from plot_utils import plot_vector_field from tf_pose import common from tf_pose.common import CocoPairs, CocoPairsNetwork, CocoPart from tf_pose.estimator import PoseEstimator, TfPoseEstimator from tf_pose.networks import get_graph_path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" logging.getLogger("tensorflow").setLevel(logging.CRITICAL) logging.getLogger('TfPoseEstimatorRun').setLevel(logging.ERROR) logging.getLogger('DeepExplain').setLevel(logging.ERROR) logging.getLogger('TfPoseEstimator').setLevel(logging.ERROR) # params 432, 368 w, h = 432, 368 image_path = '../data/images/muscle.jpg' model = 'cmu' resize_out_ratio = 2.0 image = common.read_imgfile(image_path, w, h) e = TfPoseEstimator(get_graph_path(model), target_size=(w, h), trt_bool=False) # compute with and without humans_original = e.inference(image, resize_to_default=( w > 0 and h > 0), upsample_size=resize_out_ratio) def get_index_on_line(peak_a, peak_b, steps=1000): indicies_on_line = [] step_vec = (peak_b - peak_a) / float(steps) for i in range(steps): index = peak_a + i * step_vec index = np.ceil(index) indicies_on_line.append(index.astype(int)) return indicies_on_line def lies_between(A, B, C): a = np.linalg.norm(C - B) b = np.linalg.norm(C - A) c = np.linalg.norm(B - A) return a**2 + b**2 >= c**2 and a**2 + c**2 >= b**2 def get_index_near_line(data, p1, p2, distance=10): indicies_near_line = [] for p3, value in np.ndenumerate(data): dist_to_line = np.linalg.norm( np.cross(p2-p1, p1-p3))/np.linalg.norm(p2-p1) if dist_to_line < distance and lies_between(p3, p1, p2): indicies_near_line.append(p3) return indicies_near_line def get_affected_pairs(index): return list(filter(lambda pair: pair[1][0] == index or pair[1][1] == index, enumerate(CocoPairs))) # position where to put the peak def modify_joint(old_peaks, old_heatMat, old_pafMat, change_id, pos_x=40, pos_y=60): # copy the required mats peaks = np.copy(old_peaks) heatMat = np.copy(old_heatMat) pafMat = np.copy(old_pafMat) eps = 0.0001 print(f'Changing {change_id} part') # CHANGE VLAUES OF THE PEAKS # change PAFs # find all affected PAF affected_pafs = get_affected_pairs(change_id) for (part_id, (peak_a_id, peak_b_id)) in affected_pafs: peak_id = peak_a_id if peak_b_id == change_id else peak_b_id # find the peak connect to the joint we want to move peak_y, peak_x = np.unravel_index( peaks[:, :, peak_id].argmax(), peaks[:, :, peak_id].shape) print(f'SET pos x: {pos_x}, y: {pos_y}') print(f'Peak pos x: {peak_x}, 
y: {peak_y}') vec = np.array([pos_x - peak_x, pos_y - peak_y]).astype('float64') vec /= np.linalg.norm(vec) if peak_a_id == change_id: vec *= -1.0 print(f'Vec x: {vec[0]}, y: {vec[1]}') # compute the index on the line between the two points index_to_change = get_index_near_line( np.zeros(shape=pafMat.shape[:2]), np.array([peak_x, peak_y]), np.array([pos_x, pos_y]), 1 ) # reset the PAF # which paf in the network joint_id = CocoPairsNetwork[part_id] # set all values to zero pafMat[:, :, joint_id[0]] = np.zeros(shape=pafMat.shape[:2]) pafMat[:, :, joint_id[1]] = np.zeros(shape=pafMat.shape[:2]) # set only the required parts for (x, y) in index_to_change: pafMat[y, x, joint_id[0]] = vec[0] pafMat[y, x, joint_id[1]] = vec[1] # change heat # only one point is needed cur_heat_max_val = np.max(heatMat[:, :, change_id]) heatMat[:, :, change_id] = np.zeros(shape=peaks.shape[:2]) heatMat[pos_y, pos_x, change_id] = cur_heat_max_val + eps # change peak position cur_max = np.max(peaks[:, :, change_id]) peaks[:, :, change_id] = np.zeros(shape=peaks.shape[:2]) peaks[pos_y, pos_x, change_id] = cur_max + eps return peaks, heatMat, pafMat COCO_id = CocoPart.RElbow.value peaks, heatMat, pafMat = modify_joint( e.peaks, e.heatMat, e.pafMat, COCO_id, 10, 20) peaks, heatMat, pafMat = modify_joint( peaks, heatMat, pafMat, CocoPart.RWrist.value, 30, 5) peaks, heatMat, pafMat = modify_joint( peaks, heatMat, pafMat, CocoPart.LElbow.value, 70, 20) peaks, heatMat, pafMat = modify_joint( peaks, heatMat, pafMat, CocoPart.LWrist.value, 90, 5) humans_modified = PoseEstimator.estimate_paf(peaks, heatMat, pafMat) part = humans_original[0].body_parts[COCO_id] print(f'ORIG: score: {part.score} x: {part.x} y: {part.y} ') part = humans_modified[0].body_parts[COCO_id] print(f'OCCULED: score: {part.score} x: {part.x} y: {part.y} ') fig = plt.figure(figsize=(50, 25)) a = fig.add_subplot(1, 3, 1) a.set_title('Different peak locations ORIG: RED, MODIFIED: BLUE') peak_orig = e.peaks[:, :, COCO_id] peak_black = peaks[:, :, COCO_id] comb_image = np.amax([peak_orig, peak_black], axis=0) comb_image[comb_image == np.max(peak_orig)] = 1.0 comb_image[comb_image == np.max(peak_black)] = -1.0 a.imshow(comb_image, cmap=plt.cm.seismic) # draw orig a = fig.add_subplot(1, 3, 2) image_result = TfPoseEstimator.draw_humans( image, humans_original, imgcopy=True) a.set_title('Result ORIGNAL') plt.imshow(cv2.cvtColor(image_result, cv2.COLOR_BGR2RGB)) # draw modified a = fig.add_subplot(1, 3, 3) image_result = TfPoseEstimator.draw_humans( image, humans_modified, imgcopy=True) a.set_title('Result MODIFIED') plt.imshow(cv2.cvtColor(image_result, cv2.COLOR_BGR2RGB)) fig = plt.figure(figsize=(20, 10)) ax = fig.add_subplot(1, 2, 1) ax.imshow(e.heatMat[:, :, COCO_id]) ax.set_title('ORIGNAL HEATMAP') ax = fig.add_subplot(1, 2, 2) ax.imshow(heatMat[:, :, COCO_id]) ax.set_title('MODIFIED HEATMAP') def get_UV(mats, change_id): U = [] V = [] affected_pafs = get_affected_pairs(change_id) for (part_id, (peak_a_id, peak_b_id)) in affected_pafs: joint_id = CocoPairsNetwork[part_id] U.append(np.copy(mats[:, :, joint_id[0]]) * (-1.0)) V.append(np.copy(mats[:, :, joint_id[1]])) U = np.apply_along_axis(lambda x: max(x.min(), x.max(), key=abs), 0, U) V = np.apply_along_axis(lambda x: max(x.min(), x.max(), key=abs), 0, V) return U, V fig = plt.figure(figsize=(40, 20)) ax = fig.add_subplot(1, 2, 1) bgimg = cv2.resize( image, (e.pafMat.shape[1], e.pafMat.shape[0]), interpolation=cv2.INTER_AREA) U, V = get_UV(e.pafMat, COCO_id) plot_vector_field(U, V, bgimg, ax, fig) ax = 
fig.add_subplot(1, 2, 2) U, V = get_UV(pafMat, COCO_id) plot_vector_field(U, V, bgimg, ax, fig)Unit 3 Project Second section : Unsupervised task In this notebook you will be building and training an unsupervised learning model to cluster your data. For this task we will be using another clustering method called "Agglomerative Clustering" For this task : It's a hierarchical clustering method. The main idea is to merge examples until the desired number of clusters is achieved. Steps for this task: 1. Load the already clean dataset 2. Take into consideration that in this task we will not be using the target variable "Category" as we are trying to cluster the data. 3. Cluster numbers should be equal to the number of categories we have 4. We will add those clusters numbers as a feature (column in our dataset) 5. Build your model using the SKlearn AgglomerativeClustering class 6. Cluster your data and that as a column to your dataset 5. Save the new dataset with the additional column as "clustered_HepatitisC.csv" 1. Loading the data 1. Load the clean data 2. Keep only the needed **Columns**import pandas as pd from sklearn.cluster import AgglomerativeClustering # Load the dataset df = pd.read_csv("HepatitisCdata_1.csv") #df.rename(columns={'Unnamed: 0': "ID"}, inplace=True) df.head() #Just to save the "Category" column: df.head() Category = df["Category"] Category #Just to save the "ID" column: df.head() Id = df["ID"] Id # To-Do : We will select only the columns to work with = all columns except the id and the Category df=df.loc[:,["Age","Sex","ALB","ALP","ALT","AST","BIL","CHE","CHOL","CREA","GGT","PROT"]] df.head()2. Build the clusting model and cluster the data 1. Build the model 2. Fit the model to the data 3. Cluster all data points and add that as a column# To-Do: set the number of clusters to be equal to the number of classes ( categories) n_clusters = 5 # define the model model = AgglomerativeClustering(n_clusters=n_clusters) #To-Do : fit model and predict clusters yhat = model.fit_predict(df) # To-Do :Add the cluster as a column to the data frame df['cluster'] = yhat df3. 
Save the new dataset with the additional column# To-Do save the file , Do not forget to drop the index df.insert(1,"Category",Category) df.insert(0,"ID",Id) df.head() df.to_csv('HepatitisCdata_2.csv',index=False ) df.head()Shueyimport matplotlib.pyplot as plt import numpy as np plt.style.use(['science', 'notebook', 'grid']) #shuey para angulos menores que 30 grados angulo=0.15 #gas sand Vp1=2280 nu1=0.42 rho1=2190 #wet sand Vp2=2217 nu2=0.42 rho2=2150 A=(Vp2*rho2-Vp1*rho1)/(Vp2*rho2+Vp1*rho1) #=Rpp print('El valor de A es igual a = '+str("{:.2e}".format(A))) Vp=(Vp1+Vp2)/2 print('El valor de vp es igual a = '+str("{:.2e}".format(Vp))) deltaVp=Vp2-Vp1 print('El valor de delta vp es igual a = '+str("{:.2e}".format(deltaVp))) rho=(rho1+rho2)/2 print('El valor de rho es igual a = '+str("{:.2e}".format(rho))) deltarho=rho2-rho1 print('El valor de drho es igual a = '+str("{:.2e}".format(deltarho))) D=(deltaVp/Vp)/(deltaVp/Vp+deltarho/rho) print('El valor de D es igual a = '+str("{:.2e}".format(D))) nu=(nu1+nu2)/2 print('El valor de nu es igual a = '+str("{:.2e}".format(nu))) delthanu=nu2-nu1 print('El valor de d nu es igual a = '+str("{:.2e}".format(delthanu))) B=A*(D-2*(1+D)*((1-2*nu)/(1-nu)))+(delthanu/(1-nu)**2) print('El valor de B es igual a = '+str("{:.2e}".format(B))) Rpp=A+B*(np.sin(angulo)**2) print('El valor de Rpp es igual a = '+str("{:.2e}".format(Rpp))) print('El valor de B*sin(angulo) es igual a = '+str("{:.2e}".format(B*(np.sin(angulo)**2))))El valor de B*sin(angulo) es igual a = 1.46e-04Multithreading and Multiprocessing* Published a blog post here: https://medium.com/@bfortuner/python-multithreading-vs-multiprocessing-73072ce5600bfrom concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor import numpy as np import time import matplotlib.pyplot as plt import glob from PIL import Image import random import string %matplotlib inline MULTITHREADING_TITLE="Multithreading" MULTIPROCESSING_TITLE="Multiprocessing" def visualize_runtimes(results, title): start,stop = np.array(results).T plt.barh(range(len(start)),stop-start,left=start) plt.grid(axis='x') plt.ylabel("Tasks") plt.xlabel("Seconds") plt.title(title) return stop[-1]-start[0] def multithreading(func, args, workers): begin_time = time.time() with ThreadPoolExecutor(max_workers=workers) as executor: res = executor.map(func, args, [begin_time for i in range(len(args))]) return list(res) def multiprocessing(func, args, workers): begin_time = time.time() with ProcessPoolExecutor(max_workers=workers) as executor: res = executor.map(func, args, [begin_time for i in range(len(args))]) return list(res)API Callsfrom urllib.request import urlopen def download(url, base): start = time.time() - base try: resp = urlopen(url) except Exception as e: print ('ERROR: %s' % e) stop = time.time() - base return start,stop N = 16 URL = 'http://scholar.princeton.edu/sites/default/files/oversize_pdf_test_0.pdf' urls = [URL for i in range(N)]Serial%timeit -n 1 [download(url, 1) for url in urls]1 loop, best of 3: 6.36 s per loopMultithreadingvisualize_runtimes(multithreading(download, urls, 1), "Single Thread") visualize_runtimes(multithreading(download, urls, 2),MULTITHREADING_TITLE) visualize_runtimes(multithreading(download, urls, 4),MULTITHREADING_TITLE)Multiprocessingvisualize_runtimes(multiprocessing(download, urls, 1), "Single Process") visualize_runtimes(multiprocessing(download, urls, 2), MULTIPROCESSING_TITLE) visualize_runtimes(multiprocessing(download, urls, 4), MULTIPROCESSING_TITLE)IO Heavydef io_heavy(text,base): start = 
time.time() - base f = open('output.txt', 'wt', encoding='utf-8') f.write(text) f.close() stop = time.time() - base return start,stop N=12 TEXT = ''.join(random.choice(string.ascii_lowercase) for i in range(10**7*5))Serial%timeit -n 1 [io_heavy(TEXT,1) for i in range(N)]1 loop, best of 3: 1.37 s per loopMultithreadingShould see good benefit from thisvisualize_runtimes(multithreading(io_heavy, [TEXT for i in range(N)], 1),"Single Thread") visualize_runtimes(multithreading(io_heavy, [TEXT for i in range(N)], 2),MULTITHREADING_TITLE) visualize_runtimes(multithreading(io_heavy, [TEXT for i in range(N)], 4),MULTITHREADING_TITLE)MultiprocessingShould see good benefit from thisvisualize_runtimes(multiprocessing(io_heavy, [TEXT for i in range(N)], 1),"Single Process") visualize_runtimes(multiprocessing(io_heavy, [TEXT for i in range(N)], 2),MULTIPROCESSING_TITLE) visualize_runtimes(multiprocessing(io_heavy, [TEXT for i in range(N)], 4),MULTIPROCESSING_TITLE)Numpy Addition#Does not use parallel processing by default #But will see speedups if multiprocessing used #Because numpy sidesteps python's GIL def addition(i, base): start = time.time() - base res = a + b stop = time.time() - base return start,stop DIMS = 20000 N = 20 DIMS_ARR = [DIMS for i in range(N)] a = np.random.rand(DIMS,DIMS) b = np.random.rand(DIMS,DIMS)Serial%timeit -n 1 [addition(i, time.time()) for i in range(N)]1 loop, best of 3: 14.9 s per loopMultithreadingSome benefit for numpy addition (operation avoids GIL, but not parallel by default)visualize_runtimes(multithreading(addition, [i for i in range(N)], 1),"Single Thread") visualize_runtimes(multithreading(addition, [i for i in range(N)], 2),MULTITHREADING_TITLE) visualize_runtimes(multithreading(addition, [i for i in range(N)], 4),MULTITHREADING_TITLE)MultiprocessingSome benefit for numpy addition (operation avoids GIL, but not parallel by default)visualize_runtimes(multiprocessing(addition, [i for i in range(N)], 1),"Single Process") visualize_runtimes(multiprocessing(addition, [i for i in range(N)], 1),MULTIPROCESSING_TITLE) visualize_runtimes(multiprocessing(addition, [i for i in range(N)], 1),MULTIPROCESSING_TITLE)Dot Product#Automatic parallel processing built works out of the box #Depending on BLAS impl, MKL (default with anaconda3) does #Should NOT see speedups with multithreading/processing def dot_product(i, base): start = time.time() - base res = np.dot(a,b) stop = time.time() - base return start,stop DIMS = 3000 N = 10 DIMS_ARR = [DIMS for i in range(N)] a = np.random.rand(DIMS,DIMS) b = np.random.rand(DIMS,DIMS)Serial%timeit -n 1 [dot_product(i, time.time()) for i in range(N)]1 loop, best of 3: 2.87 s per loopMultithreadingNo benefit on dot product (since already parallel)visualize_runtimes(multithreading(dot_product, [i for i in range(N)], 1),"Single Thread") visualize_runtimes(multithreading(dot_product, [i for i in range(N)], 2),MULTITHREADING_TITLE) visualize_runtimes(multithreading(dot_product, [i for i in range(N)], 4),MULTITHREADING_TITLE)MultiprocessingNo benefit on dot product (since already parallel)visualize_runtimes(multiprocessing(dot_product, [i for i in range(N)], 1),"Single Process") visualize_runtimes(multiprocessing(dot_product, [i for i in range(N)], 2),MULTIPROCESSING_TITLE) visualize_runtimes(multiprocessing(dot_product, [i for i in range(N)], 4),MULTIPROCESSING_TITLE)CPU Intensivedef cpu_heavy(n,base): start = time.time() - base count = 0 for i in range(n): count += i stop = time.time() - base return start,stop N = 10**7 ITERS = 10Serial%timeit 
-n 1 [cpu_heavy(N, time.time()) for i in range(ITERS)]1 loop, best of 3: 4.22 s per loopMultithreadingNo benefit on CPU-intensive tasksvisualize_runtimes(multithreading(cpu_heavy, [N for i in range(ITERS)], 1),"Single Thread") visualize_runtimes(multithreading(cpu_heavy, [N for i in range(ITERS)], 2),MULTITHREADING_TITLE) visualize_runtimes(multithreading(cpu_heavy, [N for i in range(ITERS)], 4),MULTITHREADING_TITLE)MultiprocessingShows benefits on CPU-intensive tasksvisualize_runtimes(multiprocessing(cpu_heavy, [N for i in range(ITERS)], 1),"Single Process") visualize_runtimes(multiprocessing(cpu_heavy, [N for i in range(ITERS)], 2),MULTIPROCESSING_TITLE) visualize_runtimes(multiprocessing(cpu_heavy, [N for i in range(ITERS)], 4),MULTIPROCESSING_TITLE)Resize Images* https://github.com/python-pillow/Pillow/blob/c9f54c98a5dc18685a9bf8c8822f770492a796d6/_imagingtk.c#This one takes IO so multithreading might be better? def resize_img(fpath, base): img = Image.open(fpath) rimg = img.resize((224,224)) img.close() return rimg DATA_PATH='/home/bfortuner/workplace/data/imagenet_sample/' fnames = list(glob.iglob(DATA_PATH+'*/*.JPEG')) N = 5000Serial%timeit -n 1 [resize_img(f,1) for f in fnames[:N]]1 loop, best of 3: 14.1 s per loopMultithreading%timeit -n 1 multithreading(resize_img, fnames[:N], 2) %timeit -n 1 multithreading(resize_img, fnames[:N], 4) %timeit -n 1 multithreading(resize_img, fnames[:N], 8)1 loop, best of 3: 3.26 s per loopMultiprocessing%timeit -n 1 multiprocessing(resize_img, fnames[:N], 2) %timeit -n 1 multiprocessing(resize_img, fnames[:N], 4) %timeit -n 1 multiprocessing(resize_img, fnames[:N], 8)1 loop, best of 3: 4.21 s per loop`Introduction `_ ||`Tensors `_ ||**Autograd** ||`Building Models `_ ||`TensorBoard Support `_ ||`Training Models `_ ||`Model Understanding `_The Fundamentals of Autograd============================Follow along with the video below or on `youtube `__... raw:: html PyTorch’s *Autograd* feature is part of what make PyTorch flexible andfast for building machine learning projects. It allows for the rapid andeasy computation of multiple partial derivatives (also referred to as*gradients)* over a complex computation. This operation is central tobackpropagation-based neural network learning.The power of autograd comes from the fact that it traces yourcomputation dynamically *at runtime,* meaning that if your model hasdecision branches, or loops whose lengths are not known until runtime,the computation will still be traced correctly, and you’ll get correctgradients to drive learning. This, combined with the fact that yourmodels are built in Python, offers far more flexibility than frameworksthat rely on static analysis of a more rigidly-structured model forcomputing gradients.What Do We Need Autograd For?----------------------------- A machine learning model is a *function*, with inputs and outputs. Forthis discussion, we’ll treat the inputs a as an *i*-dimensional vector$\vec{x}$, with elements $x_{i}$. We can then express themodel, *M*, as a vector-valued function of the input: $\vec{y} =\vec{M}(\vec{x})$. (We treat the value of M’s output asa vector because in general, a model may have any number of outputs.)Since we’ll mostly be discussing autograd in the context of training,our output of interest will be the model’s loss. The *loss function*L($\vec{y}$) = L($\vec{M}$\ ($\vec{x}$)) is asingle-valued scalar function of the model’s output. This functionexpresses how far off our model’s prediction was from a particularinput’s *ideal* output. 
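For example, the squared Euclidean distance between the prediction and the ideal output, $L(\vec{y}) = \sum_{i} (y_{i} - y_{i}^{ideal})^{2}$, is such a function, and it is the loss used in the training example later in this notebook.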
*Note: After this point, we will often omit thevector sign where it should be contextually clear - e.g.,* $y$instead of $\vec y$.In training a model, we want to minimize the loss. In the idealized caseof a perfect model, that means adjusting its learning weights - that is,the adjustable parameters of the function - such that loss is zero forall inputs. In the real world, it means an iterative process of nudgingthe learning weights until we see that we get a tolerable loss for awide variety of inputs.How do we decide how far and in which direction to nudge the weights? Wewant to *minimize* the loss, which means making its first derivativewith respect to the input equal to 0:$\frac{\partial L}{\partial x} = 0$.Recall, though, that the loss is not *directly* derived from the input,but a function of the model’s output (which is a function of the inputdirectly), $\frac{\partial L}{\partial x}$ =$\frac{\partial {L({\vec y})}}{\partial x}$. By the chain rule ofdifferential calculus, we have$\frac{\partial {L({\vec y})}}{\partial x}$ =$\frac{\partial L}{\partial y}\frac{\partial y}{\partial x}$ =$\frac{\partial L}{\partial y}\frac{\partial M(x)}{\partial x}$.$\frac{\partial M(x)}{\partial x}$ is where things get complex.The partial derivatives of the model’s outputs with respect to itsinputs, if we were to expand the expression using the chain rule again,would involve many local partial derivatives over every multipliedlearning weight, every activation function, and every other mathematicaltransformation in the model. The full expression for each such partialderivative is the sum of the products of the local gradient of *everypossible path* through the computation graph that ends with the variablewhose gradient we are trying to measure.In particular, the gradients over the learning weights are of interestto us - they tell us *what direction to change each weight* to get theloss function closer to zero.Since the number of such local derivatives (each corresponding to aseparate path through the model’s computation graph) will tend to go upexponentially with the depth of a neural network, so does the complexityin computing them. This is where autograd comes in: It tracks thehistory of every computation. Every computed tensor in your PyTorchmodel carries a history of its input tensors and the function used tocreate it. Combined with the fact that PyTorch functions meant to act ontensors each have a built-in implementation for computing their ownderivatives, this greatly speeds the computation of the localderivatives needed for learning.A Simple Example----------------That was a lot of theory - but what does it look like to use autograd inpractice?Let’s start with a straightforward example. First, we’ll do some importsto let us graph our results:# %matplotlib inline import torch import matplotlib.pyplot as plt import matplotlib.ticker as ticker import mathNext, we’ll create an input tensor full of evenly spaced values on theinterval $[0, 2{\pi}]$, and specify ``requires_grad=True``. (Likemost functions that create tensors, ``torch.linspace()`` accepts anoptional ``requires_grad`` option.) Setting this flag means that inevery computation that follows, autograd will be accumulating thehistory of the computation in the output tensors of that computation.a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True) print(a)Next, we’ll perform a computation, and plot its output in terms of itsinputs:b = torch.sin(a) plt.plot(a.detach(), b.detach())Let’s have a closer look at the tensor ``b``. 
When we print it, we seean indicator that it is tracking its computation history:print(b)This ``grad_fn`` gives us a hint that when we execute thebackpropagation step and compute gradients, we’ll need to compute thederivative of $sin(x)$ for all this tensor’s inputs.Let’s perform some more computations:c = 2 * b print(c) d = c + 1 print(d)Finally, let’s compute a single-element output. When you call``.backward()`` on a tensor with no arguments, it expects the callingtensor to contain only a single element, as is the case when computing aloss function.out = d.sum() print(out)Each ``grad_fn`` stored with our tensors allows you to walk thecomputation all the way back to its inputs with its ``next_functions``property. We can see below that drilling down on this property on ``d``shows us the gradient functions for all the prior tensors. Note that``a.grad_fn`` is reported as ``None``, indicating that this was an inputto the function with no history of its own.print('d:') print(d.grad_fn) print(d.grad_fn.next_functions) print(d.grad_fn.next_functions[0][0].next_functions) print(d.grad_fn.next_functions[0][0].next_functions[0][0].next_functions) print(d.grad_fn.next_functions[0][0].next_functions[0][0].next_functions[0][0].next_functions) print('\nc:') print(c.grad_fn) print('\nb:') print(b.grad_fn) print('\na:') print(a.grad_fn)With all this machinery in place, how do we get derivatives out? Youcall the ``backward()`` method on the output, and check the input’s``grad`` property to inspect the gradients:out.backward() print(a.grad) plt.plot(a.detach(), a.grad.detach())Recall the computation steps we took to get here::: a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True) b = torch.sin(a) c = 2 * b d = c + 1 out = d.sum()Adding a constant, as we did to compute ``d``, does not change thederivative. That leaves $c = 2 * b = 2 * sin(a)$, the derivativeof which should be $2 * cos(a)$. Looking at the graph above,that’s just what we see.Be aware than only *leaf nodes* of the computation have their gradientscomputed. If you tried, for example, ``print(c.grad)`` you’d get back``None``. In this simple example, only the input is a leaf node, so onlyit has gradients computed.Autograd in Training--------------------We’ve had a brief look at how autograd works, but how does it look whenit’s used for its intended purpose? Let’s define a small model andexamine how it changes after a single training batch. First, define afew constants, our model, and some stand-ins for inputs and outputs:BATCH_SIZE = 16 DIM_IN = 1000 HIDDEN_SIZE = 100 DIM_OUT = 10 class TinyModel(torch.nn.Module): def __init__(self): super(TinyModel, self).__init__() self.layer1 = torch.nn.Linear(1000, 100) self.relu = torch.nn.ReLU() self.layer2 = torch.nn.Linear(100, 10) def forward(self, x): x = self.layer1(x) x = self.relu(x) x = self.layer2(x) return x some_input = torch.randn(BATCH_SIZE, DIM_IN, requires_grad=False) ideal_output = torch.randn(BATCH_SIZE, DIM_OUT, requires_grad=False) model = TinyModel()One thing you might notice is that we never specify``requires_grad=True`` for the model’s layers. Within a subclass of``torch.nn.Module``, it’s assumed that we want to track gradients on thelayers’ weights for learning.If we look at the layers of the model, we can examine the values of theweights, and verify that no gradients have been computed yet:print(model.layer2.weight[0][0:10]) # just a small slice print(model.layer2.weight.grad)Let’s see how this changes when we run through one training batch. 
For aloss function, we’ll just use the square of the Euclidean distancebetween our ``prediction`` and the ``ideal_output``, and we’ll use abasic stochastic gradient descent optimizer.optimizer = torch.optim.SGD(model.parameters(), lr=0.001) prediction = model(some_input) loss = (ideal_output - prediction).pow(2).sum() print(loss)Now, let’s call ``loss.backward()`` and see what happens:loss.backward() print(model.layer2.weight[0][0:10]) print(model.layer2.weight.grad[0][0:10])We can see that the gradients have been computed for each learningweight, but the weights remain unchanged, because we haven’t run theoptimizer yet. The optimizer is responsible for updating model weightsbased on the computed gradients.optimizer.step() print(model.layer2.weight[0][0:10]) print(model.layer2.weight.grad[0][0:10])You should see that ``layer2``\ ’s weights have changed.One important thing about the process: After calling``optimizer.step()``, you need to call ``optimizer.zero_grad()``, orelse every time you run ``loss.backward()``, the gradients on thelearning weights will accumulate:print(model.layer2.weight.grad[0][0:10]) for i in range(0, 5): prediction = model(some_input) loss = (ideal_output - prediction).pow(2).sum() loss.backward() print(model.layer2.weight.grad[0][0:10]) optimizer.zero_grad() print(model.layer2.weight.grad[0][0:10])After running the cell above, you should see that after running``loss.backward()`` multiple times, the magnitudes of most of thegradients will be much larger. Failing to zero the gradients beforerunning your next training batch will cause the gradients to blow up inthis manner, causing incorrect and unpredictable learning results.Turning Autograd Off and On---------------------------There are situations where you will need fine-grained control overwhether autograd is enabled. There are multiple ways to do this,depending on the situation.The simplest is to change the ``requires_grad`` flag on a tensordirectly:a = torch.ones(2, 3, requires_grad=True) print(a) b1 = 2 * a print(b1) a.requires_grad = False b2 = 2 * a print(b2)In the cell above, we see that ``b1`` has a ``grad_fn`` (i.e., a tracedcomputation history), which is what we expect, since it was derived froma tensor, ``a``, that had autograd turned on. When we turn off autogradexplicitly with ``a.requires_grad = False``, computation history is nolonger tracked, as we see when we compute ``b2``.If you only need autograd turned off temporarily, a better way is to usethe ``torch.no_grad()``:a = torch.ones(2, 3, requires_grad=True) * 2 b = torch.ones(2, 3, requires_grad=True) * 3 c1 = a + b print(c1) with torch.no_grad(): c2 = a + b print(c2) c3 = a * b print(c3)``torch.no_grad()`` can also be used as a function or method dectorator:def add_tensors1(x, y): return x + y @torch.no_grad() def add_tensors2(x, y): return x + y a = torch.ones(2, 3, requires_grad=True) * 2 b = torch.ones(2, 3, requires_grad=True) * 3 c1 = add_tensors1(a, b) print(c1) c2 = add_tensors2(a, b) print(c2)There’s a corresponding context manager, ``torch.enable_grad()``, forturning autograd on when it isn’t already. It may also be used as adecorator.Finally, you may have a tensor that requires gradient tracking, but youwant a copy that does not. For this we have the ``Tensor`` object’s``detach()`` method - it creates a copy of the tensor that is *detached*from the computation history:x = torch.rand(5, requires_grad=True) y = x.detach() print(x) print(y)We did this above when we wanted to graph some of our tensors. 
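For instance (a small sketch, not one of the cells above), converting a grad-tracking tensor straight to NumPy raises a ``RuntimeError``, while the detached copy converts cleanly:

x = torch.rand(5, requires_grad=True)
# x.numpy()             # error: can't convert a tensor that requires grad
y = x.detach().numpy()  # fine: the detached copy carries no autograd history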
This isbecause ``matplotlib`` expects a NumPy array as input, and the implicitconversion from a PyTorch tensor to a NumPy array is not enabled fortensors with requires_grad=True. Making a detached copy lets us moveforward.Autograd and In-place Operations~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~In every example in this notebook so far, we’ve used variables tocapture the intermediate values of a computation. Autograd needs theseintermediate values to perform gradient computations. *For this reason,you must be careful about using in-place operations when usingautograd.* Doing so can destroy information you need to computederivatives in the ``backward()`` call. PyTorch will even stop you ifyou attempt an in-place operation on leaf variable that requiresautograd, as shown below.NoteThe following code cell throws a runtime error. This is expected.:: a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True) torch.sin_(a) Autograd Profiler-----------------Autograd tracks every step of your computation in detail. Such acomputation history, combined with timing information, would make ahandy profiler - and autograd has that feature baked in. Here’s a quickexample usage:device = torch.device('cpu') run_on_gpu = False if torch.cuda.is_available(): device = torch.device('cuda') run_on_gpu = True x = torch.randn(2, 3, requires_grad=True) y = torch.rand(2, 3, requires_grad=True) z = torch.ones(2, 3, requires_grad=True) with torch.autograd.profiler.profile(use_cuda=run_on_gpu) as prf: for _ in range(1000): z = (z / x) * y print(prf.key_averages().table(sort_by='self_cpu_time_total'))The profiler can also label individual sub-blocks of code, break out thedata by input tensor shape, and export data as a Chrome tracing toolsfile. For full details of the API, see the`documentation `__.Advanced Topic: More Autograd Detail and the High-Level API-----------------------------------------------------------If you have a function with an n-dimensional input and m-dimensionaloutput, $\vec{y}=f(\vec{x})$, the complete gradient is a matrix ofthe derivative of every output with respect to every input, called the*Jacobian:*\begin{align}J = \left(\begin{array}{ccc} \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\ \vdots & \ddots & \vdots\\ \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} \end{array}\right)\end{align}If you have a second function, $l=g\left(\vec{y}\right)$ thattakes m-dimensional input (that is, the same dimensionality as theoutput above), and returns a scalar output, you can express itsgradients with respect to $\vec{y}$ as a column vector,$v=\left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}$- which is really just a one-column Jacobian.More concretely, imagine the first function as your PyTorch model (withpotentially many inputs and many outputs) and the second function as aloss function (with the model’s output as input, and the loss value asthe scalar output).If we multiply the first function’s Jacobian by the gradient of thesecond function, and apply the chain rule, we get:\begin{align}J^{T}\cdot v=\left(\begin{array}{ccc} \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\ \vdots & \ddots & \vdots\\ \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} \end{array}\right)\left(\begin{array}{c} \frac{\partial l}{\partial y_{1}}\\ \vdots\\ \frac{\partial l}{\partial y_{m}} 
\end{array}\right)=\left(\begin{array}{c} \frac{\partial l}{\partial x_{1}}\\ \vdots\\ \frac{\partial l}{\partial x_{n}} \end{array}\right)\end{align}

Note: You could also use the equivalent operation $v^{T}\cdot J$, and get back a row vector.

The resulting column vector is the *gradient of the second function with respect to the inputs of the first* - or in the case of our model and loss function, the gradient of the loss with respect to the model inputs.

**``torch.autograd`` is an engine for computing these products.** This is how we accumulate the gradients over the learning weights during the backward pass.

For this reason, the ``backward()`` call can *also* take an optional vector input. This vector represents a set of gradients over the tensor, which are multiplied by the Jacobian of the autograd-traced tensor that precedes it. Let's try a specific example with a small vector:

x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:
    y = y * 2
print(y)

If we tried to call ``y.backward()`` now, we'd get a runtime error and a message that gradients can only be *implicitly* computed for scalar outputs. For a multi-dimensional output, autograd expects us to provide gradients for those three outputs that it can multiply into the Jacobian:

v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float) # stand-in for gradients
y.backward(v)
print(x.grad)

(Note that the output gradients are all related to powers of two - which we'd expect from a repeated doubling operation.)

The High-Level API
~~~~~~~~~~~~~~~~~~
There is an API on autograd that gives you direct access to important differential matrix and vector operations. In particular, it allows you to calculate the Jacobian and the *Hessian* matrices of a particular function for particular inputs. (The Hessian is like the Jacobian, but expresses all partial *second* derivatives.)
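For reference (added here for clarity, using the same notation as above), for a scalar-valued function $l=g(\vec{x})$ with an n-dimensional input, the Hessian collects every second partial derivative:

\begin{align}H=\left(\begin{array}{ccc} \frac{\partial^{2} l}{\partial x_{1}^{2}} & \cdots & \frac{\partial^{2} l}{\partial x_{1}\partial x_{n}}\\ \vdots & \ddots & \vdots\\ \frac{\partial^{2} l}{\partial x_{n}\partial x_{1}} & \cdots & \frac{\partial^{2} l}{\partial x_{n}^{2}} \end{array}\right)\end{align}

``torch.autograd``'s functional API, shown below, evaluates both matrices at the inputs you supply.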
It also provides methodsfor taking vector products with these matrices.Let’s take the Jacobian of a simple function, evaluated for a 2single-element inputs:def exp_adder(x, y): return 2 * x.exp() + 3 * y inputs = (torch.rand(1), torch.rand(1)) # arguments for the function print(inputs) torch.autograd.functional.jacobian(exp_adder, inputs)If you look closely, the first output should equal $2e^x$ (sincethe derivative of $e^x$ is $e^x$), and the second valueshould be 3.You can, of course, do this with higher-order tensors:inputs = (torch.rand(3), torch.rand(3)) # arguments for the function print(inputs) torch.autograd.functional.jacobian(exp_adder, inputs)The ``torch.autograd.functional.hessian()`` method works identically(assuming your function is twice differentiable), but returns a matrixof all second derivatives.There is also a function to directly compute the vector-Jacobianproduct, if you provide the vector:def do_some_doubling(x): y = x * 2 while y.data.norm() < 1000: y = y * 2 return y inputs = torch.randn(3) my_gradients = torch.tensor([0.1, 1.0, 0.0001]) torch.autograd.functional.vjp(do_some_doubling, inputs, v=my_gradients)Batch Process Historical Daily & Hourly Stock Dataimport pandas as pd import pymysql from yitian.datasource import * # Set up cloud sql connections password = '' database = EQUITY connection = pymysql.connect(host=PRIVATE_HOST, user=USER, password=password, db=database) # Grab tickers from dim tables nasdaq_pd = pd.read_sql("SELECT * FROM equity.nasdaq", connection) nasdaq_tickers = nasdaq_pd.symbol nyse_pd = pd.read_sql("SELECT * FROM equity.nyse", connection) nyse_tickers = nyse_pd.symbol # updates ticker in 'tickers' tickers = nasdaq_tickersUPDATING - ETL daily & hourly stock data into GCP Cloud Storage and mySQL# ETL current year daily & hourly stock data current_year = 2020 mode = 'update' for t in tickers: ticker = t print(ticker) with connection.cursor() as cursor: cursor.execute(f" DELETE FROM stock_daily WHERE ticker='{ticker}' AND year={str(current_year)};") cursor.execute(f" DELETE FROM stock_hourly WHERE ticker='{ticker}' AND year={str(current_year)};") connection.commit() %run -i 'notebooks/etl/yfinance/stock_etl.py' # Close connection connection.close()HISTORICAL - ETL daily & hourly stock data into Cloud Storage and mySQL# ETL historical daily & hourly stock data before current_year current_year = 2020 mode = 'history' for t in tickers: ticker = t print(ticker) %run -i 'notebooks/etl/yfinance/stock_etl.py'Machine Learning Laboratory (410302)BE Sem I Honors in AI/MLAcademic Year: 2021-22Lab Assignment No. 7 Name: Roll Number: 41301Branch: Department of Computer Engineering Problem Statement:Write a program to solve a problem using Decision tree or Random forest algorithm Lab Exercise 1Use famous iris flower dataset from sklearn.datasets to predict flower species using random forest classifier.1. Measure prediction score using default n_estimators (10)2. 
Now fine tune your model by changing number of trees in your classifier and tell me what best score you can get using how many treesfrom sklearn import datasets iris = datasets.load_iris() print(iris.target_names) print(iris.feature_names) X, y = datasets.load_iris( return_X_y = True) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.70) from sklearn.ensemble import RandomForestClassifier import pandas as pd data = pd.DataFrame({'sepallength': iris.data[:, 0], 'sepalwidth': iris.data[:, 1], 'petallength': iris.data[:, 2], 'petalwidth': iris.data[:, 3], 'species': iris.target}) print(data.head()) clf = RandomForestClassifier(n_estimators = 10) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) from sklearn import metrics print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred)*100,"%") from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators = 100) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred)*100, "%") from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators = 1000) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred)*100, "%") from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators = 5) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred)*100, "%")ACCURACY OF THE MODEL: 97.14285714285714 %ConclusionComparing the classifiers by their result, I found the best model with 5 trees giving an accuracy of 97.14285714285714 %. Lab Exercise 2 Build decision tree model to predict survival of titanic based on certain parameters. .csv file is available to download at Kaggle or is also with lab assignment. In this file using following columns build a model to predict if person would survive or not,1. Pclass 2. Sex 3. Age 4. FareCalculate score of your modelimport pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv('titanic.csv') df.head() df.isnull()Exploratory Data Analysis Missing DataUse seaborn to create a simple heatmap to see where we are missing data!sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')Roughly 20 percent of the Age data is missing. The proportion of Age missing is likely small enough for reasonable replacement with some form of imputation. Looking at the Cabin column, it looks like we are just missing too much of that data to do something useful with at a basic level.sns.set_style('whitegrid') sns.countplot(x='Survived', data=df, palette='RdBu_r') sns.set_style('whitegrid') sns.countplot(x='Survived', hue='Sex', data=df, palette='RdBu_r') sns.set_style('whitegrid') sns.countplot(x='Survived', hue='Pclass', data=df, palette='rainbow') sns.distplot(df['Age'].dropna(), kde=False, color='darkred', bins=30) df['Age'].hist(bins=30, color='darkred', alpha=0.7) sns.countplot(x='SibSp', data=df) df['Fare'].hist(color='green', bins=40, figsize=(8,4))Data CleaningFill in missing age data instead of dropping the missing age data rows. One way to do this is by filling in the mean age of all the passengers (imputation).However we can check the average age by passenger class. 
For example:plt.figure(figsize=(12, 7)) sns.boxplot(x='Pclass', y='Age', data=df, palette='winter')Wealthier passengers in the higher classes tend to be older, which makes sense. We'll use these average age values to impute based on Pclass for Age.def impute_age(cols): Age = cols[0] Pclass = cols[1] if pd.isnull(Age): if Pclass == 1: return 37 elif Pclass == 2: return 29 else: return 24 else: return Age df['Age'] = df[['Age', 'Pclass']].apply(impute_age, axis=1) sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')Drop the Cabin column and the row in Embarked that is NaN.df.drop('Cabin', axis=1, inplace=True) df.head() df.dropna(inplace=True)Converting Categorical Features Convert categorical features to dummy variables using pandas.df.info() sex = pd.get_dummies(df['Sex'], drop_first=True) embark = pd.get_dummies(df['Embarked'], drop_first=True) df.drop(['Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True) df = pd.concat([df, sex, embark], axis=1) df.head() df.drop('PassengerId', axis=1, inplace=True) df.head() y = df['Survived'] X = df.iloc[:,1:] X.head() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier() classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) from sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_pred) from sklearn.metrics import accuracy_score accuracy_score(y_test, y_pred)Homework AssignmentYour homework assignment this week is to write a function called 'average'. it takes a list as input and returns the arithmetic average. You should assume the list only contains numbers.It should have a docstring explaining what it does (hint: """text""").Hints:* len(input) will give you the length of the list.* sum(input) will add everything up. * If the list is empty, return the string "EMPTY LIST"** FOR BONUS POINTS ** * You are ONLY allowed to use addition and for-loops (The use of LEN and SUM is not allowed!).The bonus problem is a bit tricky but I believe in you guys. Good luck! Possible Solution (easy)def average(L): """L is a list, we return the average of L (a float)""" if not L: return "EMPTY LIST" return sum(L)/len(L)Possible Solution (hard)def average2(L): """L is a list, we return the average of L (a float)""" if not L: return "EMPTY LIST" total = 0 length = 0 for num in L: total += num length +=1 return total / lengthInitializationimport pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import glob import time from datetime import datetime, timedelta # Plotting initialization import seaborn as sns import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd %matplotlib inline # Initialize style plt.style.use("acm_ieee_latex_pubstyle.txt") SINGLE_COLUMN = SINGLE_WIDTH = 8.0 DOUBLE_COLUMN = DOUBLE_WIDTH = 16.0 def width_height(width=SINGLE_COLUMN, height=None, columns=1, rows=1): """Width driven sizing for seaborn multi-grid layouts Derive height & aspect kwargs for facetgrid, pairgrid & friends with a target sizing in terms of width and height. Convenient for creating figures that has to be fit into a certain width, while maintaining the correct aspect ratios of the fonts. 
Call this function to derive the height & aspect Currently, ACM & IEEE latex, single column figure defaults to a width of 8inch and the double column figure defaults to a width of 8 inch for the correct text scaling Usage: - sns.FacetGrid( ... , **width_height()) - sns.PairGrid( ..., **width_height(columns=X, rows=Y)) :param width: target width in inches (4 inch for a single column figure is default) :param height: target heigth in inches (width * 3/4 - golden ratio is the default) :param columns: target # of columns for facetgrid :param rows: target # of rows for facetgrid """ if height is None: height = width * 3/4 ratio = float(width) / (float(height) / float(rows)) return {"height": (float(width) / ratio), "aspect": ratio / float(columns)} # Dask setup block import os import pwd import glob import pandas as pd from distributed import LocalCluster, Client import dask import dask.dataframe as dd # Cleanup try: client.shutdown() client.close() except Exception as e: pass # Configuration LOCALDIR = "/tmp/dask" dask.config.set({'worker.memory': {'target': False, 'spill': False, 'pause': 0.8, 'terminate': 0.95}}) # Cluster creation cluster = LocalCluster(processes=True, n_workers=2, threads_per_worker=15, dashboard_address=":8787", local_directory=LOCALDIR, memory_limit="16GB") client = Client(cluster) cluster client/opt/conda/lib/python3.8/site-packages/distributed/node.py:151: UserWarning: Port 8787 is already in use. Perhaps you already have a cluster running? Hosting the HTTP server on port 46779 instead warnings.warn(Dataset Selection and Test Read# Component temperature & CEP data TEMP_PATH = "/gpfs/alpine/stf218/proj-shared/data/lake/summit_thermal_cluster_comptype_cep" # Power data POWER_PATH = "/gpfs/alpine/stf218/proj-shared/data/lake/summit_power_temp_openbmc/power_ts_10s_2" # Valid dates for temperature data # - 20200724 ~ 20201124 sdate = pd.to_datetime("2020-07-24") edate = pd.to_datetime("2020-09-30") #edate = pd.to_datetime("2020-08-30") date_keys_with_temp = [dt.date().strftime("%Y%m%d") for dt in list(pd.date_range(sdate, edate - timedelta(days=1),freq='d'))] # Work to do date_keys_with_temp = [ work for work in date_keys_with_temp if os.access(f"{TEMP_PATH}/{work}.csv", os.F_OK) and os.access(f"{POWER_PATH}/{work}.csv", os.F_OK) ] # Power only data # - 20200101 ~ 20201124 sdate = pd.to_datetime("2020-01-01") edate = pd.to_datetime("2020-12-31") date_keys_only_power = [dt.date().strftime("%Y%m%d") for dt in list(pd.date_range(sdate, edate - timedelta(days=1),freq='d'))] # Work to do date_keys_only_power = [ work for work in date_keys_only_power if os.access(f"{TEMP_PATH}/{work}.csv", os.F_OK) and os.access(f"{POWER_PATH}/{work}.csv", os.F_OK) ] TIMESTAMP = "timestamp" # Power consumption VALID_COUNT = "count_inp" NODE_COUNT = "size_inp" POWER = "sum_inp" MAX_POWER = "max_inp" MIN_POWER = "min_inp" STD_POWER = "std_inp" # Component temperature GPU_CORE_MEAN = "gpu_core.mean" GPU_CORE_MAX = "gpu_core.max" GPU_CORE_MIN = "gpu_core.min" CPU_CORE_MEAN = "cpu_core.mean" CPU_CORE_MAX = "cpu_core.max" CPU_CORE_MIN = "cpu_core.min" # Secondary loop SUPPLY_TEMP = "mtwst" RETURN_TEMP = "mtwrt" SUPPLY_FLOW = "mtwflw" # Efficiency PUE = "k100_pue" KW_PER_TON = "cep_kw_per_ton" MTW_TON = "cep_mtw_tons" CT_TON = "cep_cooling_tower_tons" CHW_TON = "cep_chilled_water_tons" WETBULB_TEMP = "cep_outside_air_wet_bulb_temp" # Primary loop flow CT_FLOWRATE = "ct_water_flowrate" CHW_FLOWRATE = "chw_flowrate" MAKEUP_FLOW = "cep_make_up_flow" # List CLUSTER_POWER = [VALID_COUNT, NODE_COUNT, POWER, 
MAX_POWER, MIN_POWER, STD_POWER] COMPONENTS = [GPU_CORE_MEAN, GPU_CORE_MAX, CPU_CORE_MEAN, CPU_CORE_MAX] SECONDARY_LOOP = [SUPPLY_TEMP, RETURN_TEMP, SUPPLY_FLOW] PRIMARY_LOOP = [CT_FLOWRATE, CHW_FLOWRATE, MAKEUP_FLOW] EFFICIENCY = [PUE, KW_PER_TON, MTW_TON, CT_TON, CHW_TON] WEATHER = [WETBULB_TEMP,]Test Data Power datadf = pd.read_csv(POWER_PATH + f"/{date_keys_with_temp[0]}.csv") df.head() df.columnsTemperature datadf = pd.read_csv(TEMP_PATH + f"/{date_keys_with_temp[0]}.csv") df.head() df.columnsDetect movements and carve out segments from both power & tempPrepare a large table that has many short segments* Select only the dates that have temperature data as we lack temperature sensor data for certain dates* For each dates, join power & temp data, sort - Find rows where the power delta is larger than the specified criteria and store the indexes - For each indexes, lookup the original time series, carve out the rows around the center row - When carving out: - filter out starting points based on the specified criteria - we should bin them with 1MW intervals - filter out segments that has too many missing nodes - translate the indicies into offsets that starts from 0 - The timestamp of the center row should be in a category - Also, categorize based on the specified criteria%%time import functools import operator # Number of timesteps to take before and after SEGMENT_FRONT =12 SEGMENT_REAR = 64 PERIODS = 4 def handle_day(date_key, direction="rise"): """Process the day""" BAND_WATTS = (1000 * 1000) thresholds = [ {"class": 13, "start": 13000000, "end": 14000000}, {"class": 12, "start": 12000000, "end": 13000000}, {"class": 11, "start": 11000000, "end": 12000000}, {"class": 10, "start": 10000000, "end": 11000000}, {"class": 9, "start": 9000000, "end": 10000000}, {"class": 8, "start": 8000000, "end": 9000000}, {"class": 7, "start": 7000000, "end": 8000000}, {"class": 6, "start": 6000000, "end": 7000000}, {"class": 5, "start": 5000000, "end": 6000000}, {"class": 4, "start": 4000000, "end": 5000000}, {"class": 3, "start": 3000000, "end": 4000000}, {"class": 2, "start": 2000000, "end": 3000000}, {"class": 1, "start": 1000000, "end": 2000000}, ] # Filenames temp_file = f"{TEMP_PATH}/{date_key}.csv" power_file = f"{POWER_PATH}/{date_key}.csv" # Join file power_df = pd.read_csv(power_file, usecols=["timestamp"]+ CLUSTER_POWER).sort_index() temp_df = pd.read_csv(temp_file).sort_index() all_df = pd.merge(power_df, temp_df, on="timestamp") max_index = all_df['timestamp'].count() # Get diff diff_df = all_df[POWER].diff(periods=PERIODS) # Inner carving function def carve_edge(row, threshold): """Segment carving""" # Carve out the segment index = int(row["index"]) diff = row[POWER] # Filter incomplete segments if index < SEGMENT_FRONT or index >= max_index - SEGMENT_REAR: return [] segment = all_df.iloc[index - SEGMENT_FRONT:index + SEGMENT_REAR].reset_index(drop=True) start_inp_class_mw = int(segment.iloc[SEGMENT_FRONT][POWER] / BAND_WATTS) # Filter segments with insufficient data ts = segment[segment[NODE_COUNT] < (4626 - 200)]['timestamp'].count() if ts > 0: return [] # Categorize the segment segment['edge_index'] = index segment['segment_timestamp'] = segment['timestamp'] segment['start_class_mw'] = start_inp_class_mw segment['amplitude'] = diff segment['amplitude_class_mw'] = threshold['class'] return segment # Iterate through the thrshold spec def carve_threshold(threshold): """Carve out a threshold based segment set""" # setup mask based on rise / fall assert(direction in ['rise', 'fall']) mask = 
((diff_df >= threshold['start']) & (diff_df < threshold['end'])) if direction != "rise": mask = ((- diff_df) >= threshold['start']) & ((- diff_df) < threshold['end']) # Apply mask and filter out errors edge_indexes = diff_df[mask].reset_index() # Apply segment carving and filter out empty batches segment_lst = [ seg for seg in list(edge_indexes.apply(carve_edge, args=(threshold,), axis=1)) if type(seg) is pd.DataFrame ] return segment_lst # Iterate through thresholds segment_list = [carve_threshold(thr_spec) for thr_spec in thresholds] segment_list = [seg for seg in segment_list if len(seg) > 0] if not segment_list: return [] # Done with the segments, concatenate them and exit flat_list = functools.reduce(operator.iconcat, segment_list, []) return flat_list def filter_duplicates(flat, direction="rise"): """Filter out duplicates""" segments = sorted(flat, key=lambda x: -x.iloc[0]['amplitude_class_mw']) indices = [] ret_flat = [] def compare(x): start = index - SEGMENT_FRONT end = index + SEGMENT_REAR res = (x >= start) and (x < end) return res # Iterate segments and accumulate indices for i, segment in enumerate(segments): index = segment.iloc[0]['edge_index'] existing_indicies = list(filter(compare, indices)) if not existing_indicies: ret_flat.append(segment) indices.append(index) return ret_flat # Distribute def get_segments(date_keys, direction="rise"): """Get segments from the full dataset""" import dask.bag as db segments = [seg for seg in db.from_sequence(date_keys).map(handle_day, direction=direction).compute() if seg] item_count = [len(seg) for seg in segments] print(f"Segments collected: {direction} {sum(item_count)}, {item_count}") flat = functools.reduce(operator.iconcat, segments, []) filtered = filter_duplicates(flat, direction=direction) print(f"Segments survived: {direction} {len(filtered)}") return pd.concat(filtered) def get_segments_slow(date_keys, direction="rise"): segments = [handle_day(date_key, direction=direction) for date_key in date_keys] item_count = [len(seg) for seg in segments] print(f"Segments collected: {direction} {sum(item_count)}, {item_count}") flat = functools.reduce(operator.iconcat, segments, []) filtered = filter_duplicates(flat, direction=direction) print(f"Segments survived: {direction} {len(filtered)}") return pd.concat(filtered) # Test process a particular day segment_list = handle_day("20200728") %%time # Get all the rise & falling segments rise_df = get_segments(date_keys_with_temp, direction="rise") fall_df = get_segments(date_keys_with_temp, direction="fall") rise_df['type'] = "rise" fall_df['type'] = "fall" rf_df = pd.concat([rise_df, fall_df]) rf_dfSegments collected: rise 942, [22, 260, 9, 31, 18, 10, 2, 3, 25, 8, 13, 29, 50, 24, 10, 33, 27, 76, 3, 2, 7, 12, 25, 6, 28, 2, 1, 2, 8, 24, 13, 12, 5, 4, 14, 1, 7, 104, 3, 5, 4] Segments survived: rise 156 Segments collected: fall 958, [23, 245, 8, 4, 29, 31, 7, 24, 4, 28, 2, 2, 73, 10, 49, 24, 1, 68, 7, 2, 13, 18, 41, 10, 21, 5, 1, 1, 12, 27, 15, 12, 9, 1, 4, 1, 1, 2, 105, 6, 6, 6] Segments survived: fall 152 CPU times: user 3.07 s, sys: 224 ms, total: 3.3 s Wall time: 17.2 sPower Dynamics - Summer Summer power dynamicsThis one will be the substitute for the rise / fall edge power graphs. 
Should give a more comprehensive view.Will be using a 16 x 4 strip for this one%%time # Get all the rise & falling segments rise_df = get_segments(date_keys_with_temp, direction="rise") fall_df = get_segments(date_keys_with_temp, direction="fall") rise_df['type'] = "rise" fall_df['type'] = "fall" rf_df_summer = pd.concat([rise_df, fall_df]) %%time # initialize rf_df = rf_df_summer plt.close() # Override acm-ieee-latex-pubstyle plt.figure(figsize=(16, 2)) sns.despine(top=False, right=False) # Layout gs = mpl.gridspec.GridSpec(2, 7, height_ratios=[1, 1]) gs.update(hspace=0.1, wspace=0.1) plt.xlabel('Time (minutes)', loc='center') ################################################## # Comparison def plot(rf_df, ax, amp_class, direction, metric, kwargs): mask = ( (rf_df['type'] == direction) & (rf_df['amplitude_class_mw'] == amp_class) ) df = rf_df[mask].reset_index() # TODO: Some massage (melting) sns.lineplot(ax=ax, data=df, x="index", y=metric, sort=True, ci=95, **kwargs) layout = [ # Rise (0, gs[0, 0], 1, '1MW', 'rise', POWER), (1, gs[0, 1], 2, '2MW', 'rise', POWER), (2, gs[0, 2], 3, '3MW', 'rise', POWER), (3, gs[0, 3], 4, '4MW', 'rise', POWER), (4, gs[0, 4], 5, '5MW', 'rise', POWER), (5, gs[0, 5], 6, '6MW', 'rise', POWER), (6, gs[0, 6], 7, '7MW', 'rise', POWER), # Fall (7, gs[1, 0], 1, '1MW', 'rise', PUE), (8, gs[1, 1], 2, '2MW', 'rise', PUE), (9, gs[1, 2], 3, '3MW', 'rise', PUE), (10, gs[1, 3], 4, '4MW', 'rise', PUE), (11, gs[1, 4], 5, '5MW', 'rise', PUE), (12, gs[1, 5], 6, '6MW', 'rise', PUE), (13, gs[1, 6], 7, '7MW', 'rise', PUE), ] for i, loc, amp_class, amp_label, direction, metric in layout: # Subplot item ax = plt.subplot(loc) kwargs = { "linewidth": 1, "legend": False, } # Count number of segments per amplitude class seg_count = rf_df[rf_df['type'] == 'rise'].groupby('amplitude_class_mw')['timestamp'].count() # Location customization if i not in [0, 7]: plt.setp(ax.get_yticklabels(), visible=False) if i in [0, 1, 2, 3, 4, 5, 6]: ax.set_title(f"{amp_label} - {seg_count[amp_class]:,}") plt.setp(ax.get_xticklabels(), visible=False) # Plot plot(rf_df, ax, amp_class, direction, metric, kwargs) # X ticks xtck = [0, 12, 24, 36, 48] # ticks xlab = [-1, 0, 1, 2, 3] # minutes ax.set_xlim([0, SEGMENT_REAR]) ax.set_xticks(xtck) ax.set_xticklabels(xlab) ax.set_xlabel("") if i in [0, 1, 2, 3, 4, 5, 6]: # Y ticks - Power in MW MW = 1000000 ytck = [4 * MW, 6 * MW, 8 * MW, 10 * MW] ytlb = [4, 6, 8, 10] ylim = [2000000, 12000000] ylab = "Power\n(MW)" else: # Y ticks - PUE ytck = [1.0, 1.1, 1.2, 1.3, 1.4] ytlb = [1.0, 1.1, 1.2, 1.3, 1.4] ylim = [1.0, 1.4] ylab = "PUE" ax.set_ylim(ylim) ax.set_yticks(ytck) ax.set_yticklabels(ytlb) ax.set_ylabel(ylab) # Overlay ax.axvline(SEGMENT_FRONT, color='r', linewidth=0.3) fig = plt.gcf() fig.text(0.5, -0.09, 'Time after Rising Edges (mins)', ha='center') plt.subplots_adjust(hspace=.0) plt.savefig("../plots/power_dynamics_per_amp_summer.pdf", format="pdf", bbox_inches='tight', pad_inches=0.01) plt.show()Power Dynamics - Full Year Full year power dynamicsThis one will be the substitute for the rise / fall edge power graphs. 
Should give a more comprehensive view.Will be using a 16 x 4 strip for this one%%time # Get all the rise & falling segments rise_df = get_segments(date_keys_only_power, direction="rise") fall_df = get_segments(date_keys_only_power, direction="fall") rise_df['type'] = "rise" fall_df['type'] = "fall" rf_df_fullyr = pd.concat([rise_df, fall_df]) %%time # initialize rf_df = rf_df_fullyr plt.close() # Override acm-ieee-latex-pubstyle plt.figure(figsize=(16, 2)) sns.despine(top=False, right=False) # Layout gs = mpl.gridspec.GridSpec(2, 7, height_ratios=[1, 1]) gs.update(hspace=0.1, wspace=0.1) plt.xlabel('Time (minutes)', loc='center') ################################################## # Comparison def plot(rf_df, ax, amp_class, direction, metric, kwargs): mask = ( (rf_df['type'] == direction) & (rf_df['amplitude_class_mw'] == amp_class) ) df = rf_df[mask].reset_index() # TODO: Some massage (melting) sns.lineplot(ax=ax, data=df, x="index", y=metric, sort=True, ci=95, **kwargs) layout = [ # Rise (0, gs[0, 0], 1, '1MW', 'rise', POWER), (1, gs[0, 1], 2, '2MW', 'rise', POWER), (2, gs[0, 2], 3, '3MW', 'rise', POWER), (3, gs[0, 3], 4, '4MW', 'rise', POWER), (4, gs[0, 4], 5, '5MW', 'rise', POWER), (5, gs[0, 5], 6, '6MW', 'rise', POWER), (6, gs[0, 6], 7, '7MW', 'rise', POWER), # Fall (7, gs[1, 0], 1, '1MW', 'rise', PUE), (8, gs[1, 1], 2, '2MW', 'rise', PUE), (9, gs[1, 2], 3, '3MW', 'rise', PUE), (10, gs[1, 3], 4, '4MW', 'rise', PUE), (11, gs[1, 4], 5, '5MW', 'rise', PUE), (12, gs[1, 5], 6, '6MW', 'rise', PUE), (13, gs[1, 6], 7, '7MW', 'rise', PUE), ] for i, loc, amp_class, amp_label, direction, metric in layout: # Subplot item ax = plt.subplot(loc) kwargs = { "linewidth": 1, "legend": False, } # Count number of segments per amplitude class seg_count = rf_df[rf_df['type'] == 'rise'].groupby('amplitude_class_mw')['timestamp'].count() # Location customization if i not in [0, 7]: plt.setp(ax.get_yticklabels(), visible=False) if i in [0, 1, 2, 3, 4, 5, 6]: ax.set_title(f"{amp_label} - {seg_count[amp_class]:,}") plt.setp(ax.get_xticklabels(), visible=False) # Plot plot(rf_df, ax, amp_class, direction, metric, kwargs) # X ticks xtck = [0, 12, 24, 36, 48] # ticks xlab = [-1, 0, 1, 2, 3] # minutes ax.set_xlim([0, SEGMENT_REAR]) ax.set_xticks(xtck) ax.set_xticklabels(xlab) ax.set_xlabel("") if i in [0, 1, 2, 3, 4, 5, 6]: # Y ticks - Power in MW MW = 1000000 ytck = [4 * MW, 6 * MW, 8 * MW, 10 * MW] ytlb = [4, 6, 8, 10] ylim = [2000000, 12000000] ylab = "Power\n(MW)" else: # Y ticks - PUE ytck = [1.0, 1.1, 1.2, 1.3, 1.4] ytlb = [1.0, 1.1, 1.2, 1.3, 1.4] ylim = [1.0, 1.4] ylab = "PUE" ax.set_ylim(ylim) ax.set_yticks(ytck) ax.set_yticklabels(ytlb) ax.set_ylabel(ylab) # Overlay ax.axvline(SEGMENT_FRONT, color='r', linewidth=0.3) fig = plt.gcf() fig.text(0.5, -0.09, 'Time after Rising Edges (mins)', ha='center') plt.subplots_adjust(hspace=.0) plt.savefig("../plots/power_dynamics_per_amp_fullyr.pdf", format="pdf", bbox_inches='tight', pad_inches=0.01) plt.show()**Problem 2 - Even Fibonacci numbers**Each new term in the Fibonacci sequence is generated by adding the previous two terms. 
By starting with $1$ and $2$, the first $10$ terms will be:$$1, 2, 3, 5, 8, 13, 21, 34, 55, 89, \dots$$By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.N = 4000000 F[1] = 1 F[2] = 2 sum = F[2] # sum the even-valued k = 2 while F[k] <= N: k+=1 F[k] = F[k-1] + F[k-2] # calculate the k-th Fibonacci number if F[k]%2==0: sum += F[k] print(sum)4613732Initial Imports#Imports import category_encoders as ce import pandas as pd import joblib from joblib import dump, load import json from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score, roc_auc_score, roc_curve, precision_score, recall_score, accuracy_score from xgboost import XGBClassifier # read data previously cleaned and reduced to neccessary columns # these csv files can be found in 'data'files of this github repository df1a = pd.read_csv('data/Clean_Data_1.csv') df1b = pd.read_csv('data/Clean_Data_2.csv') df1 = pd.concat([df1a, df1b], ignore_index=True) df1.reset_index(drop=True, inplace=True) print(df1.shape) df1.head() df1['category'].value_counts() # Assign datatypes for processing purposes df1['blurb'] = df1['blurb'].astype(str) df1['country'] = df1['country'].astype(str) df1['campaign_success'] = df1['campaign_success'].astype(int)Data Splits Train, Validate, Test Splitdf1['launched_at'].describe() # Choose cutoffs based on unix time, 75th & 50th percentile cutoff1 = 1466003000 cutoff2 = 1530120000 test = df1[df1['launched_at']>=cutoff2] train = df1[df1['launched_at']cutoff1] train = train[train['launched_at']<=cutoff1] # Drop launched_at column, not used in model train = train.drop(columns=['launched_at']) val = val.drop(columns=['launched_at']) test = test.drop(columns=['launched_at']) print(train.shape) print(val.shape) print(test.shape) # check that data is balanced train['campaign_success'].value_counts(normalize=True)Target Vector / Features Matrix Split# Target, is the client a defaulter target = 'campaign_success' # Features features = list(train.drop(columns = [target])) # Arrange data into X features matrix and y target vector X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] y_test = test[target]Wrangle Data Create Encoder#Create and train transformations # Encode catagorical features on X_train encoder = ce.OrdinalEncoder(cols=['country', 'category', 'subcategory']) encoder.fit(X_train) print('') def wrangle(X, encoder): # copy to avoid errors X = X.reset_index(drop=True).copy() X = encoder.transform(X) X['blurb_length'] = X['blurb'].apply(lambda x: len(x)) X['blurb_words'] = X['blurb'].apply(lambda x: len(x.split())) X['blurb_uppers'] = X['blurb'].apply(lambda x: sum(map(str.isupper, x.split()))) X['blurb_qmarks'] = X['blurb'].apply(lambda x: x.count("?")) X['blub_exclamation'] = X['blurb'].apply(lambda x: x.count("!")) X = X.drop(columns=['blurb']) return(X) X_train_e = wrangle(X_train, encoder) X_val_e = wrangle(X_val, encoder) X_test_e = wrangle(X_test, encoder) print(X_train_e.shape) print(X_val_e.shape) print(X_test_e.shape)(86636, 10) (45201, 10) (47619, 10)XGBoost Model""" Define Model""" # First XGBoost Model booster= XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1) # XBG Simple xgb_simple= booster.fit(X_train_e, y_train) #fit on train """ Run Model XGBoost Simple """ xs_y_pred_v = booster.predict(X_val_e) m_name = 'XGBoost Simple' y = y_val pred = xs_y_pred_v print('Classification 
Report:\n\n', classification_report(y, pred)) """ Run Model""" xs_y_pred_test = booster.predict(X_test_e) m_name = 'XGBoost Simple TEST' model = booster.fit(X_train_e, y_train) X = X_test_e y = y_test pred = xs_y_pred_test print('Classification Report:\n\n', classification_report(y, pred))Classification Report: precision recall f1-score support 0 0.65 0.64 0.64 15914 1 0.82 0.83 0.82 31705 accuracy 0.76 47619 macro avg 0.73 0.73 0.73 47619 weighted avg 0.76 0.76 0.76 47619Creating Pickles via JobLib# Pickling encoder created above, no changes required dump(encoder, 'encoder_m.joblib' ) encoder_m = load('encoder_m.joblib') # Wrangle Function to pickle # allows for state of incoming data def wrangler(X, encoder): X = pd.DataFrame.from_dict(X, orient='index') X = X.T X = X.reset_index(drop=True).copy() X.rename(columns={'x1':'goal', 'x2':'campaign_length', 'x3':'country', 'x4':'category', 'x5':'subcategory', 'x6':'blurb'}, inplace=True) X = encoder.transform(X) X = X[['country', 'goal', 'category', 'subcategory', 'campaign_length', 'blurb']] X['blurb_length'] = X['blurb'].apply(lambda x: len(x)) X['blurb_words'] = X['blurb'].apply(lambda x: len(x.split())) X['blurb_uppers'] = X['blurb'].apply(lambda x: sum(map(str.isupper, x.split()))) X['blurb_qmarks'] = X['blurb'].apply(lambda x: x.count("?")) X['blub_exclamation'] = X['blurb'].apply(lambda x: x.count("!")) X = X.drop(columns=['blurb']) X = X.astype(int) return(X) dump(wrangler, 'wrangler_m.joblib' ) wrangler_m = load('wrangler_m.joblib') # XGBoost Model to Pickle def kick_boost(X): prediction = booster.predict_proba(X)[0][1] prediction = round(prediction, 3) return(prediction) dump(kick_boost, 'kick_boost_m.joblib' ) kick_boost_m = load('kick_boost_m.joblib')Testing data through pickled model# Dummy Data test_1 = { "x1": 10000, "x2": 8, "x3": "Canada", "x4": "Science", "x5": "Material Thread Science", "x6": "I am making somthing that will do something awesome!!! Answer the question 'will it be amazing?'" } result = wrangler_m(test_1, encoder_m) prediction = kick_boost_m(result) predictionIntroduction to cuDF You will begin your accelerated data science training with an introduction to [cuDF](https://github.com/rapidsai/cudf), the RAPIDS API that enables you to create and manipulate GPU-accelerated dataframes. cuDF implements a very similar interface to Pandas so that Python data scientists can use it with very little ramp up. Throughout this notebook we will provide Pandas counterparts to the cuDF operations you perform to build your intuition about how much faster cuDF can be, even for seemingly simple operations. 
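As a tiny illustration of that interface parity (a standalone sketch, separate from the census data used later in this notebook, and it needs a GPU with RAPIDS installed to run), the same dataframe code works with either library - typically only the import changes:

import pandas as pd
import cudf

pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})    # CPU dataframe
gdf = cudf.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})  # GPU dataframe, same constructor

print(pdf['b'].mean())  # identical call on the CPU...
print(gdf['b'].mean())  # ...and on the GPU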
Objectives By the time you complete this notebook you will be able to:- Read and write data to and from disk with cuDF- Perform basic data exploration and cleaning operations with cuDF Imports Here we import cuDF and CuPy for GPU-accelerated dataframes and math operations, plus the CPU libraries Pandas and NumPy on which they are based and which we will use for performance comparisons:import cudf import cupy as cp import pandas as pd import numpy as npReading and Writing Data Using [cuDF](https://github.com/rapidsai/cudf), the RAPIDS API providing a GPU-accelerated dataframe, we can read data from [a variety of formats](https://rapidsai.github.io/projects/cudf/en/0.10.0/api.htmlmodule-cudf.io.csv), including csv, json, parquet, feather, orc, and Pandas dataframes, among others.For the first part of this workshop, we will be reading almost 60 million records (corresponding to the entire population of England and Wales) which were synthesized from official UK census data. Here we read this data from a local csv file directly into GPU memory:%time gdf = cudf.read_csv('../data/data_pop.csv') gdf.shape gdf.drop(gdf.columns[0], axis=1, inplace=True) gdf.dtypesHere for comparison we read the same data into a Pandas dataframe:%time df = pd.read_csv('../data/data_pop.csv') df.drop(df.columns[0], axis=1, inplace=True) gdf.shape == df.shapeBecause of the sophisticated GPU memory management behind the scenes in cuDF, the first data load into a fresh RAPIDS memory environment is sometimes substantially slower than subsequent loads. The RAPIDS Memory Manager is preparing additional memory to accommodate the array of data science operations that you may be interested in using on the data, rather than allocating and deallocating the memory repeatedly throughout your workflow.We will be using `gdf` regularly in this workshop to represent a GPU dataframe, as well as `df` for a CPU dataframe when comparing performance. Writing to File cuDF also provides methods for writing data to files. Here we create a new dataframe specifically containing residents of Blackpool county and then write it to `blackpool.csv`, before doing the same with Pandas for comparison. cuDF%time blackpool_residents = gdf.loc[gdf['county'] == 'Blackpool'] print(f'{blackpool_residents.shape[0]} residents') %time blackpool_residents.to_csv('blackpool.csv')Pandas%time blackpool_residents_pd = df.loc[df['county'] == 'Blackpool'] %time blackpool_residents_pd.to_csv('blackpool_pd.csv')Exercise: Initial Data Exploration Now that we have some data loaded, let's do some initial exploration.Use the `head`, `dtypes`, and `columns` methods on `gdf`, as well as the `value_counts` on individual `gdf` columns, to orient yourself to the data. If you're interested, use the `%time` magic command to compare performance against the same operations on the Pandas `df`. You can create additional interactive cells by clicking the `+` button above, or by switching to command mode with `Esc` and using the keyboard shortuts `a` (for new cell above) and `b` (for new cell below).If you fill up the GPU memory at any time, don't forget that you can restart the kernel and rerun the cells up to this point quite quickly.# Begin your initial exploration here. Create more cells as needed.Basic Operations with cuDF Except for being much more performant with large datasets, cuDF looks and feels a lot like Pandas. In this section we highlight a few very simple operations. 
When performing data operations on cuDF dataframes, column operations are typically much more performant than row-wise operations. Converting Data Types For machine learning later in this workshop, we will sometimes need to convert integer values into floats. Here we convert the `age` column from `int64` to `float32`, comparing performance with Pandas: cuDF%time gdf['age'] = gdf['age'].astype('float32')Pandas%time df['age'] = df['age'].astype('float32')Column-Wise Aggregations Similarly, column-wise aggregations take advantage of the GPU's architecture and RAPIDS' memory format. cuDF%time gdf['age'].mean()Pandas%time df['age'].mean()String Operations Although strings are not a datatype traditionally associated with GPUs, cuDF supports powerful accelerated string operations. cuDF%time gdf['name'] = gdf['name'].str.title() gdf.head()Pandas%time df['name'] = df['name'].str.title() df.head()Data Subsetting with `loc` and `iloc` cuDF also supports the core data subsetting tools `loc` (label-based locator) and `iloc` (integer-based locator). Range Selection Our data's labels happen to be incrementing numbers, though as with Pandas, `loc` will include every value it is passed whereas `iloc` will give the half-open range (omitting the final value).gdf.loc[100:105] gdf.iloc[100:105]`loc` with Boolean Selection We can use `loc` with boolean selections: cuDF# as of version 0.10, the startswith method returns a list, so we convert it back to a Series for efficiency # in a future version, that method and other string methods will return a Series when appropriate %time e_names = gdf.loc[cudf.Series(gdf['name'].str.startswith('E'))] e_names.head()Pandas%time e_names_pd = df.loc[df['name'].str.startswith('E')]Combining with NumPy Methods We can combine cuDF methods with NumPy methods. Here we use `np.logical_and` for elementwise boolean selection. cuDF%time ed_names = gdf.loc[np.logical_and(gdf['name'].str.startswith('E'), gdf['name'].str.endswith('d'))] ed_names.head()For better performance, we can use CuPy instead of NumPy, thereby performing the elementwise boolean `logical_and` operation on GPU.%time ed_names = gdf.loc[cudf.Series(cp.logical_and(cudf.Series(gdf['name'].str.startswith('E')), cudf.Series(gdf['name'].str.endswith('d'))))] ed_names.head()Pandas%time ed_names_pd = df.loc[np.logical_and(df['name'].str.startswith('E'), df['name'].str.endswith('d'))]Semantic analysis SENTIMENTdata <- read.csv('abstracts_scored.csv', stringsAsFactors = FALSE) ### Exclude missing abstracts nrow(data) #2926 sum(data$WordCount==0) #1020 sum(is.na(data$IndexedAbstract)) #1020 table(data$Tag, is.na(data$IndexedAbstract)) # FALSE TRUE # OpenScience 674 204 # Reproducibility 1232 815 data <- filter(data, WordCount>0) #Exclude non-English titles data$Language <- textcat(as.character(data$Title)) table(data$Language) addmargins(table(data$Tag, data$Language=='english')) data <- filter(data, Language=='english') #check for weirdly short abstracts data$nchar <- nchar(data$IndexedAbstract) hist(data$nchar, breaks=seq(0,7300,10)) #spike at 1023 due to truncation of some abstracts hist(data$nchar, xlim=c(0,2000), breaks=seq(0,7300,10)) hist(data$nchar, xlim=c(0,500), breaks=seq(0,7300,10)) #no small weird values #look at boxplots and histograms of sentiment by field pdf('figures/Postivity_FieldFemale.pdf', width=5, height=4) data$femaleLead <- (data$X1st==1) | (data$last==1) ggplot(filter(data, !is.na(femaleLead)), aes(x=femaleLead, y=PositivityQDAP, fill=femaleLead)) + geom_boxplot() + facet_grid(. 
~ Tag) + scale_fill_manual(values=c('lightblue','pink')) + guides(fill=FALSE) dev.off() #no noticable gender differences, but sizeable difference between fields (Reproducibility less positive) ### QDAP pdf('figures/SentimentPositivityQDAP_hist.pdf', width=10, height=5) par(mfrow=c(1,2)) #Sentiment tmp <- filter(data, SentimentQDAP >= -0.2) hist(tmp$SentimentQDAP[tmp$Tag=='OpenScience'], freq=FALSE, breaks=seq(-0.2,0.5,0.03), col=rgb(0,1,1,0.5), main='Sentiment', xlab='QDAP Sentiment Score') hist(tmp$SentimentQDAP[tmp$Tag=='Reproducibility'], freq=FALSE, breaks=seq(-0.2,0.5,0.03), col=rgb(1,0,1,0.5), add=TRUE) legend('topright', legend=c('Open Science', 'Reproducibility'), fill=c(rgb(0,1,1,0.5), rgb(1,0,1,0.5))) #Positivity hist(tmp$PositivityQDAP[tmp$Tag=='OpenScience'], freq=FALSE, breaks=seq(-0.2,0.5,0.03), col=rgb(0,1,1,0.5), ylim=c(0,9), main='Positivity', xlab='QDAP Positivity Score') hist(tmp$PositivityQDAP[tmp$Tag=='Reproducibility'], freq=FALSE, breaks=seq(-0.2,0.5,0.03), col=rgb(1,0,1,0.5), add=TRUE) legend('topright', legend=c('Open Science', 'Reproducibility'), fill=c(rgb(0,1,1,0.5), rgb(1,0,1,0.5))) dev.off() ### GI pdf('figures/SentimentPositivityGI_hist.pdf', width=10, height=5) par(mfrow=c(1,2)) #Sentiment tmp <- filter(data, SentimentGI >= -0.2) hist(tmp$SentimentGI[tmp$Tag=='OpenScience'], freq=FALSE, breaks=seq(-0.2,0.6,0.03), col=rgb(0,1,1,0.5), main='Sentiment', xlab='GI Sentiment Score') hist(tmp$SentimentGI[tmp$Tag=='Reproducibility'], freq=FALSE, breaks=seq(-0.2,0.6,0.03), col=rgb(1,0,1,0.5), add=TRUE) legend('topright', legend=c('Open Science', 'Reproducibility'), fill=c(rgb(0,1,1,0.5), rgb(1,0,1,0.5))) #Positivity hist(tmp$PositivityGI[tmp$Tag=='OpenScience'], freq=FALSE, breaks=seq(-0.2,0.6,0.03), col=rgb(0,1,1,0.5), main='Positivity', xlab='GI Positivity Score') hist(tmp$PositivityGI[tmp$Tag=='Reproducibility'], freq=FALSE, breaks=seq(-0.2,0.6,0.03), col=rgb(1,0,1,0.5), add=TRUE) legend('topright', legend=c('Open Science', 'Reproducibility'), fill=c(rgb(0,1,1,0.5), rgb(1,0,1,0.5))) dev.off() ### Team Size pdf('figures/SentimentPositivity_TeamSize.pdf', width=10, height=5) ggplot(data, aes(x=authorCount, y=SentimentQDAP, color=Tag, group=interaction(authorCount,Tag))) + geom_boxplot() + geom_smooth() + xlim(0,10) + theme(legend.position='bottom') ggplot(data, aes(x=authorCount, y=PositivityQDAP, color=Tag, group=interaction(authorCount,Tag))) + geom_boxplot() + geom_smooth() + xlim(0,10) + theme(legend.position='bottom') dev.off() #no noticeable effect of team size within either literature. 
#differences between the two fields are preserved across different team sizesWarning message: “Removed 100 rows containing missing values (stat_boxplot).”`geom_smooth()` using method = 'loess' and formula 'y ~ x' Warning message: “Removed 100 rows containing non-finite values (stat_smooth).”Warning message: “Removed 1 rows containing missing values (geom_boxplot).”CUSTOM DICTIONARIESdata <- read.csv('output/abstracts_scored_custom.csv', stringsAsFactors = FALSE) #make sure variable names match constructs dictionary <- read.csv('input/Lancet Dictionaries.csv', stringsAsFactors = FALSE) dictionary <- dictionary[,1:2] constructs <- levels(as.factor(dictionary$IndivConstruct)) neaten <- function(x){ x <- gsub(pattern = " ", replacement = "", x) #remove spaces x <- gsub(pattern = "/", replacement = "_", x) #replace slashes x <- gsub(pattern = "-", replacement = "", x) #replace dashes return(x) } constructs2 <- neaten(constructs) ### Exclude missing abstracts table(data$Tag, data$IndexedAbstract=='') #how many excluded/included? # FALSE TRUE # OpenScience 674 205 # Reproducibility 1232 815 data <- filter(data, (IndexedAbstract != '')) ###Exclude non-English titles data$Language <- textcat(as.character(data$Title)) table(data$Language) # afrikaans albanian breton catalan danish dutch english esperanto # 2 1 1 21 9 2 1764 1 # french frisian german indonesian italian latin manx middle_frisian # 6 3 28 2 1 7 2 4 # portuguese romanian rumantsch scots scots_gaelic slovak-ascii spanish swedish # 9 5 4 14 1 2 13 4 table(data$Tag, data$Language!='english') #how many excluded/included? # FALSE TRUE # OpenScience 595 79 # Reproducibility 1169 63 data <- filter(data, Language=='english') nrow(data) #1764 table(data$Tag) # OpenScience Reproducibility # 595 1169Compute composite scores#Exploratory Analysis #dat_dict2 <- filter(dat_dict, Year >= 2010, Year < 2018, authorCount <= 15) pdf('figures/Construct_boxplots.pdf', width=4, height=6) for(c in constructs2){ print(c) data_c <- data[data[,c] > 0,] #only include observations with at least one instance of a construct word names(data_c)[names(data_c)==c] <- 'var' title_c <- paste0(c, ' (n = ',nrow(data_c),')') print(ggplot(data_c, aes(x=Tag, y=var, group=Tag)) + geom_boxplot() + ylab(c) + ggtitle(title_c)) } dev.off() # Histograms for Pro-Social Construct data_OS <- filter(data, Tag=='OpenScience') data_RR <- filter(data, Tag=='Reproducibility') pdf('figures/ProSocialHist.pdf', width=6, height=5) angle <- c(45,-45) hist(data_RR$ProsocialMotives, border='gray', main='Distribution of Pro-Social Motives Construct Score', xlab='Pro-Social Motives Construct Score', breaks=seq(0,0.2,0.01)) hist(data_OS$ProsocialMotives, border='black', add=TRUE, breaks=seq(0,0.2,0.01)) legend('topright',legend=c('Open Science','Reproducibility'),fill='white',border=c('black','gray')) # hist(dat_dict_RR[,c], angle=angle[2], density=30, col='gray', main='Distribution of Pro-Social Motives Construct Score', xlab='Pro-Social Motives Construct Score', breaks=seq(0,0.2,0.01)) # hist(dat_dict_OS[,c], angle=angle[1], density=30, col='turquoise', add=TRUE, breaks=seq(0,0.2,0.01)) # legend('topright',legend=c('Open Science','Reproducibility'),fill=c('turquoise','gray'),angle=angle,density=30) dev.off() # Pro-social word density mean(data_OS$ProsocialMotives)*100 #2.380103 median(data_OS$ProsocialMotives)*100 #1.818182 mean(data_RR$ProsocialMotives)*100 #0.9095105 median(data_RR$ProsocialMotives)*100 #0 # Test for Differences in Pro-Social Construct Score mean_diff <- 
mean(data_OS$ProsocialMotives) - mean(data_RR$ProsocialMotives) #0.01470592 med_diff <- median(data_OS$ProsocialMotives) - median(data_RR$ProsocialMotives) #0.01818182 #perform permutation test M <- 100000 labels_true <- data$Tag n <- length(data$Tag) mean_diff_null <- rep(NA, M) #construct the null distribution for the difference in means med_diff_null <- rep(NA, M) #construct the null distribution for the difference in medians for(m in 1:M){ #print(m) labels_m <- sample(labels_true, n, replace=FALSE) data_OS_m <- data[labels_m=='OpenScience',] data_RR_m <- data[labels_m=='Reproducibility',] mean_diff_null[m] <- mean(data_OS_m$ProsocialMotives) - mean(data_RR_m$ProsocialMotives) med_diff_null[m] <- median(data_OS_m$ProsocialMotives) - median(data_RR_m$ProsocialMotives) } #p-value (percent of times the permutation-based mean/median difference exceeds the observed one) mean(abs(mean_diff_null) > abs(mean_diff)) #0 (< 1/100000 = 1e-05) mean(abs(med_diff_null) > abs(med_diff)) #0 (< 1/100000 = 1e-05) # Test for Differences in Use of Any Pro-Social Words #what percentage of papers use ANY pro-social words? tab <- table(data$ProsocialMotives > 0, data$Tag) tab # OpenScience Reproducibility # FALSE 141 654 # TRUE 454 515 tab/matrix(colSums(tab), nrow=2, ncol=2, byrow=TRUE) # OpenScience Reproducibility # FALSE 0.2369748 0.5594525 # TRUE 0.7630252 0.4405475 # % of papers with any pro-social words in the abstract n_OS <- (tab[2,1]+tab[1,1]) n_RR <- (tab[2,2]+tab[1,2]) n1_OS <- tab[2,1] n1_RR <- tab[2,2] pct_OS <- n1_OS/n_OS #76% pct_RR <- n1_RR/n_RR #44% #H0: p_OS = p_RR pct = (n1_OS+n1_RR)/(n_OS+n_RR) t_compare <- (pct_OS - pct_RR)/sqrt(pct*(1-pct)*(1/n_OS + 1/n_RR)) 2*pnorm(t_compare, lower.tail = FALSE) #2.737252e-38Import Librariesimport spacy m = spacy.load('en_core_web_sm')Examplesexample1 = "Allen is here in Bangalore, India. One of the major uses cases of named entity recognition involves automating the recommendation process. Recommendation systems dominate how we discover new content and ideas in today’s world. The example of Netflix shows that developing an effective recommendation system can work wonders for the fortunes of a media company by making their platforms more engaging and event addictive. For news publishers, using Named Entity Recognition to recommend similar articles is a proven approach. The below example from BBC news shows how recommendations for similar articles are implemented in real life. This can be done by extracting entities from a particular article and recommending the other articles which have the most similar entities mentioned in them. This is an approach that we have effectively used to develop content recommendations for a media industry client." example2 = "To install additional data tables for lemmatization in spaCy v2.2+ you can run pip install spacy[lookups] or install spacy-lookups-data separately. The lookups package is needed to create blank models with lemmatization data, and to lemmatize in languages that don’t yet come with pretrained models and aren’t powered by third-party libraries." example3 = "Now, if you pass it through the Named Entity Recognition API, it pulls out the entities Bandra (location) and Fitbit (Product). 
This can be then used to categorize the complaint and assign it to the relevant department within the organization that should be handling this."Labelsdoc = m(example1) for ent in doc.ents: print(ent.text, "-", ent.label_) doc = m(example2) for ent in doc.ents: print(ent.text, "-", ent.label_) doc = m(example3) for ent in doc.ents: print(ent.text, "-", ent.label_)the Named Entity Recognition - ORG Bandra - PERSONWord of Art & FACwoa = m("Smooth Criminal") for ent in woa.ents: print(ent.text, "-", ent.label_) fac = m("Tibetan Plateau and Asia") for ent in fac.ents: print(ent.text, "-", ent.label_)Tibetan - NORPQueryinglocs = [('Omnicom', 'IN', 'New York'),('DDB Needham', 'IN', 'New York'),('Kaplan Thaler Group', 'IN', 'New York'),('BBDO South', 'IN', 'Atlanta'), ('Georgia-Pacific', 'IN', 'Atlanta')] query = [e1 for (e1, rel, e2) in locs if e2=='Atlanta']Domain Specific Jargonc = "The ubiquitin-proteasome system is the major pathway for the maintenance of protein homeostasis. Its inhibition causes accumulation of ubiquitinated proteins; this accumulation has been associated with several of the most common neurodegenerative diseases. Several genetic factors have been identified for most neurodegenerative diseases, however, most cases are considered idiopathic, thus making the study of the mechanisms of protein accumulation a relevant field of research. It is often mentioned that the biggest risk factor for neurodegenerative diseases is aging, and several groups have reported an age-related alteration of the expression of some of the 26S proteasome subunits and a reduction of its activity. Proteasome subunits interact with proteins that are known to accumulate in neurodegenerative diseases such as α-synuclein in Parkinson's, tau in Alzheimer's, and huntingtin in Huntington's diseases" doc = m(c) for ent in doc.ents: print(ent.text, "-", ent.label_)26S - CARDINAL Huntington - GPE"EDA and Prediction of Mushroom Edibility using Select ML Algorithms"- toc: true- badges: true- comments: true Introduction Today, we'll work on a classification problem. The dataset we have chosen is the [mushroom-classification](https://www.kaggle.com/datasets/uciml/mushroom-classification) dataset available on Kaggle. This dataset was provided by UCI Machine Learning repository nearly 3 decades ago. The dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended.Our task making successful predictions begins first by the setup of the system for training. Setup# Import the required libraries and get the file path import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import matplotlib.pyplot as plt # data visualization import seaborn as sns # data visualization from sklearn.model_selection import train_test_split, cross_validate, GridSearchCV # validation from sklearn.preprocessing import OneHotEncoder, LabelEncoder # data preparation from sklearn.metrics import ConfusionMatrixDisplay, RocCurveDisplay, precision_score, recall_score, accuracy_score, f1_score # metrics from sklearn.pipeline import make_pipeline # build pipeline # ML models from sklearn.dummy import DummyClassifier from sklearn.linear_model import LogisticRegression, RidgeClassifier from sklearn.ensemble import RandomForestClassifier, HistGradientBoostingClassifier from xgboost import XGBClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC # ignore warnings import warnings warnings.filterwarnings("ignore") # get file path import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) file_dir = "/kaggle/input/mushroom-classification/mushrooms.csv" # inspect file size !ls -lh {file_dir} # inspect first 5 rows of dataset !head {file_dir}class,cap-shape,cap-surface,cap-color,bruises,odor,gill-attachment,gill-spacing,gill-size,gill-color,stalk-shape,stalk-root,stalk-surface-above-ring,stalk-surface-below-ring,stalk-color-above-ring,stalk-color-below-ring,veil-type,veil-color,ring-number,ring-type,spore-print-color,population,habitat p,x,s,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,u e,x,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,g e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m p,x,y,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,s,u e,x,s,g,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,a,g e,x,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g e,b,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m p,x,y,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,v,gObservations* The dataset file size is 366 KB.* It will be safe to import the whole dataset.* The prediction class is the first column.* There appears to be no index column# read file df = pd.read_csv(file_dir) # view all columns pd.set_option("display.max_columns", None) df.head()EDA and Data Preparation# split datasets for training and testing X = df.copy() y = X.pop("class") X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)Preliminary AnalysisX_train.shape X_train.info() X_train.describe()Observations- The dataset has 22 features.- There are 6499 enries. - All features are categorical in nature.- Most of the features have unique values less than 10. Target Class# target classes and their distribution class_vc = df["class"].value_counts() class_vc sns.barplot(x = class_vc.index, y = class_vc)Observations- The class counts are not much imbalanced. 
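As a quick numeric check on the observations above, the feature cardinalities and class balance can be printed directly. This is only a minimal sketch, assuming the `df`, `X_train` and `y_train` defined in the cells above:

```python
# Rough sanity checks for the EDA observations (assumes df, X_train, y_train
# from the preceding cells of this notebook).
print(X_train.nunique().sort_values(ascending=False))  # unique values per feature
print(y_train.value_counts(normalize=True))            # relative class frequencies
```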
Distribution of Featuresfor i, cols in enumerate(df): feature_vc = df[cols].value_counts() print(feature_vc, "\n_________\n") plt.figure(i) sns.barplot(x = feature_vc.index, y = feature_vc)e 4208 p 3916 Name: class, dtype: int64 _________ x 3656 f 3152 k 828 b 452 s 32 c 4 Name: cap-shape, dtype: int64 _________ y 3244 s 2556 f 2320 g 4 Name: cap-surface, dtype: int64 _________ n 2284 g 1840 e 1500 y 1072 w 1040 b 168 p 144 c 44 u 16 r 16 Name: cap-color, dtype: int64 _________ f 4748 t 3376 Name: bruises, dtype: int64 _________ n 3528 f 2160 y 576 s 576 a 400 l 400 p 256 c 192 m 36 Name: odor, dtype: int64 _________ f 7914 a 210 Name: gill-attachment, dtype: int64 _________ c 6812 w 1312 Name: gill-spacing, dtype: int64 _________ b 5612 n 2512 Name: gill-size, dtype: int64 _________ b 1728 p 1492 w 1202 n 1048 g 752 h 732 u 492 k 408 e 96 y 86 o 64 r 24 Name: gill-color, dtype: int64 _________ t 4608 e 3516 Name: stalk-shape, dtype: int64 _________ b [...]Observations- All the features are categorical in nature.- All classes are anonymized.- We'll need to OneHotEncode the data.- Because no feature has too many classes, we can OneHotEncode each feature class. - Feature `veil-type` has only one class, so it will need to be dropped. Feature Distribution against Target Classfor i, feature in enumerate(X_train): plt.figure(i) sns.countplot(x = feature, hue = y_train, data = X_train)Observations- Almost all the features distinguish between the two target classes.- Their distributions differ across the target classes. This suggests that models may achieve very high scores in classifying the two target classes. Null ValuesNow, we'll look at the number of null values in the data.df.isnull().sum()Observations- There are no null values in the dataset. - No imputation or transformation will be needed. Data PreparationThere isn't much to prepare in the data. We just need to OneHotEncode all the categorical features, and we'll also encode the target class.# OneHotEncoding Categorical features ohe = OneHotEncoder(drop = "first", handle_unknown = "ignore", sparse = False) ohe_train_data = ohe.fit_transform(X_train) ohe_test_data = ohe.transform(X_test) ohe_train_data # Encode the target in train dataset le = LabelEncoder() y_train_le = le.fit_transform(y_train) # Encode target in test dataset y_test_le = le.transform(y_test)So, we can now move on to training ML models. Train Hardcoded Model and Evaluate ResultsFirst we'll train a hardcoded model, which simply predicts the most frequent target class, 'edible'. This gives us a baseline score which our future models should at least beat, and it helps to catch errors in training.mf_model = DummyClassifier(random_state = 42, strategy = "most_frequent") mf_cross_val = cross_validate(mf_model, ohe_train_data, y_train_le, scoring = ["accuracy", "precision", "recall", "f1", "roc_auc"]) mf_cross_valObservations- The test roc_auc score is 0.5 in all validations, which means the model is not able to distinguish between the two target classes in any way. - This model also gives a test accuracy of 0.52.We'll need to at least improve on these scores. Model SelectionIn this section, we'll train multiple ML models and compare their performance after cross validation. 
Then we'll choose the best performing one for final training.models = {"LogisticRegression": LogisticRegression(random_state = 42), "RidgeClassification": RidgeClassifier(random_state = 42), "GaussianNB": GaussianNB(), "RandomForestClassifier": RandomForestClassifier(n_estimators = 70, random_state = 42), "XGBClassifier": XGBClassifier(n_estimators = 70, objective = "binary:logistic", learning_rate = 0.05, n_jobs = -1, scoring = "auc", random_state = 42)} model_scores = {} # cross validate all models for model_name, model in models.items(): cross_val = cross_validate(model, ohe_train_data, y_train_le, n_jobs = -1, scoring = ["accuracy", "precision", "recall", "f1", "roc_auc"]) del cross_val["fit_time"] del cross_val["score_time"] model_scores[model_name] = cross_val # put results into a dataframe model_scores_df = pd.DataFrame.from_dict(model_scores) model_scores_df = model_scores_df.applymap(np.mean) model_scores_dfObservations- Surprisingly, all of the models performed incredibly well and fit almost perfectly to the training dataset. - Out of these, RandomForestClassifier performed the best with a perfect score of 1.0 in all scores.Thus, we'll train the RandomForestClassifier to make final predictions. Train Final Model and make predictions# train model forest_clas = RandomForestClassifier(random_state = 42) forest_clas.fit(ohe_train_data, y_train_le) # make predictions preds = forest_clas.predict(ohe_test_data) # decode predictions into their original labels preds_in = le.inverse_transform(preds) preds_in # plot results ConfusionMatrixDisplay.from_predictions(y_test, preds_in) accuracy_score(y_test, preds_in) recall_score(y_test_le, preds) f1_score(y_test_le, preds) RocCurveDisplay.from_estimator(forest_clas, ohe_test_data, y_test_le)1 Load Datadata = {"AGVs": [], "randseed": [], "delay": [], "horizon": [], "total_time": [], "improvement": [], "comp_time_vec": [], "comp_time_avg": [], "comp_time_max": []} yaml_list = glob.glob("ICAPS/*.yaml") horizon_0_data = {"AGVs": [], "randseed": [], "delay": [], "total_time": []} for file in yaml_list: split_filename = file.split("_") horizon = str(split_filename[-1].split(".")[0]) delay = str(split_filename[-3]) seed = str(split_filename[-5]) AGVs = str(split_filename[-7]) with open(file, "r") as stream: try: yaml_data = yaml.safe_load(stream) cumulative_time = yaml_data["results"]["total time"] comp_time_vec = yaml_data["results"]["comp time"]["solve_time"] comp_time_avg = yaml_data["results"]["comp time"]["avg"] comp_time_max = yaml_data["results"]["comp time"]["max"] data["AGVs"].append(int(AGVs)) data["randseed"].append(int(seed)) data["delay"].append(int(delay)) data["horizon"].append(int(horizon)) data["total_time"].append(int(cumulative_time)) data["improvement"].append(int(cumulative_time)) data["comp_time_vec"].append(comp_time_vec) data["comp_time_avg"].append(comp_time_avg) data["comp_time_max"].append(comp_time_max) except yaml.YAMLError as exc: print(exc) columns = ["AGVs", "randseed", "delay", "horizon", "total_time", "improvement", "comp_time_vec", "comp_time_avg", "comp_time_max"] df = pd.DataFrame(data, columns=columns) print(df)AGVs randseed delay horizon total_time improvement \ 0 60 13 3 2 3783 3783 1 60 0 3 5 4254 4254 2 40 11 20 1 2671 2671 3 60 22 10 2 5101 5101 4 50 0 1 4 3020 3020 ... ... ... ... ... ... ... 2743 40 11 10 0 2543 2543 2744 60 28 5 5 3866 3866 2745 30 8 20 4 2542 2542 2746 60 21 10 2 4967 4967 2747 50 24 5 1 3096 3096 comp_time_vec comp_time_avg \ 0 [[0.18550990500000353, 0.1744489530000024, 0.2... 
0.077854 1 [[1.1187443609999974, 1.0838291840000025, 2.00... 0.210590 [...]2 Plot horizon vs avg computation timempl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['ps.fonttype'] = 42 sns.set(style="ticks") sns.set_palette("bright") sns_col = sns.color_palette("bright", n_colors=5) plt.figure(1) ax = plt.gca() ax.set(yscale="log") sns.lineplot(x="horizon", y="comp_time_avg", hue="AGVs", ci=100, data=df, palette=sns_col) plt.xlabel("Horizon H") plt.ylabel("Average computation time [s]") plt.grid() ax = plt.gca() ax.figure.set_size_inches(6,4) plt.subplots_adjust(left=0.12, bottom=0.13, right=0.98, top=0.98, wspace=None, hspace=None) save_loc = "/home/alberndt/Documents/research/bosch/figures/" plt.savefig(save_loc + "avg_comp_time.pdf", format="pdf", pad_inches=0.01, transparent=True)/home/alberndt/anaconda3/envs/sadg/lib/python3.6/site-packages/seaborn/_core.py:1187: UserWarning: Data has no positive values, and therefore cannot be log-scaled. x_visible = any(t.get_visible() for t in ax.get_xticklabels()) /home/alberndt/anaconda3/envs/sadg/lib/python3.6/site-packages/ipykernel_launcher.py:25: UserWarning: Data has no positive values, and therefore cannot be log-scaled.3 Plot max computation timesns.set(style="ticks") sns.set_palette("bright") sns_col = sns.color_palette("bright", n_colors=5) mpl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['ps.fonttype'] = 42 plt.figure(1) ax = plt.gca() ax.set(yscale="log") sns.lineplot(x="horizon", y="comp_time_max", hue="AGVs", ci=100, data=df, palette=sns_col) plt.xlabel("Horizon H") plt.ylabel("Peak computation time $[s]$") plt.grid() ax = plt.gca() ax.figure.set_size_inches(6,4) plt.subplots_adjust(left=0.12, bottom=0.13, right=0.98, top=0.98, wspace=None, hspace=None) plt.savefig(save_loc + "max_comp_time.pdf", format="pdf", pad_inches=0.01, transparent=True)CNNSaket TiwariDate: 26 Jul 2019#flattening the image loses some information #image size - filter size + 1 = number of iterations #applying a filter to an image is called convolving #by convolving we preserve (capture) the special features of the matrix #before applying the neural network, we apply the filters #because of the convolution, many different features go into the neural network, e.g. one neuron stores horizontal lines, one curves, one vertical lines, one colour #depending on the weights of the filters, we will extract different features #before applying the neural network, we apply the filters and update the features #so we will make predictions based on the feature images #dimension of the matrix after the filter is applied: (row - row_filter + 1) x (column - column_filter + 1) import numpy as np import matplotlib.pyplot as plt %matplotlib inline !
pip install opencv-python import cv2 im=cv2.imread('Assignments/Self/Images/landscape.jpg') img = cv2.cvtColor( im, cv2.COLOR_BGR2GRAY) print(img.shape) plt.imshow(img, cmap='gray') #plt.show #Filter # K =np.array([ # [0,-4,0], # [-1,4,-1], # [0,-1,0]]) # print(K.shape) #Horixontal-linedetection filter () Sobel filter # K =np.array([ # [-1,-2,-1], # [0,0,0], # [1,2,1]]) #Vertical # K =np.array([ # [-1,0,1], # [-2,0,2], # [-1,0,1]]) #Sharpen # K =np.array([ # [0,-1,0], # [-1,5,-1], # [0,-1,0]]) #Blur K =np.array([ [.1,.1,.1], [.1,.1,.1], [.1,.1,.1]]) out_r= img.shape[0]- K.shape[0]+1 out_c= img.shape[1] - K.shape[0] +1 new_img= np.zeros((out_r,out_c)) print(new_img.shape) for r in range(out_r): for c in range(out_c): image_patch=img[ r: r+K.shape[0], c:c+K.shape[1]] prod= image_patch* K prod=prod.sum() new_img[r,c] = prod if prod > 0 else 0 plt.imshow(new_img)Customizable VAE demo Author: , inspired by code written by Date: 19/12/13The purpose of this notebook:- With the help of `vae-designer-demo.ipynb`, I hope to construct arbitrary VAE architectures based on various model-level and layer-level parameters.- Specially, I use this notebook to write up a function that takes in these parameters and output the desired VAE for training. Todos:- Remove `DataParallel` because I am agnostic towards how using multi-GPU training affects model convergence. (d)- Instead of building the VAE from one class, build two subclasses (Encoder and Decoder) and let VAE inherit from them - the benefit is that now we can use `super(self, VAE).__init__` to directly initialize the encoder and decoder network. (d) - within the init function of VAE, pytorch only collects parameters that are of certain pytorch types, which prevents me from setting attributes to instances of type Encoder and Decoder; instead, I will create two methods- Run nb2py on this notebook.- Use the resulting script to help train a VAE for MNIST in `vae_fast_train_demo.ipynb`%reload_ext autoreload %autoreload 2 %matplotlib inline #export import os import json import torch.nn as nn import torch.optim from collections import OrderedDictIdeal workflow for creating a trainable VAE *vae-designer -(hyperparameters and values)-> design-dict -> methods for users*- design a vae using **vae-designer**, take note of all the hyper-parameters and their values- capture these values of all those parameters in a **design dict**- pass the design dict to **methods for users** to get vae and optimizer Convolutional Sampler#export def conv_sampler( in_channels:int, layer_num:int, kernel_nums:tuple, kernel_sizes:tuple, strides:tuple, paddings:tuple, final_activation:str=None, up_sample:bool=False, output_paddings:tuple=None, output_type:str='OrderedDict' )->OrderedDict: """ Return a convolutional sampler (nn.Sequential) with batch-normalizations and leaky ReLUs (for down-samplers) or ReLUs (for up-samplers). The DCGAN paper recommends that kernel sizes should be greater than 3, that strides should be greater than 1, and batch-normalization should be used to guarantee a healthy gradient-flow. 
:param up_sample: whether the returned sampler is a up-sampler (default: False) """ assert (up_sample and output_paddings is not None) or (not up_sample and output_paddings is None), \ AssertionError('output_paddings cannot be None when up_sample is True.') HYPERPARAMS = { 'conv2d-bias':False, # set to false because bn introduces biases 'lrelu-negslope':0.2 } # this insight comes from the dcgan paper if up_sample: core_layer = nn.ConvTranspose2d core_layer_name = 'convtranpose2d' activation_name = 'relu' activation = nn.ReLU() else: core_layer = nn.Conv2d core_layer_name = 'conv2d' activation_name = 'lrelu' activation = nn.LeakyReLU(HYPERPARAMS['lrelu-negslope']) layers = OrderedDict([]) for i in range(layer_num): if not up_sample: layers[f'block{i}-{core_layer_name}'] = core_layer( in_channels=in_channels, out_channels=kernel_nums[i], kernel_size=kernel_sizes[i], stride=strides[i], padding=paddings[i], bias=HYPERPARAMS['conv2d-bias'] ) else: layers[f'block{i}-{core_layer_name}'] = core_layer( in_channels=in_channels, out_channels=kernel_nums[i], kernel_size=kernel_sizes[i], stride=strides[i], padding=paddings[i], bias=HYPERPARAMS['conv2d-bias'], output_padding=output_paddings[i] ) layers[f'block{i}-bn'] = nn.BatchNorm2d(kernel_nums[i]) if i == layer_num - 1: if final_activation is not None: if final_activation == 'sigmoid': layers[f'block{i}-{final_activation}'] = nn.Sigmoid() elif final_activation == 'relu': layers[f'block{i}-{final_activation}'] = nn.ReLU() else: layers[f'block{i}-{activation_name}'] = activation in_channels = kernel_nums[i] # if output_type == 'nn.Sequential': # return nn.Sequential(layers) # elif output_type == 'OrderedDict': return layers # useful for adding extra layersVAEDesign (inspired by Learner)#export class VAEDesign(): def __init__(self, down_sampler_design:dict, up_sampler_design:dict, h_dim:int, z_dim:int, unflatten_out_shape:tuple): self.down_sampler_design = down_sampler_design self.up_sampler_design = up_sampler_design self.h_dim = h_dim self.z_dim = z_dim self.unflatten_out_shape = unflatten_out_shape def save_as_json(self, json_fpath:str): fdir = os.makedirs(''.join(json_fpath.split('/')[:-1]), exist_ok=True) with open(json_fpath, 'w') as json_f: # https://stackoverflow.com/questions/9170288/pretty-print-json-data-to-a-file-using-python json.dump(self.design_dict, json_f, indent=4) @property def design_dict(self): return { 'down_sampler_design':self.down_sampler_design, 'up_sampler_design':self.up_sampler_design, 'h_dim':self.h_dim, 'z_dim':self.z_dim, 'unflatten_out_shape':self.unflatten_out_shape } @classmethod def from_json(cls, json_fpath:str): with open(json_fpath, 'r') as json_f: design_dict = json.load(json_f) return cls(**design_dict)Custom layers#export class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1) # view(batch_size, flattened_example) class UnFlatten(nn.Module): def __init__(self, out_shape:tuple): super().__init__() self.out_shape = out_shape def forward(self, input): return input.view(input.size(0), *self.out_shape)VAE class (design_dict, dev)#export class VAE(nn.Module): """Trainable variational auto-encoder implemented in PyTorch.""" def __init__(self, design:VAEDesign, dev:str): super(VAE, self).__init__() self.dev = dev # the down-sampler is an OrderedDict of layers down_sampler_od = conv_sampler(**design.down_sampler_design) down_sampler_od['flatten'] = Flatten() # append a new layer at the end self.encoder = nn.Sequential(down_sampler_od) h_dim, z_dim = design.h_dim, design.z_dim self.z_dim = 
z_dim # used for the self.generate method self.fc1 = nn.Linear(h_dim, z_dim) # get means self.fc2 = nn.Linear(h_dim, z_dim) # get logvars self.fc3 = nn.Linear(z_dim, h_dim) # process the samples for the up_sampler # the up-sampler is also an OrderedDict of layers up_sampler_od = conv_sampler(**design.up_sampler_design) up_sampler_od['unflatten'] = UnFlatten(out_shape=design.unflatten_out_shape) up_sampler_od.move_to_end('unflatten', last=False) # append a new layer at the front self.decoder = nn.Sequential(up_sampler_od) def reparametrize(self, mu:torch.Tensor, logvar:torch.Tensor)->torch.Tensor: """Helper method to self.bottleneck""" std = logvar.mul(0.5).exp_() # logvar to std esp = torch.randn(*mu.size()) # number of std z = mu + std * esp.to(self.dev).double() # sample latent vectors return z def bottleneck(self, h:torch.Tensor)->tuple: """Helper method to self.encode""" mu, logvar = self.fc1(h), self.fc2(h) z = self.reparametrize(mu, logvar) return z, mu, logvar def encode(self, x:torch.Tensor)->tuple: """Helper method to self.forward""" h = self.encoder(x) z, mu, logvar = self.bottleneck(h) return z, mu, logvar def decode(self, z:torch.Tensor)->torch.Tensor: """Helper method to self.forward""" z = self.fc3(z) z = self.decoder(z) return z def forward(self, x:torch.Tensor)->tuple: z, mu, logvar = self.encode(x) z = self.decode(z) return z, mu, logvar def generate(self, n:int)->torch.Tensor: zs = torch.randn((n, self.z_dim)).double().to(self.dev) with torch.no_grad(): gens = self.decoder(self.fc3(zs)) return gensMethods for users#export def get_vae_and_opt(design_json_fpath:str, dev:str): """Get a trainable VAE and its optimizer.""" vae = VAE(design=VAEDesign.from_json(design_json_fpath), dev=dev) # this dev is used in the VAE.reparameterize and VAE.generate method vae = vae.to(dev).double() # this dev decides where model parameters are loaded opt = torch.optim.Adam(vae.parameters(), lr=1e-3) return vae, opt def load_vae(path:str, design_json_fpath:str, dev:str='cpu'): """ Load trained weights into a VAE architecture. :param path: the path to the trained weights :param design_dict: the Design object of a VAE architecture :param dev: where the resulting model would exist (options: 'cpu', 'cuda') (default: 'cpu') """ vae = VAE(design=VAEDesign.from_json(design_json_fpath), dev=dev) vae = vae.to(dev).double() vae.load_state_dict(torch.load(path, map_location=dev)) return vaeInner join What column to merge on? First exerciseChicago provides a list of taxicab owners and vehicles licensed to operate within the city, for public safety. Your goal is to merge two tables together. One table is called taxi_owners, with info about the taxi cab company owners, and one is called taxi_veh, with info about each taxi cab vehicle. Both the taxi_owners and taxi_veh tables have been loaded for you and you can explore them in the console.Choose the column you would use to merge the two tables on using the .merge() method.Answer - on='vid' Your first inner joinYou have been tasked with figuring out what the most popular types of fuel used in Chicago taxis are. To complete the analysis, you need to merge the taxi_owners and taxi_veh tables together on the vid column. You can then use the merged table along with the .value_counts() method to find the most common fuel_type.Since you'll be working with pandas throughout the course, the package will be preloaded for you as pd in each exercise in this course. Also the taxi_owners and taxi_veh DataFrames are loaded for you. 
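Outside the course environment the taxi_owners and taxi_veh DataFrames are not available, so here is a minimal sketch of the same inner join on vid using invented stand-in tables:

```python
import pandas as pd

# Invented stand-ins for the course's taxi_owners and taxi_veh tables.
taxi_owners = pd.DataFrame({"vid": ["V1", "V2", "V3"],
                            "owner": ["Acme Cab", "Blue Taxi", "City Cars"]})
taxi_veh = pd.DataFrame({"vid": ["V1", "V2", "V4"],
                         "fuel_type": ["Hybrid", "Gasoline", "Flex Fuel"]})

# An inner join on vid keeps only the vehicles present in both tables (V1, V2).
print(taxi_owners.merge(taxi_veh, on="vid"))
```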
1Merge taxi_owners with taxi_veh on the column vid, and save the result to taxi_own_veh.# Merge the taxi_owners and taxi_veh tables taxi_own_veh = taxi_owners.merge(taxi_veh, on = "vid") # Print the column names of the taxi_own_veh print(taxi_own_veh.columns)2Set the left and right table suffixes for overlapping columns of the merge to _own and _veh, respectively.# Merge the taxi_owners and taxi_veh tables setting a suffix taxi_own_veh = taxi_owners.merge(taxi_veh, on='vid', suffixes=("_own", "_veh")) # Print the column names of taxi_own_veh print(taxi_own_veh.columns)3Select the fuel_type column from taxi_own_veh and print the value_counts() to find the most popular fuel_types used.# Merge the taxi_owners and taxi_veh tables setting a suffix taxi_own_veh = taxi_owners.merge(taxi_veh, on='vid', suffixes=('_own','_veh')) # Print the value_counts to find the most popular fuel_type print(taxi_own_veh['fuel_type'].value_counts())Inner joins and number of rows returnedAll of the merges you have studied to this point are called inner joins. It is necessary to understand that inner joins only return the rows with matching values in both tables. You will explore this further by reviewing the merge between the wards and census tables, then comparing it to merges of copies of these tables that are slightly altered, named wards_altered, and census_altered. The first row of the wards column has been changed in the altered tables. You will examine how this affects the merge between them. The tables have been loaded for you.For this exercise, it is important to know that the wards and census tables start with 50 rows. 1Merge wards and census on the ward column and save the result to wards_census.# Merge the wards and census tables on the ward column wards_census = wards.merge(census, on = "ward") # Print the shape of wards_census print('wards_census table shape:', wards_census.shape)2Merge the wards_altered and census tables on the ward column, and notice the difference in returned rows.# In the ward column change '1' to '61' wards_altered = wards wards_altered.loc[wards['ward'] == '1', "ward"] = '61' # Merge the wards and census tables on the ward column wards_altered_census = wards_altered.merge(census, on='ward') # Print the shape of wards_census print(wards_altered[['ward']].head())3Merge the wards and census_altered tables on the ward column, and notice the difference in returned rows.# Change '1' to None in `ward` col census_altered = census census_altered.loc[census['ward'] == '1', "ward"] = None # Merge the wards and census tables on the ward column wards_census_altered = wards.merge(census_altered, on='ward') # Print the shape of wards_census print(census_altered[['ward']].head())One-to-many mergeA business may have one or multiple owners. In this exercise, you will continue to gain experience with one-to-many merges by merging a table of business owners, called biz_owners, to the licenses table. Recall from the video lesson, with a one-to-many relationship, a row in the left table may be repeated if it is related to multiple rows in the right table. In this lesson, you will explore this further by finding out what is the most common business owner title. (i.e., secretary, CEO, or vice president)The licenses and biz_owners DataFrames are loaded for you. 
1Starting with the licenses table on the left, merge it to the biz_owners table on the column account, and save the results to a variable named licenses_owners.# Merge the licenses and biz_owners table on account licenses_owners = licenses.merge(biz_owners, on = "account") # Group the results by title then count the number of accounts counted_df = licenses_owners.groupby("title").agg({'account':'count'}) # Sort the counted_df in desending order sorted_df = counted_df.sort_values(by = ["account"], ascending = False) # Use .head() method to print the first few rows of sorted_df print(sorted_df.head())Total riders in a monthYour goal is to find the total number of rides provided to passengers passing through the Wilson station (station_name == 'Wilson') when riding Chicago's public transportation system on weekdays (day_type == 'Weekday') in July (month == 7). Luckily, Chicago provides this detailed data, but it is in three different tables. You will work on merging these tables together to answer the question. This data is different from the business related data you have seen so far, but all the information you need to answer the question is below.The cal, ridership, and stations DataFrames have been loaded for you. The relationship between the tables can be seen in the diagram below. 1Merge the ridership and cal tables together, starting with the ridership table on the left and save the result to the variable ridership_cal. If you code takes too long to run, your merge conditions might be incorrect.# Merge the ridership and cal tables ridership_cal = ridership.merge(cal, on = ["year", "month", "day"])2Extend the previous merge to three tables by also merging the stations table.# Merge the ridership, cal, and stations tables ridership_cal_stations = ridership.merge(cal, on=['year','month','day']) \ .merge(stations, on = "station_id")3Create a variable called filter_criteria to select the appropriate rows from the merged table so that you can sum the rides column.# Merge the ridership, cal, and stations tables ridership_cal_stations = ridership.merge(cal, on=['year','month','day']) \ .merge(stations, on='station_id') # Create a filter to filter ridership_cal_stations filter_criteria = ((ridership_cal_stations['month'] == 7) & (ridership_cal_stations['day_type'] == "Weekday") & (ridership_cal_stations['station_name'] == "Wilson")) # Use .loc and the filter to select for rides print(ridership_cal_stations.loc[filter_criteria, 'rides'].sum())Three table mergeTo solidify the concept of a three DataFrame merge, practice another exercise. A reasonable extension of our review of Chicago business data would include looking at demographics information about the neighborhoods where the businesses are. A table with the median income by zip code has been provided to you. You will merge the licenses and wards tables with this new income-by-zip-code table called zip_demo.The licenses, wards, and zip_demo DataFrames have been loaded for you.**Instructions**Starting with the licenses table, merge to it the zip_demo table on the zip column. Then merge the resulting table to the wards table on the ward column. 
Save result of the three merged tables to a variable named licenses_zip_ward.Group the results of the three merged tables by the column alderman and find the median income.# Merge licenses and zip_demo, on zip; and merge the wards on ward licenses_zip_ward = licenses.merge(zip_demo, on = "zip") \ .merge(wards, on = "ward") # Print the results by alderman and show median income print(licenses_zip_ward.groupby("alderman").agg({'income':'median'}))One-to-many merge with multiple tablesIn this exercise, assume that you are looking to start a business in the city of Chicago. Your perfect idea is to start a company that uses goats to mow the lawn for other businesses. However, you have to choose a location in the city to put your goat farm. You need a location with a great deal of space and relatively few businesses and people around to avoid complaints about the smell. You will need to merge three tables to help you choose your location. The land_use table has info on the percentage of vacant land by city ward. The census table has population by ward, and the licenses table lists businesses by ward.The land_use, census, and licenses tables have been loaded for you. 1Merge land_use and census on the ward column. Merge the result of this with licenses on the ward column, using the suffix _cen for the left table and _lic for the right table. Save this to the variable land_cen_lic.# Merge land_use and census and merge result with licenses including suffixes land_cen_lic = land_use.merge(census, on = "ward") \ .merge(licenses, on = "ward", suffixes = ("_cen", "_lic"))2Group land_cen_lic by ward, pop_2010 (the population in 2010), and vacant, then count the number of accounts. Save the results to pop_vac_lic.# Merge land_use and census and merge result with licenses including suffixes land_cen_lic = land_use.merge(census, on='ward') \ .merge(licenses, on='ward', suffixes=('_cen','_lic')) # Group by ward, pop_2010, and vacant, then count the # of accounts pop_vac_lic = land_cen_lic.groupby(["ward", "pop_2010", "vacant"], as_index=False).agg({'account':'count'})3Sort pop_vac_lic by vacant, account, andpop_2010 in descending, ascending, and ascending order respectively. Save it as sorted_pop_vac_lic.# Merge land_use and census and merge result with licenses including suffixes land_cen_lic = land_use.merge(census, on='ward') \ .merge(licenses, on='ward', suffixes=('_cen','_lic')) # Group by ward, pop_2010, and vacant, then count the # of accounts pop_vac_lic = land_cen_lic.groupby(['ward','pop_2010','vacant'], as_index=False).agg({'account':'count'}) # Sort pop_vac_lic and print the results sorted_pop_vac_lic = pop_vac_lic.sort_values(by = ["vacant", "account", "pop_2010"], ascending=[False, True, True]) # Print the top few rows of sorted_pop_vac_lic print(sorted_pop_vac_lic.head())Merging tables with different join types Counting missing rows with left joinThe Movie Database is supported by volunteers going out into the world, collecting data, and entering it into the database. This includes financial data, such as movie budget and revenue. If you wanted to know which movies are still missing data, you could use a left join to identify them. Practice using a left join by merging the movies table and the financials table.The movies and financials tables have been loaded for you. 
2Merge the movies table, as the left table, with the financials table using a left join, and save the result to movies_financials.# Merge movies and financials with a left join movies_financials = movies.merge(financials, on = 'id', how = 'left')3Count the number of rows in movies_financials with a null value in the budget column.# Merge the movies table with the financials table with a left join movies_financials = movies.merge(financials, on='id', how='left') # Count the number of rows in the budget column that are missing number_of_missing_fin = movies_financials['budget'].isnull().sum() # Print the number of movies missing financials print(number_of_missing_fin)Enriching a datasetSetting how='left' with the .merge()method is a useful technique for enriching or enhancing a dataset with additional information from a different table. In this exercise, you will start off with a sample of movie data from the movie series Toy Story. Your goal is to enrich this data by adding the marketing tag line for each movie. You will compare the results of a left join versus an inner join.The toy_story DataFrame contains the Toy Story movies. The toy_story and taglines DataFrames have been loaded for you. 1Merge toy_story and taglines on the id column with a left join, and save the result as toystory_tag.# Merge the toy_story and taglines tables with a left join toystory_tag = toy_story.merge(taglines, on = 'id', how = 'left') # Print the rows and shape of toystory_tag print(toystory_tag) print(toystory_tag.shape)2With toy_story as the left table, merge to it taglines on the id column with an inner join, and save as toystory_tag.# Merge the toy_story and taglines tables with a inner join toystory_tag = toy_story.merge(taglines, on = "id") # Print the rows and shape of toystory_tag print(toystory_tag) print(toystory_tag.shape)How many rows with a left join?Select the true statement about left joins.Try running the following code statements in the console.left_table.merge(one_to_one, on='id', how='left').shapeleft_table.merge(one_to_many, on='id', how='left').shapeNote that the left_table starts out with 4 rows.**Answer**The output of a one-to-many merge with a left join will have greater than or equal rows than the left table. Right join to find unique moviesMost of the recent big-budget science fiction movies can also be classified as action movies. You are given a table of science fiction movies called scifi_movies and another table of action movies called action_movies. Your goal is to find which movies are considered only science fiction movies. Once you have this table, you can merge the movies table in to see the movie names. Since this exercise is related to science fiction movies, use a right join as your superhero power to solve this problem.The movies, scifi_movies, and action_movies tables have been loaded for you. 1Merge action_movies and scifi_movies tables with a right join on movie_id. 
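A small sketch with made-up movies and financials stand-ins illustrates how a left join surfaces the rows that lack financial data:

```python
import pandas as pd

# Made-up stand-ins for the movies and financials tables.
movies = pd.DataFrame({"id": [1, 2, 3],
                       "title": ["Toy Story", "Jumanji", "Heat"]})
financials = pd.DataFrame({"id": [1, 3], "budget": [30_000_000, 60_000_000]})

# A left join keeps every movie; ids with no financial data get NaN budgets.
movies_financials = movies.merge(financials, on="id", how="left")
print(movies_financials)
print(movies_financials["budget"].isnull().sum())  # -> 1 movie missing financials
```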
Save the result as action_scifi.# Merge action_movies to scifi_movies with right join action_scifi = action_movies.merge(scifi_movies, on = 'movie_id', how = "right")2Update the merge to add suffixes, where '_act' and '_sci' are suffixes for the left and right tables, respectively.# Merge action_movies to scifi_movies with right join action_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right', suffixes = ('_act', '_sci')) # Print the first few rows of action_scifi to see the structure print(action_scifi.head())3From action_scifi, subset only the rows where the genre_act column is null.# Merge action_movies to the scifi_movies with right join action_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right', suffixes=('_act','_sci')) # From action_scifi, select only the rows where the genre_act column is null scifi_only = action_scifi[action_scifi["genre_act"].isnull()]4Merge movies and scifi_only using the id column in the left table and the movie_id column in the right table with an inner join.# Merge action_movies to the scifi_movies with right join action_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right', suffixes=('_act','_sci')) # From action_scifi, select only the rows where the genre_act column is null scifi_only = action_scifi[action_scifi['genre_act'].isnull()] # Merge the movies and scifi_only tables with an inner join movies_and_scifi_only = movies.merge(scifi_only, left_on = "id", right_on = "movie_id") # Print the first few rows and shape of movies_and_scifi_only print(movies_and_scifi_only.head()) print(movies_and_scifi_only.shape)Popular genres with right joinWhat are the genres of the most popular movies? To answer this question, you need to merge data from the movies and movie_to_genres tables. In a table called pop_movies, the top 10 most popular movies in the movies table have been selected. To ensure that you are analyzing all of the popular movies, merge it with the movie_to_genres table using a right join. To complete your analysis, count the number of different genres. Also, the two tables can be merged by the movie ID. However, in pop_movies that column is called id, and in movies_to_genres it's called movie_id.The pop_movies and movie_to_genres tables have been loaded for you.**Instructions**Merge movie_to_genres and pop_movies using a right join. Save the results as genres_movies.Group genres_movies by genre and count the number of id values.# Use right join to merge the movie_to_genres and pop_movies tables genres_movies = movie_to_genres.merge(pop_movies, how='right', left_on = "movie_id", right_on = "id") # Count the number of genres genre_count = genres_movies.groupby('genre').agg({'id':'count'}) # Plot a bar chart of the genre_count genre_count.plot(kind='bar') plt.show()Using outer join to select actorsOne cool aspect of using an outer join is that, because it returns all rows from both merged tables and null where they do not match, you can use it to find rows that do not have a match in the other table. To try for yourself, you have been given two tables with a list of actors from two popular movies: Iron Man 1 and Iron Man 2. Most of the actors played in both movies. Use an outer join to find actors who did not act in both movies.The Iron Man 1 table is called iron_1_actors, and Iron Man 2 table is called iron_2_actors. 
Both tables have been loaded for you and a few rows printed so you can see the structure.**Instructions**Save to iron_1_and_2 the merge of iron_1_actors (left) with iron_2_actors tables with an outer join on the id column, and set suffixes to ('_1','_2').Create an index that returns True if name_1 or name_2 are null, and False otherwise.# Merge iron_1_actors to iron_2_actors on id with outer join using suffixes iron_1_and_2 = iron_1_actors.merge(iron_2_actors, on = "id", how = "outer", suffixes=("_1", "_2")) # Create an index that returns true if name_1 or name_2 are null m = ((iron_1_and_2['name_1'].isnull()) | (iron_1_and_2['name_2'].isnull())) # Print the first few rows of iron_1_and_2 print(iron_1_and_2[m].head())Self joinMerging a table to itself can be useful when you want to compare values in a column to other values in the same column. In this exercise, you will practice this by creating a table that for each movie will list the movie director and a member of the crew on one row. You have been given a table called crews, which has columns id, job, and name. First, merge the table to itself using the movie ID. This merge will give you a larger table where for each movie, every job is matched against each other. Then select only those rows with a director in the left table, and avoid having a row where the director's job is listed in both the left and right tables. This filtering will remove job combinations that aren't with the director.The crews table has been loaded for you. 1To a variable called crews_self_merged, merge the crews table to itself on the id column using an inner join, setting the suffixes to '_dir' and '_crew' for the left and right tables respectively.# Merge the crews table to itself crews_self_merged = crews.merge(crews, on = "id", suffixes = ('_dir', '_crew'))2Create a Boolean index, named boolean_filter, that selects rows from the left table with the job of 'Director' and avoids rows with the job of 'Director' in the right table.# Merge the crews table to itself crews_self_merged = crews.merge(crews, on='id', how='inner', suffixes=('_dir','_crew')) # Create a Boolean index to select the appropriate boolean_filter = ((crews_self_merged['job_dir'] == "Director") & (crews_self_merged['job_crew'] != "Director")) direct_crews = crews_self_merged[boolean_filter]3Use the .head() method to print the first few rows of direct_crews.# Merge the crews table to itself crews_self_merged = crews.merge(crews, on='id', how='inner', suffixes=('_dir','_crew')) # Create a boolean index to select the appropriate rows boolean_filter = ((crews_self_merged['job_dir'] == 'Director') & (crews_self_merged['job_crew'] != 'Director')) direct_crews = crews_self_merged[boolean_filter] # Print the first few rows of direct_crews print(direct_crews.head())Index merge for movie ratingsTo practice merging on indexes, you will merge movies and a table called ratings that holds info about movie ratings. 
Make sure your merge returns all of the rows from the movies table and not all the rows of ratings table need to be included in the result.The movies and ratings tables have been loaded for you.**Instructions**Merge movies and ratings on the index and save to a variable called movies_ratings, ensuring that all of the rows from the movies table are returned.# Merge to the movies table the ratings table on the index movies_ratings = movies.merge(ratings, on = 'id') # Print the first few rows of movies_ratings print(movies_ratings.head())Do sequels earn more?It is time to put together many of the aspects that you have learned in this chapter. In this exercise, you'll find out which movie sequels earned the most compared to the original movie. To answer this question, you will merge a modified version of the sequels and financials tables where their index is the movie ID. You will need to choose a merge type that will return all of the rows from the sequels table and not all the rows of financials table need to be included in the result. From there, you will join the resulting table to itself so that you can compare the revenue values of the original movie to the sequel. Next, you will calculate the difference between the two revenues and sort the resulting dataset.The sequels and financials tables have been provided. 1With the sequels table on the left, merge to it the financials table on index named id, ensuring that all the rows from the sequels are returned and some rows from the other table may not be returned, Save the results to sequels_fin.# Merge sequels and financials on index id sequels_fin = sequels.merge(financials, on = 'id', how = 'left')2Merge the sequels_fin table to itself with an inner join, where the left and right tables merge on sequel and id respectively with suffixes equal to ('_org','_seq'), saving to orig_seq.# Merge sequels and financials on index id sequels_fin = sequels.merge(financials, on='id', how='left') # Self merge with suffixes as inner join with left on sequel and right on id orig_seq = sequels_fin.merge(sequels_fin, how="inner", left_on="sequel", right_on="id", right_index=True, suffixes=('_org','_seq')) # Add calculation to subtract revenue_org from revenue_seq orig_seq['diff'] = orig_seq['revenue_seq'] - orig_seq['revenue_org']3Select the title_org, title_seq, and diff columns of orig_seq and save this as titles_diff.# Merge sequels and financials on index id sequels_fin = sequels.merge(financials, on='id', how='left') # Self merge with suffixes as inner join with left on sequel and right on id orig_seq = sequels_fin.merge(sequels_fin, how='inner', left_on='sequel', right_on='id', right_index=True, suffixes=('_org','_seq')) # Add calculation to subtract revenue_org from revenue_seq orig_seq['diff'] = orig_seq['revenue_seq'] - orig_seq['revenue_org'] # Select the title_org, title_seq, and diff titles_diff = orig_seq[["title_org", "title_seq", "diff"]]4Sort by titles_diff by diff in descending order and print the first few rows.# Merge sequels and financials on index id sequels_fin = sequels.merge(financials, on='id', how='left') # Self merge with suffixes as inner join with left on sequel and right on id orig_seq = sequels_fin.merge(sequels_fin, how='inner', left_on='sequel', right_on='id', right_index=True, suffixes=('_org','_seq')) # Add calculation to subtract revenue_org from revenue_seq orig_seq['diff'] = orig_seq['revenue_seq'] - orig_seq['revenue_org'] # Select the title_org, title_seq, and diff titles_diff = 
orig_seq[['title_org','title_seq','diff']] # Print the first rows of the sorted titles_diff print(titles_diff.sort_values(by = "diff", ascending = False).head())Advanced Merging and Concatenating Steps of a semi-joinIn the last video, you were shown how to perform a semi-join with pandas. In this exercise, you'll solidify your understanding of the necessary steps. Recall that a semi-join filters the left table to only the rows where a match exists in both the left and right tables.**Instructions**Sort the steps in the correct order of the technique shown to perform a semi-join in pandas.**Answer**Merge the left and right tables on key column using an inner-join.Search if the key column in the left table is in the merged tables using the .isin() method creating a Boolean Series.Subset the rows of the left table.# Example genres_tracks = genres.merge(top_tracks, on='gid', how='left', indicator=True) gid_list = genres_tracks.loc[genres_tracks['_merge'] == 'left_only','gid'] non_top_genres = genres[genres['gid'].isin(gid_list)] print(non_top_genres.head())Performing and anti-joinIn our music streaming company dataset, each customer is assigned an employee representative to assist them. In this exercise, filter the employee table by a table of top customers, returning only those employees who are not assigned to a customer. The results should resemble the results of an anti-join. The company's leadership will assign these employees additional training so that they can work with high valued customers.The top_cust and employees tables have been provided for you. 1Merge employees and top_cust with a left join, setting indicator argument to True. Save the result to empl_cust.# Merge employees and top_cust empl_cust = employees.merge(top_cust, on="srid", how="left", indicator=True)4Select the srid column of empl_cust and the rows where _merge is 'left_only'. Save the result to srid_list.# Select the srid column where _merge is left_only srid_list = empl_cust.loc[empl_cust["_merge"] == "left_only", 'srid']3Subset the employees table and select those rows where the srid is in the variable srid_list and print the results.# Get employees not working with top customers print(employees[employees["srid"].isin(srid_list)])Performing a semi-joinSome of the tracks that have generated the most significant amount of revenue are from TV-shows or are other non-musical audio. You have been given a table of invoices that include top revenue-generating items. Additionally, you have a table of non-musical tracks from the streaming service. In this exercise, you'll use a semi-join to find the top revenue-generating non-musical tracks..The tables non_mus_tcks, top_invoices, and genres have been loaded for you.**Instructions**- Merge non_mus_tcks and top_invoices on tid using an inner join. Save the result as tracks_invoices.- Use .isin() to subset the rows of non_mus_tck where tid is in the tid column of tracks_invoices. Save the result as top_tracks.- Group top_tracks by gid and count the tid rows. 
Save the result to cnt_by_gid.- Merge cnt_by_gid with the genres table on gid and print the result.# Merge the non_mus_tcks and top_invoices tables on tid tracks_invoices = non_mus_tcks.merge(top_invoices, on='tid') # Use .isin() to subset non_mus_tcks to rows with tid in tracks_invoices top_tracks = non_mus_tcks[non_mus_tcks['tid'].isin(tracks_invoices['tid'])] # Group the top_tracks by gid and count the tid rows cnt_by_gid = top_tracks.groupby(['gid'], as_index=False).agg({'tid':'count'}) # Merge the genres table to cnt_by_gid on gid and print print(cnt_by_gid.merge(genres, on='gid'))Merging Ordered and Time-Series DataIn this final chapter, you’ll step up a gear and learn to apply pandas' specialized methods for merging time-series and ordered data together with real-world financial and economic data from the city of Chicago. You’ll also learn how to query resulting tables using a SQL-style format, and unpivot data using the melt method. Sumary.merge() method- Columns to join on:** on, left_on, right_on- Type of join** how (left, right, inner, outer)** **default** inner- Overlapping column names** suffixes- Calling the method** df1.merge(df2)- fill_method** Forward fill: ffillDifferents with merge_ordered() method:- **Default** type of join: outer.- Calling the function: pd.merge_ordered(df1, df2) Correlation between GDP and S&P500In this exercise, you want to analyze stock returns from the S&P 500. You believe there may be a relationship between the returns of the S&P 500 and the GDP of the US. Merge the different datasets together to compute the correlation.Two tables have been provided for you, named sp500, and gdp. As always, pandas has been imported for you as pd. 1- Use merge_ordered() to merge gdp and sp500 using a left join on year and date. Save the results as gdp_sp500.- Print gdp_sp500 and look at the returns for the year 2018.# Use merge_ordered() to merge gdp and sp500 on year and date gdp_sp500 = pd.merge_ordered(gdp, sp500, left_on="year", right_on="date", how="left") # Print gdp_sp500 print(gdp_sp500)2Use merge_ordered(), again similar to before, to merge gdp and sp500 use the function's ability to interpolate missing data to forward fill the missing value for returns, assigning this table to the variable gdp_sp500.# Use merge_ordered() to merge gdp and sp500, interpolate missing value gdp_sp500 = pd.merge_ordered(gdp, sp500, left_on="year", right_on="date", how="left", fill_method="ffill") # Print gdp_sp500 print (gdp_sp500)3- Subset the gdp_sp500 table, select the gdp and returns columns, and save as gdp_returns.- Print the correlation matrix of the gdp_returns table.# Use merge_ordered() to merge gdp and sp500, interpolate missing value gdp_sp500 = pd.merge_ordered(gdp, sp500, left_on='year', right_on='date', how='left', fill_method='ffill') # Subset the gdp and returns columns gdp_returns = gdp_sp500[["gdp", "returns"]] # Print gdp_returns correlation print (gdp_returns.corr())Phillips curve using merge_ordered()There is an economic theory developed by which states that inflation and unemployment have an inverse relationship. The theory claims that with economic growth comes inflation, which in turn should lead to more jobs and less unemployment.You will take two tables of data from the U.S. Bureau of Labor Statistics, containing unemployment and inflation data over different periods, and create a Phillips curve. The tables have different frequencies. One table has a data entry every six months, while the other has a data entry every month. 
You will need to use the entries where you have data within both tables.The tables unemployment and inflation have been loaded for you.**Instructions**Use merge_ordered() to merge the inflation and unemployment tables on date with an inner join, and save the results as inflation_unemploy.Print the inflation_unemploy variable.Using inflation_unemploy, create a scatter plot with unemployment_rate on the horizontal axis and cpi (inflation) on the vertical axis.# Use merge_ordered() to merge inflation, unemployment with inner join inflation_unemploy = pd.merge_ordered(inflation, unemployment, on='date', how='inner' ) # Print inflation_unemploy print(inflation_unemploy) # Plot a scatter plot of unemployment_rate vs cpi of inflation_unemploy inflation_unemploy.plot(kind='scatter', x='unemployment_rate', y='cpi') plt.show()merge_ordered() caution, multiple columnsWhen using merge_ordered() to merge on multiple columns, the order is important when you combine it with the forward fill feature. The function sorts the merge on columns in the order provided. In this exercise, we will merge GDP and population data from the World Bank for the Australia and Sweden, reversing the order of the merge on columns. The frequency of the series are different, the GDP values are quarterly, and the population is yearly. Use the forward fill feature to fill in the missing data. Depending on the order provided, the fill forward will use unintended data to fill in the missing values.The tables gdp and pop have been loaded. 1Use merge_ordered() on gdp and pop, merging on columns date and country with the fill feature, save to ctry_date.# Merge gdp and pop on date and country with fill and notice rows 2 and 3 ctry_date = pd.merge_ordered(gdp, pop, on=['date', 'country'], fill_method='ffill') # Print ctry_date print(ctry_date)2Perform the same merge of gdp and pop, but join on country and date (reverse of step 1) with the fill feature, saving this as date_ctry.# Merge gdp and pop on country and date with fill date_ctry = pd.merge_ordered(gdp, pop, on=['country', 'date'], fill_method='ffill') # Print date_ctry print(date_ctry)Using merge_asof() to study stocksYou have a feed of stock market prices that you record. You attempt to track the price every five minutes. Still, due to some network latency, the prices you record are roughly every 5 minutes. You pull your price logs for three banks, JP Morgan (JPM), Wells Fargo (WFC), and Bank Of America (BAC). You want to know how the price change of the two other banks compare to JP Morgan. Therefore, you will need to merge these three logs into one table. Afterward, you will use the pandas .diff() method to compute the price change over time. Finally, plot the price changes so you can review your analysis.The three log files have been loaded for you as tables named jpm, wells, and bac.**Instructions**- Use merge_asof() to merge jpm (left table) and wells together on the date_time column, where the rows with the nearest times are matched, and with suffixes=('', '_wells'). Save to jpm_wells.- Use merge_asof() to merge jpm_wells (left table) and bac together on the date_time column, where the rows with the closest times are matched, and with suffixes=('_jpm', '_bac'). 
Save to jpm_wells_bac.- Using price_diffs, create a line plot of the close price of JPM, WFC, and BAC only.# Use merge_asof() to merge jpm and wells jpm_wells = pd.merge_asof(jpm, wells, on='date_time' , suffixes=('', '_wells'), direction='nearest' ) # Use merge_asof() to merge jpm_wells and bac jpm_wells_bac = pd.merge_asof(jpm_wells, bac, on='date_time' , suffixes=('_jpm', '_bac'), direction='nearest' ) # Compute price diff price_diffs = jpm_wells_bac.diff() # Plot the price diff of the close of jpm, wells and bac only price_diffs.plot(y=['close_jpm', 'close_wells', 'close_bac']) plt.show()Using merge_asof() to create datasetThe merge_asof() function can be used to create datasets where you have a table of start and stop dates, and you want to use them to create a flag in another table. You have been given gdp, which is a table of quarterly GDP values of the US during the 1980s. Additionally, the table recession has been given to you. It holds the starting date of every US recession since 1980, and the date when the recession was declared to be over. Use merge_asof() to merge the tables and create a status flag if a quarter was during a recession. Finally, to check your work, plot the data in a bar chart.The tables gdp and recession have been loaded for you.**Instructions**- Using merge_asof(), merge gdp and recession on date, with gdp as the left table. Save to the variable gdp_recession.- Create a list using a list comprehension and a conditional expression, named is_recession, where for each row if the gdp_recession['econ_status'] value is equal to 'recession' then enter 'r' else 'g'.- Using gdp_recession, plot a bar chart of gdp versus date, setting the color argument equal to is_recession.# Merge gdp and recession on date using merge_asof() gdp_recession = pd.merge_asof(gdp, recession, on='date') # Create a list based on the row value of gdp_recession['econ_status'] is_recession = ['r' if s=='recession' else 'g' for s in gdp_recession['econ_status']] # Plot a bar chart of gdp_recession gdp_recession.plot(kind='bar', y='gdp', x='date', color=is_recession, rot=90) plt.show()Subsetting rows with .query()In this exercise, you will revisit GDP and population data for Australia and Sweden from the World Bank and expand on it using the .query() method. You'll merge the two tables and compute the GDP per capita. Afterwards, you'll use the .query() method to sub-select the rows and create a plot. Recall that you will need to merge on multiple columns in the proper order.The tables gdp and pop have been loaded for you. 1Use merge_ordered() on gdp and pop on columns country and date with the fill feature, save to gdp_pop and print.# Merge gdp and pop on date and country with fill gdp_pop = pd.merge_ordered(gdp, pop, on=['country','date'], fill_method='ffill')2Add a column named gdp_per_capita to gdp_pop that divides gdp by pop.# Add a column named gdp_per_capita to gdp_pop that divides the gdp by pop gdp_pop['gdp_per_capita'] = gdp_pop['gdp'] / gdp_pop['pop']3Pivot gdp_pop so values='gdp_per_capita', index='date', and columns='country', save as gdp_pivot.# Pivot table of gdp_per_capita, where index is date and columns is country gdp_pivot = gdp_pop.pivot_table('gdp_per_capita', 'date', 'country')4Use .query() to select rows from gdp_pivot where date is greater than equal to 1991-01-01". 
Save as recent_gdp_pop.# Select dates equal to or greater than 1991-01-01 recent_gdp_pop = gdp_pivot.query('date >= "1991-01-01"') # Plot recent_gdp_pop recent_gdp_pop.plot(rot=90) plt.show()Reshaping data with .melt()The .melt() method transforms a wide-format DataFrame into a tall (long) format.**Example**social_fin_tall = social_fin.melt(id_vars=['financial', 'company'], value_vars=['2018', '2017'], var_name=['year'], value_name='dollars') Using .melt() to reshape government dataThe US Bureau of Labor Statistics (BLS) often provides data series in an easy-to-read format - it has a separate column for each month, and each year is a different row. Unfortunately, this wide format makes it difficult to plot this information over time. In this exercise, you will reshape a table of US unemployment rate data from the BLS into a form you can plot using .melt(). You will need to add a date column to the table and sort by it to plot the data correctly.The unemployment rate data has been loaded for you in a table called ur_wide. You are encouraged to view the table in the console before beginning the exercise.**Instructions**- Use .melt() to unpivot all of the columns of ur_wide except year and ensure that the columns with the months and values are named month and unempl_rate, respectively. Save the result as ur_tall.- Add a column to ur_tall named date which combines the year and month columns as a year-month string, and converts it to a date data type.- Sort ur_tall by date and save as ur_sorted.- Using ur_sorted, plot unempl_rate on the y-axis and date on the x-axis.# unpivot everything besides the year column ur_tall = ur_wide.melt(id_vars=['year'], var_name=['month'], value_name='unempl_rate') # Create a date column using the month and year columns of ur_tall ur_tall['date'] = pd.to_datetime(ur_tall['year'] + '-' + ur_tall['month']) # Sort ur_tall by date in ascending order ur_sorted = ur_tall.sort_values(by=['date']) # Plot the unempl_rate by date ur_sorted.plot(y='unempl_rate', x='date') plt.show()Using .melt() for stocks vs bond performanceIt is widespread knowledge that the price of bonds is inversely related to the price of stocks. In this last exercise, you'll review many of the topics in this chapter to confirm this. You have been given a table of percent change of the US 10-year treasury bond price. It is in a wide format where there is a separate column for each year. You will need to use the .melt() method to reshape this table.Additionally, you will use the .query() method to filter out unneeded data. You will merge this table with a table of the percent change of the Dow Jones Industrial stock index price. Finally, you will plot the data.The tables ten_yr and dji have been loaded for you.**Instructions**- Use .melt() on ten_yr to unpivot everything except the metric column, setting var_name='date' and value_name='close'. Save the result to bond_perc.- Using the .query() method, select only those rows where metric equals 'close', and save to bond_perc_close.- Use merge_ordered() to merge dji (left table) and bond_perc_close on date with an inner join, and set suffixes equal to ('_dow', '_bond'). 
Save the result to dow_bond.Using dow_bond, plot only the Dow and bond values.# Use melt on ten_yr, unpivot everything besides the metric column bond_perc = ten_yr.melt(id_vars=['metric'], var_name=['date'], value_name='close' ) # Use query on bond_perc to select only the rows where metric=close bond_perc_close = bond_perc.query('metric == "close"') # Merge (ordered) dji and bond_perc_close on date with an inner join dow_bond = pd.merge_ordered(dji, bond_perc_close, on='date', how='inner', suffixes=('_dow', '_bond')) # Plot only the close_dow and close_bond columns dow_bond.plot(y=['close_dow', 'close_bond'], x='date', rot=90) plt.show()This notebook contains all code needed to re-create Figures 2 and 3 from the paper. I've also included some additional supplemental analyses and code that are relevant to the main paper. Basic setup (import statements, define basic file IO functions, etc.)import warnings warnings.simplefilter('ignore') import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib as mpl import scipy.signal as sgn import pickle import os import hypertools as hyp import future import ipykernel from notebook import notebookapp import json from pathlib import Path import urllib from glob import glob from scipy.spatial.distance import cdist, pdist, squareform from scipy.interpolate import pchip from scipy.signal import find_peaks as localmax from scipy.stats import ttest_ind as ttest %matplotlib inline #annoying hack needed to get the current filename; source: https://stackoverflow.com/questions/12544056/how-do-i-get-the-current-ipython-notebook-name def notebook_path(): """Returns the absolute path of the Notebook or None if it cannot be determined NOTE: works only when the security is token-based or there is also no password """ connection_file = os.path.basename(ipykernel.get_connection_file()) kernel_id = connection_file.split('-', 1)[1].split('.')[0] for srv in notebookapp.list_running_servers(): try: if srv['token']=='' and not srv['password']: # No token and no password, ahem... req = urllib.request.urlopen(srv['url']+'api/sessions') else: req = urllib.request.urlopen(srv['url']+'api/sessions?token='+srv['token']) sessions = json.load(req) for sess in sessions: if sess['kernel']['id'] == kernel_id: return os.path.join(srv['notebook_dir'],sess['notebook']['path']) except: pass # There may be stale entries in the runtime directory return NoneSet path, load Sherlock video and recall trajectoriesCredit:1. Organized and pre-processed data (Heusser et al., 2018): [[link](https://github.com/ContextLab/sherlock-topic-model-paper)]2. 
Original study data (Chen et al., 2017): [[link](https://dataspace.princeton.edu/jspui/handle/88435/dsp01nz8062179)]Descriptions (description of methods may be found [here](https://www.biorxiv.org/content/early/2018/09/07/409987)):- `video_model` contains the scene-by-scene topic trajectories based on human anotations - `recall_model` contains the sentence-by-sentence topic vectors for each participant's recallsrootdir = str(Path(notebook_path()).parent.parent.parent) datadir = os.path.join(rootdir, 'data') figdir = os.path.join(rootdir, 'data', 'figs') if not os.path.exists(figdir): os.makedirs(figdir) video_model, recall_models = np.load(os.path.join(datadir, 'processed', 'models_t100_v50_r10.npy'), allow_pickle=True)Some plotting and data wrangling code# Plot a correlation matrix def plot_corrmat(c, vmin=0, vmax=1, labels=True, cbar=True, title=''): n = c.shape[0] ticks = np.linspace(0, n, 5) sns.heatmap(c, vmin=vmin, vmax=vmax, cbar=cbar) if labels: plt.xlabel('Relative time') plt.ylabel('Relative time') plt.xticks(ticks, np.divide(ticks, n)) plt.yticks(ticks, np.divide(ticks, n)) else: plt.xticks([], []) plt.yticks([], []) plt.title(title, fontsize=32) # Plot a timeseries with error ribbons (95% confidence intervals) def plot_ribbon(x, color='k', ribbon_alpha=0.2): mean = x.mean() ci = 1.96 * np.divide(x.std(), np.sqrt(x.shape[0])) xvals = x.columns h = plt.gcf() plt.fill_between(xvals, mean - ci, mean + ci, color=color, alpha=ribbon_alpha) plt.plot(xvals, mean, color=color) return h # Re-sample an array along the given axis or axes def resample(x, axis=0, nbins=100, interpolate=True): if not (type(x) is pd.DataFrame): x = pd.DataFrame(x) if type(axis) is list: if len(axis) == 0: return x else: if not(type(nbins) is list): nbins = [nbins] * len(axis) x = resample(x, axis[0], nbins=nbins[0], interpolate=interpolate) for i in np.arange(1, len(axis)): x = resample(x, axis[i], nbins=nbins[i], interpolate=interpolate) return x if axis == 1: return resample(x.T, axis=0, nbins=nbins, interpolate=interpolate).T elif not (axis == 0): raise Exception('resampling must be along axis 0 or 1') if interpolate: vals = np.array(x.axes[axis]) minval = np.min(vals) maxval = np.max(vals) newvals = np.linspace(minval, maxval, nbins) normed_newvals = np.divide(newvals, np.max([np.abs(minval), np.abs(maxval)])) y = np.zeros([nbins, x.shape[1]]) for i in np.arange(x.shape[1]): y[:, i] = pchip(vals, x.iloc[:, i], extrapolate=False)(newvals) return pd.DataFrame(y, index=normed_newvals, columns=x.columns) else: if x.shape[0] > nbins: x = x.loc[:nbins, :] vals = np.nan * np.ones(nbins) n = np.min([x.shape[0], nbins]) vals[:n] = np.arange(n) / n template = np.nan * np.ones([nbins, x.shape[1]]) template[:n, :] = x.iloc[:n, :] return pd.DataFrame(template, index=vals, columns=x.columns)Figure 2: temporal correlation matrices of the dynamic content of a television episode and people's recalls of that episode Figure 2An = 100 video_corrs = resample(pd.DataFrame(video_model).T.corr(), axis=[0, 1], nbins=n) plot_corrmat(video_corrs) plt.gcf().savefig(os.path.join(figdir, 'sherlock_video_corrs.pdf'))Figure 2Ball_recall_corrs = [resample(pd.DataFrame(r).T.corr(), axis=[0, 1], nbins=n) for r in recall_models] recall_corrs = np.sum(np.stack(resampled, axis=2), axis=2) / len(recall_models) plot_corrmat(recall_corrs) plt.gcf().savefig(os.path.join(figdir, 'sherlock_recall_corrs.pdf'))Figure 2Cfor i, r in enumerate(all_recall_corrs): plt.clf() plot_corrmat(r, cbar=False, labels=False, title=f'P{i+1}') 
plt.gcf().savefig(os.path.join(figdir, f'sherlock_recall_corrs_P{i}.pdf'))Figure 3: Content drift in a television episode and participants' recalls of that episode Dynamics time warpingFollowing [Heusser et al. (2020)](https://www.biorxiv.org/content/10.1101/409987v3), use time warping to project all participant's recalls onto a common timeline.dtw_files = list(map(lambda x: f's{x}_recall_model_dtw.npy', np.arange(len(recall_models)))) warped_recalls = list(map(lambda f: np.load(os.path.join(datadir, 'processed', f)), dtw_files))Data wrangling and plotting functions...# Vertically align the diagonal entries of a correlation matrix and fill in the remaining entries with nans # This is used for doing autocorrelation analyses def parse_diag(M): n = M.shape[1] x = np.nan * np.zeros([M.shape[0], 2 * n + 1]) for i in np.arange(n): x[i, (n - i):n] = M[i, 0:i] x[i, n] = M[i, i] x[i, (n+1):(n + n - i)] = M[i, (i+1):] return pd.DataFrame(x, columns=np.arange(-n, n+1)) # Plot the average autocorrelation function for a given correlation matrix # - First diagonalize the matrix # - Then (optionally) resample it # - Then plot a timeseries ribbon def autocorr_plot(r, color='k', res=False, nbins=101, pass_matrix=False, show_plot=True): if pass_matrix: M = r else: M = pd.DataFrame(r).T.corr().values x = parse_diag(M).loc[:, 0:] x = x.iloc[:, :-1] #get rid of last timepoint (always all nans) if res: y = resample(x, axis=1, nbins=nbins) else: y = x if show_plot: h = plot_ribbon(y, color=color) plt.ylabel('Correlation') if res: plt.xlabel('Relative time') else: plt.xlabel('Time (%)') return y # Create a sample aligned correlation matrix for the example participant (P0) M = pd.DataFrame(recall_models[0]).T.corr().values np.fill_diagonal(M, np.nan) x = parse_diag(M) h = sns.heatmap(x)Figure 3Acolors = sns.color_palette('Spectral', n_colors=len(recall_models)) for i, r in enumerate(all_recall_corrs): autocorr_plot(r.values, color=colors[i], res=False, pass_matrix=True); autocorr_plot(video_corrs.values, color='k', res=False, pass_matrix=True); plt.ylim([-0.05, 1.05]) plt.xlim([0, 99]) plt.gcf().savefig(os.path.join(figdir, 'reinstatement_resampled.pdf'))Define some stats functions#Fisher z-transformation def r2z(r): warnings.simplefilter('ignore') return 0.5 * (np.log(1 + r) - np.log(1 - r)) #Inverse of Fisher z-transformation def z2r(z): warnings.simplefilter('ignore') if isinstance(z, list): z = np.array(z) r = (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1) if isinstance(r, np.ndarray): r[np.isinf(z) & (z > 0)] = 1 r[np.isinf(z) & (z < 0)] = -1 else: if np.isinf(z) & (z > 0): return 1 elif np.isinf(z) & (z < 0): return -1 return rFigure 2Bdef reinstatement_plot(video, recalls, relative_time, res=False, nbins=101, include_average=True, include_autocorr=True, height=0.1, thresh=None, dist=50): if not (type(recalls) is list): recalls = [recalls] if res: video = resample(video, nbins=nbins).values for r in recalls: recalls[0] = resample(r, nbins=nbins).values ts = np.linspace(0, 1, video.shape[0]) xlabel = 'Relative time' if len(recalls) == 1: colors = ['k'] else: colors = sns.color_palette('Spectral', n_colors=len(recalls)) x = np.zeros(ts.shape) for i, r in enumerate(recalls): t = int(np.round(relative_time * (r.shape[0] - 1))) next = np.squeeze(1 - cdist(np.atleast_2d(r[t, :]), video, 'correlation')) x += r2z(next) plt.plot(ts, next, color=colors[i], linewidth=1, alpha=0.4) y = plt.ylim() if include_autocorr: t = int(np.round(relative_time * video.shape[0] - 1)) autocorr = np.squeeze(1 - cdist(np.atleast_2d(video[t, 
:]), video, 'correlation')) lm = localmax(autocorr, height=height, threshold=thresh, distance=dist)[0] for m in lm: t = ts[m] * video.shape[0] p = ts[m] print(f'Local video max at t = {t} (prop: {p})') #plt.plot([ts[m], ts[m]], [y[0], autocorr[m]], color=[0.5, 0.5, 0.5], linewidth=2) plt.plot(ts, autocorr, color=[0.5, 0.5, 0.5], linewidth=2) if include_average: ave = z2r(x / len(recalls)) plt.plot(ts, ave, color='k', linewidth=2) lm = localmax(ave, height=height, threshold=thresh, distance=dist)[0] lm_colors = sns.cubehelix_palette(n_colors=len(lm)) for i, m in enumerate(lm): t = ts[m] * video.shape[0] p = ts[m] print(f'Local recall max at t = {t} (prop: {p})') plt.plot(ts[m], ave[m], color=lm_colors[i], marker='o', markeredgecolor='w') else: lm = [] plt.xlabel(xlabel) plt.ylabel('Correlation') plt.xlim([np.min(ts), np.max(ts)]) plt.ylim([-0.05, 1.05]) return lm lm = reinstatement_plot(video_model, warped_recalls, 0.328) plt.gcf().savefig(os.path.join(figdir, 'wave_function.pdf'))Local video max at t = 596.3017721518987 (prop: 0.3017721518987342) Local video max at t = 651.3296202531646 (prop: 0.329620253164557) Local recall max at t = 217.10987341772153 (prop: 0.109873417721519) Local recall max at t = 347.1756962025317 (prop: 0.17569620253164558) Local recall max at t = 477.2415189873418 (prop: 0.24151898734177216) Local recall max at t = 570.2886075949368 (prop: 0.2886075949367089) Local recall max at t = 626.3169620253166 (prop: 0.31696202531645573) Local recall max at t = 684.3463291139241 (prop: 0.34632911392405064) Local recall max at t = 751.3802531645571 (prop: 0.38025316455696206) Local recall max at t = 813.4116455696203 (prop: 0.41164556962025317) Local recall max at t = 1113.5635443037975 (prop: 0.5635443037974683) Local recall max at t = 1207.6111392405064 (prop: 0.6111392405063292) Local recall max at t = 1597.8086075949368 (prop: 0.8086075949367089)Key to movie times (from `Sherlock_Segments_1000_NN_2017.xlsx`)annotations = pd.read_excel(os.path.join(datadir, 'raw', 'Sherlock_Segments_1000_NN_2017.xlsx')) starts = annotations['Start Time (s) '] ends = annotations['End Time (s) '] notes = annotations['Scene Details - A Level '] starts_TR = annotations['Start Time (TRs, 1.5s)'] ends_TR = annotations['End Time (TRs, 1.5s)'] cutpoint = 482 #movie is divided into two clips, and times (in seconds, not TRs) reset when the second clip starts correction = ends[cutpoint-1] starts.iloc[cutpoint:] += correction ends.iloc[cutpoint:] += correction def btwn(x, a, b): return (x >= a) & (x <= b) def print_annotation(t): def print_time(n, x): if n >= cutpoint: clip = 2 c = correction else: clip = 1 c = 0 x -= c minute = int(np.floor(x / 60)) second = int(x - (60*minute)) return f'c{clip}_{minute}m{second}s' i = np.where(btwn(t, starts_TR, ends_TR))[0][0] print(f'Time: {print_time(i, starts.iloc[i])}--{print_time(i, ends.iloc[i])} (TR: {int(starts_TR.iloc[i])}--{int(ends_TR.iloc[i])}); Description: {notes.iloc[i]}\n') for m in lm: print_annotation(m)Time: c1_5m22s--c1_5m28s (TR: 216--219); Description: Beth is seen outside in the dark at night having slipped out of the venue. She is next to her car looking for her keys in her bag. She sighs when she can't find them and looks around helplessly. Time: c1_8m39s--c1_8m40s (TR: 347--347); Description: John uncomfortably says: "Yeah, I'm not the …" and never finishes his sentence. 
Time: c1_11m53s--c1_11m57s (TR: 476--478); Description: John turns back to Sherlock and says: "We don't know a thing about each other; I don't know where we're meeting; I don't even know your name." Time: c1_14m13s--c1_14m17s (TR: 570--571); Description: Sherlock trots up the stairs to the first floor landing, then pauses and waits for John. Time: c1_15m37s--c1_15m41s (TR: 626--627); Description: John looks up at Sherlock and explains: "You said you could identify a software designer by his tie and an airplane pilot by his left thumb." Time: c1_17m5s--c1_17m7s (TR: 684--685); Description: Mrs.[...]Test runs of the SEIR model for Belgium - coronavirus**Author:** **Date:** 14 march 2020 **Licence:** [CC-BY](https://creativecommons.org/licenses/by/4.0/)*Warning:* I am not an epidemiologist. I wrote this notebook to have a startingpoint for comparing estimates that I found online, for instance the one by NicolasVandewalle show below.I used a standard epidemiological model (SEIR) along with parameters from a recentarticle by a team of chinese scientists for the case of Wuhan.The absolute values are likely incorrect. The trends are good to know.![](figure_vdwnico_20200308.jpeg)Source: [@vdwnico](https://twitter.com/vdwnico/status/1236542044685959168) on Twitter, 8 march 2020. Data from Belgian governmentBelow, I write down the number of new cases in Belgium since march 2.`data_I` is the total of infected cases (there are no recoveries recorded yet).new_I = [2, 6, 5, 10, 27, 59, 60, 31, 39, 28, 47, 85, 160, 133, 197, 172] # starting march 1 data_I = np.cumsum(new_I) data_dates = np.arange('2020-03-01', len(new_I), dtype=np.datetime64)For estimating the healthcare network saturation, I use the estimate of Philippe Devos regarding theavailability of intensive care beds and the estimate of 6.1% of infected people needing intensive care(no other information is taken from that article).Article by in La Libre Belgique, 2 march 2020:https://www.lalibre.be/debats/opinions/coronavirus-sans-mesures-de-precaution-drastiques-on-risque-d-avoir-850-000-personnes-infectees-et-50-000-morts-en-belgique-5e5cf60f9978e23106a0bfd9available_icu_beds = 140 fraction_of_icu_need = 0.061 saturation_of_icu_beds = 140/fraction_of_icu_need print("Number of cases to saturate the ICU beds", saturation_of_icu_beds)Basic logarithmic plot and trendBelow, I plot the data on a lin-log plot to assess the exponentialcharacter of the number of confirmed infected cases.I superimpose an exponential trend for the coming month.plt.figure() trend_start, trend_stop = 5, len(data_I)-1 plt.plot(data_dates, data_I, marker='o', label='reported I') more_dates = np.arange(data_dates[0], data_dates[0]+30) delta = np.array(more_dates - data_dates[trend_start], dtype=float) current_exponential_rate = np.log(data_I[trend_stop]/data_I[trend_start]) / np.array(data_dates[trend_stop]-data_dates[trend_start], dtype=float) plt.plot(more_dates, data_I[trend_start]*np.exp(delta*current_exponential_rate), marker='v') plt.axhline(saturation_of_icu_beds) plt.yscale('log') plt.grid() print(f"Current exponential rate {current_exponential_rate}") print(f"Multiplication per day {np.exp(current_exponential_rate)}") ax = plt.gca() plt.setp(ax.get_xticklabels(), rotation=30, ha="right");Definition of the model and parametersI take the SEIR model defined in Wang et al. 
Cell Discovery (2020) 6:10 https://doi.org/10.1038/s41421-020-0148-0# parameters for the model, page 6 σ = 1/5.2 # inverse of mean period γ = 1/18 # time for which a case should be recovered N = 11e6 / 3 # population size # definition of model page 6 def SEIR(x, t, R0): S, E, I, R = x β = R0*γ return [ - β*I*S/N, β*I*S/N - σ*E, σ*E - γ*I, γ*I, ]Estimate for BelgiumI start with the record of 2 infected cases on march 2 at https://www.info-coronavirus.be/fr/2020/03/01/deuxieme-infection-au-covid-19-en-belgique/ and list the new cases day per day (see above).The "day 0" is thus march 2 and the initial number of infected people is 2.To circumvent the low statistics of the first few days, I allow to delay the initial conditionof the SEIR model by an offset of `start_day`, taking the corresponding cumulated number of casesfor I0.start_day = 1 I0 = data_I[start_day] E0 = 20*I0 # number of initially exposed people, see Wang et al t = np.arange(0, 200, dtype=float) # running for 100 days t_display = np.arange(data_dates[0], len(t), dtype=np.datetime64) + start_day plt.figure() plt.plot(data_dates, data_I, marker='o', label='reported I') for R0 in [1.9, 3.1, 4.5, 7]: model = odeint(SEIR, [N-I0-E0, E0, I0, 0], t, args=(R0,)) plt.plot(t_display, model[:,2], label=f'model I. R0={R0}') plt.yscale('log') plt.xlabel('time') plt.axhline(saturation_of_icu_beds, label='saturation', c='k', ls='--') ylo, yhi = plt.ylim() plt.ylim(data_I[0], yhi) plt.legend() plt.grid() ax = plt.gca() plt.setp(ax.get_xticklabels(), rotation=30, ha="right"); start_day = 1 I0 = data_I[start_day] E0 = 20*I0 # number of initially exposed people, see Wang et al t = np.arange(0, 30, dtype=float) # running for 100 days t_display = np.arange(data_dates[0], len(t), dtype=np.datetime64) + start_day plt.figure() plt.plot(data_dates, data_I, marker='o', label='reported I') for R0 in [1.9, 3.1, 4.5, 7]: model = odeint(SEIR, [N-I0-E0, E0, I0, 0], t, args=(R0,)) plt.plot(t_display, model[:,2], label=f'model I. R0={R0}') plt.yscale('log') plt.xlabel('time') plt.axhline(saturation_of_icu_beds, label='saturation', c='k', ls='--') ylo, yhi = plt.ylim() plt.ylim(data_I[0], yhi) plt.legend() plt.grid() ax = plt.gca() plt.setp(ax.get_xticklabels(), rotation=30, ha="right");Risk of under-reportingAgain, as a test, I use below twice the number of reported cases tounderstand the risk related to under-reporting.If the actual number of cases is twice as high, the peak infection arisesearlier (mid-end july instead of mid-august) and the saturation of ICU bedsoccurs late march instead of mid-april.I0 = 2*data_I[start_day] E0 = 20*I0 t = np.arange(0, 200, dtype=float) # running for 100 days t_display = np.arange(data_dates[0], len(t), dtype=np.datetime64) + start_day plt.figure() plt.plot(data_dates, 2*data_I, marker='o', label='reported I') for R0 in [1.9, 3.1, 4.5]: model = odeint(SEIR, [N-I0-E0, E0, I0, 0], t, args=(R0,)) plt.plot(t_display, model[:,2], label=f'model I. 
R0={R0}') plt.yscale('log') plt.xlabel('time') plt.axhline(saturation_of_icu_beds, label='saturation', c='k', ls='--') plt.legend() plt.grid() ax = plt.gca() plt.setp(ax.get_xticklabels(), rotation=30, ha="right"); start_day = 3 I0 = 2*data_I[start_day] E0 = 20*I0 t = np.arange(0, 30, dtype=float) # running for 100 days t_display = np.arange(data_dates[0], len(t), dtype=np.datetime64) + start_day plt.figure() plt.plot(data_dates, 2*data_I, marker='o', label='reported I') for R0 in [1.9, 3.1, 4.5]: model = odeint(SEIR, [N-I0-E0, E0, I0, 0], t, args=(R0,)) plt.plot(t_display, model[:,2], label=f'model I. R0={R0}') plt.yscale('log') plt.xlabel('time') plt.axhline(saturation_of_icu_beds, label='saturation', c='k', ls='--') plt.legend() plt.grid() ax = plt.gca() plt.setp(ax.get_xticklabels(), rotation=30, ha="right");Ingest Data From s3 to BTrDBThis notebook will demonstrate how to subclass `CloudMixin` to create a `DataParser` that can load data from AWS s3.import os import s3fs import btrdb import pandas as pd from pgimport.csv_parser import MyCSVParser from pgimport.ingest import DataIngestor from pgimport.cloud import S3Mixin, S3FileCreating a Custom FileWe can extend the `File` object to include a `count`, which can be passed to a `DataIngestor` and allows for a progress bar during ingestion.class S3ProgBarFile(S3File): def __init__(self, path, count=True, header=False): super().__init__(path) # TODO: this works but its slow, need to find a more efficient way to do this if count: header_rows = 1 if header else 0 df = pd.read_csv(path) self.count = (len(df) - header_rows) * (len(df.columns) - 1)Creating a Custom DataParserWe can create a custom `DataParser` by extending the `MyCSVParser` and combining it with the `S3Mixin`, which provides a connection to s3 by assigning a `client` attribute that is equivalent to `boto3.client("s3")`class S3CSVParser(S3Mixin, MyCSVParser): """ Parameters ----------- collection_prefix: str prefix to add to all streams' collection names regex: str regex string to use to retrieve collection from file name metadata: dict or str either a dict of metadata or a str filename referring to a yaml/json metadata file meta_func: callback function to use to map metadata to Stream objects bucket: str s3 bucket that contains raw data files s3_prefix: str (optional) subdirectory within bucket that contains raw data files **kwargs: dict (optional) key/value pairs specifying required AWS credentials. 
Can be left as None if credentials are stored as environtment variables """ def __init__(self, collection_prefix=None, regex=None, metadata=None, meta_func=None, bucket=None, s3_prefix=None, **kwargs): MyCSVParser.__init__(self, None, collection_prefix=collection_prefix, regex=regex, metadata=metadata, meta_func=meta_func) # establish attrs that are specific to this class self.bucket = bucket self.s3_prefix = s3_prefix # init S3Mixin, connects to s3 and provides a handle via self.client S3Mixin.__init__(self, **kwargs) self.client = self.connect() def collect_files(self): return [S3ProgBarFile(fpath, header=True) for fpath in self.list_objects(self.bucket, prefix=self.s3_prefix)]Ingest s3 Data Next we will use the `S3CSVParser` to load data from s3, create `Stream` objects, and pass them to the `DataIngestor` for insertion into BTrDB.bucket = "your-bucket" subdir = "subdirectory-within-bucket" # explicity setting credentials is optional, you can also just store them as env vars aws_creds = { "aws_access_key_id": os.environ["AWS_ACCESS_KEY_ID"], "aws_secret_access_key": os.environ["AWS_SECRET_ACCESS_KEY"], "aws_session_token": os.environ["AWS_SESSION_TOKEN"] } s3 = S3CSVParser(collection_prefix="test_ingest", bucket=bucket, s3_prefix=subdir, **aws_creds) files = s3.collect_files() total_points = sum([f.count for f in files]) print(f"Found {len(files)} files") print(f"Files contain {total_points} points") # replace with your BTrDB credentials conn = btrdb.connect(os.environ["BTRDB_ENDPOINTS"], apikey=os.environ["BTRDB_API_KEY"]) ingestor = DataIngestor(conn, total_points=total_points) for stream in s3.instantiate_streams(files): ingestor.ingest(stream)6480060it [05:20, 27569.91it/s]Solutions - Problem 1%%HTML Get total number of flights as well as number of flights which are delayed in departure and number of flights delayed in arrival.* Output should contain 3 columns - **FlightCount**, **DepDelayedCount**, **ArrDelayedCount** Let us start spark context for this Notebook so that we can execute the code provided. You can sign up for our [10 node state of the art cluster/labs](https://labs.itversity.com/plans) to learn Spark SQL using our unique integrated LMS.from pyspark.sql import SparkSession import getpass username = getpass.getuser() spark = SparkSession. \ builder. \ config('spark.ui.port', '0'). \ config("spark.sql.warehouse.dir", f"/user/{username}/warehouse"). \ enableHiveSupport(). \ appName(f'{username} | Python - Basic Transformations'). \ master('yarn'). \ getOrCreate()If you are going to use CLIs, you can use Spark SQL using one of the 3 approaches.**Using Spark SQL**```spark2-sql \ --master yarn \ --conf spark.ui.port=0 \ --conf spark.sql.warehouse.dir=/user/${USER}/warehouse```**Using Scala**```spark2-shell \ --master yarn \ --conf spark.ui.port=0 \ --conf spark.sql.warehouse.dir=/user/${USER}/warehouse```**Using Pyspark**```pyspark2 \ --master yarn \ --conf spark.ui.port=0 \ --conf spark.sql.warehouse.dir=/user/${USER}/warehouse``` Reading airtraffic dataairtraffic_path = "/public/airtraffic_all/airtraffic-part/flightmonth=200801" airtraffic = spark. \ read. 
\ parquet(airtraffic_path) airtraffic.printSchema()root |-- Year: integer (nullable = true) |-- Month: integer (nullable = true) |-- DayofMonth: integer (nullable = true) |-- DayOfWeek: integer (nullable = true) |-- DepTime: string (nullable = true) |-- CRSDepTime: integer (nullable = true) |-- ArrTime: string (nullable = true) |-- CRSArrTime: integer (nullable = true) |-- UniqueCarrier: string (nullable = true) |-- FlightNum: integer (nullable = true) |-- TailNum: string (nullable = true) |-- ActualElapsedTime: string (nullable = true) |-- CRSElapsedTime: integer (nullable = true) |-- AirTime: string (nullable = true) |-- ArrDelay: string (nullable = true) |-- DepDelay: string (nullable = true) |-- Origin: string (nullable = true) |-- Dest: string (nullable = true) |-- Distance: string (nullable = true) |-- TaxiIn: string (nullable = true) |-- TaxiOut: string (nullable = true) |-- Cancelled: integer (nullable = true) |-- CancellationCode: string (nullable = true) |-- Diverted: integer (nullable = true) |-- Car[...]Get flights with delayed arrival# SQL Style airtraffic.filter("IsArrDelayed = 'YES' AND Cancelled = 0").show() # Data Frame Style airtraffic.filter((airtraffic["IsArrDelayed"] == 'YES') & (airtraffic["Cancelled"] == 0)).show() airtraffic.filter((airtraffic.IsArrDelayed == 'YES') & (airtraffic.Cancelled == 0)).show()+----+-----+----------+---------+-------+----------+-------+----------+-------------+---------+-------+-----------------+--------------+-------+--------+--------+------+----+--------+------+-------+---------+----------------+--------+------------+------------+--------+-------------+-----------------+------------+------------+ |Year|Month|DayofMonth|DayOfWeek|DepTime|CRSDepTime|ArrTime|CRSArrTime|UniqueCarrier|FlightNum|TailNum|ActualElapsedTime|CRSElapsedTime|AirTime|ArrDelay|DepDelay|Origin|Dest|Distance|TaxiIn|TaxiOut|Cancelled|CancellationCode|Diverted|CarrierDelay|WeatherDelay|NASDelay|SecurityDelay|LateAircraftDelay|IsArrDelayed|IsDepDelayed| +----+-----+----------+---------+-------+----------+-------+----------+-------------+---------+-------+-----------------+--------------+-------+--------+--------+------+----+--------+------+-------+---------+----------------+--------+------------+------------+--------+-------------+-----------------+------------+------------+ |2008| 1| [...]Get delayed countsairtraffic. \ select('IsDepDelayed', 'IsArrDelayed', 'Cancelled'). \ distinct(). \ show() ## Departure Delayed Count airtraffic. \ filter(airtraffic.IsDepDelayed == "YES"). \ count() ## Departure Delayed Count airtraffic. \ filter((airtraffic.IsDepDelayed == "YES") & (airtraffic.Cancelled == 0)). \ count() ## Arrival Delayed Count airtraffic. \ filter(airtraffic.IsArrDelayed == "YES"). \ count() ## Arrival Delayed Count airtraffic. \ filter((airtraffic.IsArrDelayed == "YES") & (airtraffic.Cancelled == 0)). \ count() airtraffic. \ filter("(IsDepDelayed = 'YES' OR IsArrDelayed = 'YES') AND Cancelled = 0"). \ select('Year', 'Month', 'DayOfMonth', 'FlightNum', 'IsDepDelayed', 'IsArrDelayed' ). \ show() from pyspark.sql.functions import col, lit, count, sum, expr ## Both Departure Delayed and Arrival Delayed airtraffic. \ filter('Cancelled = 0'). \ agg(count(lit(1)).alias("FlightCount"), sum(expr("CASE WHEN IsDepDelayed = 'YES' THEN 1 ELSE 0 END")).alias("DepDelayedCount"), sum(expr("CASE WHEN IsArrDelayed = 'YES' THEN 1 ELSE 0 END")).alias("ArrDelayedCount") ). \ show() from pyspark.sql.functions import when ## Both Departure Delayed and Arrival Delayed airtraffic. 
\ filter('Cancelled = 0'). \ agg(count(lit(1)).alias("FlightCount"), sum(when(col('IsDepDelayed') == 'YES', 1).otherwise(lit(0))).alias("DepDelayedCount"), sum(when(col('IsArrDelayed') == lit('YES'), 1).otherwise(lit(0))).alias("ArrDelayedCount") ). \ show()+-----------+---------------+---------------+ |FlightCount|DepDelayedCount|ArrDelayedCount| +-----------+---------------+---------------+ | 588366| 247905| 280663| +-----------+---------------+---------------+Assignment 3: Convolutional neural networks for classificationThe goal of this assignment is to demonstrate the Keras API for implementing and training convolutional neural network architectures. Furthermore, you get to work with the PatchCAMELYON (or PCAM) dataset that you should also use for the main project work. Essentially, this assignment demonstrated a minimal working example for the main project work. PreliminariesThe full working code of the example convolutional neural network can be found in the `cnn.py` file. As before, we will go over the components of the code in this Python notebook, however, you are strongly encouraged to perform all experiments using `cnn.py`. We start with importing the required libraries and defining the size of the images in the PCAM dataset.import os import numpy as np from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Dense, Flatten from keras.layers import Conv2D, MaxPool2D from keras.optimizers import SGD from keras.callbacks import ModelCheckpoint, TensorBoard # unused for now, to be used for ROC analysis from sklearn.metrics import roc_curve, auc # the size of the images in the PCAM dataset IMAGE_SIZE = 96Using TensorFlow backend.Instantiating data generatorsCompared to the MNIST dataset, the PatchCAMELYON dataset is too big to fit in the working memory of most personal computers. This is why, we need to define some functions that will read the image data batch by batch, so only a single batch of images needs to be stored in memory at one time point. We can use the handy ImageDataGenerator function from the Keras API to do this. Note that the generators are defined within a function that returns them as output arguments. This function will later be called from the main code body.def get_pcam_generators(base_dir, train_batch_size=32, val_batch_size=32): # dataset parameters TRAIN_PATH = os.path.join(base_dir, 'train+val', 'train') VALID_PATH = os.path.join(base_dir, 'train+val', 'valid') RESCALING_FACTOR = 1./255 # instantiate data generators datagen = ImageDataGenerator(rescale=RESCALING_FACTOR) train_gen = datagen.flow_from_directory(TRAIN_PATH, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=train_batch_size, class_mode='binary') val_gen = datagen.flow_from_directory(VALID_PATH, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=val_batch_size, class_mode='binary', shuffle=False) return train_gen, val_genBuilding a convolutional neural network classification modelThe convolutional neural network model is also defined within a function. Organizing the code into functions instead of piling everything up in a single script makes the code more clear to read and understand, and helps reuse functionality that is already implemented. For example, we can use the `get_pcam_generators()` function to create data generators with different batch sizes just by calling the function with a different set of parameters. Or, we can use the `get_mode()` function to generate networks with different number of feature maps (see below). 
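For instance, a minimal usage sketch (assuming the two helper functions defined below, and with a placeholder dataset path) could reuse them with different settings:

```python
# Hypothetical reuse of the helpers defined below; the path is a placeholder
train_gen, val_gen = get_pcam_generators('/path/to/pcam_data', train_batch_size=64)
model = get_model(first_filters=64, second_filters=128)
```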
The convolutional neural network model consists of two convolutional layers, each one followed by a max pooling layer and a fully connected layer with 64 neurons. The kernal size and number of filters of the two convolutional layers, and the size of the max pooling regions can be passed as input parameters to the function (however, note that default values are set so the function can be called without parameters). ReLU nonlinearities are used throughout the network, except for the output neuron that is activated with a sigmoid.def get_model(kernel_size=(3,3), pool_size=(4,4), first_filters=32, second_filters=64): # build the model model = Sequential() model.add(Conv2D(first_filters, kernel_size, activation = 'relu', padding = 'same', input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3))) model.add(MaxPool2D(pool_size = pool_size)) model.add(Conv2D(second_filters, kernel_size, activation = 'relu', padding = 'same')) model.add(MaxPool2D(pool_size = pool_size)) model.add(Flatten()) model.add(Dense(64, activation = 'relu')) model.add(Dense(1, activation = 'sigmoid')) # compile the model model.compile(SGD(lr=0.01, momentum=0.95), loss = 'binary_crossentropy', metrics=['accuracy']) return modelNow, the two functions that define the model and the data generators can be called from the main code body. Before executing the code block below, do not forget to change the path where the PatchCAMELYON dataset is located (that is, the location of the folder that contains `train+val` that you previously downloaded and unpacked).If everything is correct, the following output will be printed on screen after executing the code block:`Found 144000 images belonging to 2 classes.``Found 16000 images belonging to 2 classes.`# get the model model = get_model() # get the data generators train_gen, val_gen = get_pcam_generators('C:/Users/max/stack/TUE/Sync_laptop/Imaging project/.data')WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:3976: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\s[...]If you want to know the shapes of the outputs of all layers in the network (the dimensionality of the feature maps), you can print them in the following way:for layer in model.layers: print(layer.output_shape)(None, 96, 96, 32) (None, 24, 24, 32) (None, 24, 24, 64) (None, 6, 6, 64) (None, 2304) (None, 64) (None, 1)Training and evaluating the modelFinally, the model can be trained using data generated by the data generators and then evaluated. This is done in a similar way to the previous assignment. One notable exception is that now the `fit_generator()` function is used, which works with data generators instead of a dataset that is fully stored in memory. 
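As a quick sanity check of the step counts used by the training code below (a sketch assuming the dataset sizes reported above and the default batch size of 32):

```python
# 144,000 training and 16,000 validation images, batch size 32 (see the output above)
train_steps = 144000 // 32   # 4500 batches per training epoch
val_steps = 16000 // 32      # 500 batches per validation pass
```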
Furthermore, in addition to the Tensorflow callback, an additional callback that saves the "best" version of the trained model to a file is added, and the model structure is saved to a json file. This enables loading the model and corresponding weights at a later time point (e.g. when we want to evaluate the model on a test set).# save the model and weights model_name = 'my_first_cnn_model' model_filepath = model_name + '.json' weights_filepath = model_name + '_weights.hdf5' model_json = model.to_json() # serialize model to JSON with open(model_filepath, 'w') as json_file: json_file.write(model_json) # define the model checkpoint and Tensorboard callbacks checkpoint = ModelCheckpoint(weights_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min') tensorboard = TensorBoard(os.path.join('logs', model_name)) callbacks_list = [checkpoint, tensorboard] # train the model train_steps = train_gen.n//train_gen.batch_size val_steps = val_gen.n//val_gen.batch_size history = model.fit_generator(train_gen, steps_per_epoch=train_steps, validation_data=val_gen, validation_steps=val_steps, epochs=3, callbacks=callbacks_list)WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:986: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:973: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:2741: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:174: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:181: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. WARNING:tensorflow:From C:\Users\max\Anaconda3\l[...]word2vecの高速化* Embeddingレイヤの導入* Negative Samplingという新しい損失関数の導入。 問題点* 入力層がone-hot表現のため語彙数が増えるとそれ相応のメモリサイズが必要になる。→Embedding層の追加* 中間層の問題(多くの計算が必要になる)→Negative sampling Embeddingレイヤ* 単語IDに該当する行を抜き出すレイヤ(**Embedding層**)* 単語の密なベクトル表現を単語の埋め込み(Embedding)や単語の分散表現と呼ばれる。 中間層の問題* 多値分類から二値分類に変換する。(これはAであるか否かみたいな感じ) シグモイド関数と交差エントロピー誤差* 多値分類の場合は出力層にはソフトマックス関数、損失関数には交差エントロピー誤差* 二値分類の出力層にはシグモイド関数、損失関数には交差エントロピー誤差$$L=-(t\log{y}+(1-t)\log{(1-y)})$$import numpy as np import os class Embedding: def __init__(self, W): self.params = [W] self.grads = [np.zeros_like(W)] self.idx = None def forward(self, idx): W, = self.params self.idx = idx out = W[idx]#特定の行を抜き出す return out # def backward(self, dout): # dW, = self.grads # dW[...] = 0 # dW[self.idx] = dout # return None #重複問題が発生するため、代入ではなく加算を行う。 def backward(self, dout): dW, = self.grads dW[...] 
= 0 np.add.at(dW, self.idx, dout)#Numpyの方が早い return None class EmbeddingDot: def __init__(self, W): self.embed = Embedding(W) self.params = self.embed.params self.grads = self.embed.grads self.cache = None def forward(self, h, idx): target_W = self.embed.forward(idx) out = np.sum(target_W * h, axis=1) self.cache = (h, target_W) return out def backward(self, dout): h, target_W = self.cache dout = dout.reshape(dout.shape[0], 1) dtarget_W = dout * h self.embed.backward(dtarget_W) dh = dout * target_W return dhNegative Sampling* 一部だけ取ってくる* コーパスから各単語の出現回数を求め、確率分布で表現する。(その確率分布から単語をサンプリングする)print(np.random.choice(10)) words = ["you", "say", "goodbye", "I", "hello", "."] print(np.random.choice(words)) print(np.random.choice(words, size=5)) print(np.random.choice(words, size=5, replace=False)) p = [0.5, 0.1, 0.05, 0.2, 0.05, 0.1] print(np.random.choice(words, p=p))3 you ['say' 'hello' '.' 'I' 'you'] ['goodbye' '.' 'hello' 'say' 'I'] .* 次のようにすることで少しだけ確率が上がる$$P^{'}(w_i)=\frac{P(w_i)^{0.75}}{\sum_j^{n}P(w_j)^{0.75}}$$p = [0.7, 0.29, 0.01] new_p = np.power(p, 0.75) new_p /= np.sum(new_p) print(new_p) os.chdir("/work/NaturalProcessing/deep-learning-from-scratch-2-master/") corpus = np.array([0, 1 ,2, 3, 4, 1, 2, 3]) power = 0.75 sample_size = 2 from ch04.negative_sampling_layer import UnigramSampler sampler = UnigramSampler(corpus, power, sample_size) target = np.array([1, 3, 0]) negative_sample = sampler.get_negative_sample(target) print(negative_sample) from common.layers import SigmoidWithLoss class NegativeSamplingLoss: def __init__(self, W, corpus, power=0.75, sample_size=5): self.sample_size = sample_size self.sampler = UnigramSampler(corpus, power, sample_size) self.loss_layer = [SigmoidWithLoss() for _ in range(sample_size + 1)] self.embed_dot_layers = [EmbeddingDot(W) for _ in range(sample_size + 1)] self.params, self.grads = [], [] for layer in self.embed_dot_layers: self.params += layer.params self.grads += layer.grads def forward(self, h, target): batch_size = target.shape[0] negative_sample = self.sampler.get_negative_sample(target) score = self.embed_dot_layers[0].forward(h, target) correct_label = np.ones(batch_size, dtype=np.int32) loss = self.loss_layer[0].forward(score, correct_label) negative_label = np.zeros(batch_size, dtype=np.int32) for i in range(self.sample_size): negative_target = negative_sample[:, i] score = self.embed_dot_layers[1 + i].forward(h, negative_target) loss += self.loss_layer[1 + i].forward(score, negative_label) return loss def backward(self, dout=1): dh = 0 for l0, l1 in zip(self.loss_layer, self.embed_dot_layers): dscore = l0.backward(dout) dh += l1.backward(dscore) return dh class CBOW: def __init__(self, vocab_size, hidden_size, window_size, corpus): V, H = vocab_size, hidden_size W_in = 0.01 * np.random.randn(V, H).astype("f") W_out = 0.01 * np.random.randn(V, H).astype("f") self.in_layers = [] for i in range(2 * window_size): layer = Embedding(W_in) self.in_layers.append(layer) self.ns_loss = NegativeSamplingLoss(W_out, corpus, power=0.75, sample_size=5) layers = self.in_layers + [self.ns_loss] self.params, self.grads = [], [] for layer in layers: self.params += layer.params self.grads += layer.grads self.word_vecs = W_in def forward(self, contexts, target): h = 0 for i, layer in enumerate(self.in_layers): h += layer.forward(contexts[:, i]) h *= 1 / len(self.in_layers) loss = self.ns_loss.forward(h, target) return loss def backword(self, dout=1): dout = self.ns_loss.backword(dout) dout *= 1 / len(self.in_layers) for layer in self.in_layers: 
layer.backword(dout) return None from common import config # GPUで実行する場合は、下記のコメントアウトを消去(要cupy) # =============================================== # config.GPU = True # =============================================== from common.np import * import pickle from common.trainer import Trainer from common.optimizer import Adam from ch04.cbow import CBOW from ch04.skip_gram import SkipGram from common.util import create_contexts_target, to_cpu, to_gpu from dataset import ptb # ハイパーパラメータの設定 window_size = 5 hidden_size = 100 batch_size = 100 max_epoch = 3 # データの読み込み corpus, word_to_id, id_to_word = ptb.load_data('train') vocab_size = len(word_to_id) contexts, target = create_contexts_target(corpus, window_size) if config.GPU: contexts, target = to_gpu(contexts), to_gpu(target) # モデルなどの生成 model = CBOW(vocab_size, hidden_size, window_size, corpus) # model = SkipGram(vocab_size, hidden_size, window_size, corpus) optimizer = Adam() trainer = Trainer(model, optimizer) # 学習開始 trainer.fit(contexts, target, max_epoch, batch_size) trainer.plot() # 後ほど利用できるように、必要なデータを保存 word_vecs = model.word_vecs if config.GPU: word_vecs = to_cpu(word_vecs) params = {} params['word_vecs'] = word_vecs.astype(np.float16) params['word_to_id'] = word_to_id params['id_to_word'] = id_to_word pkl_file = 'cbow_params.pkl' # or 'skipgram_params.pkl' with open(pkl_file, 'wb') as f: pickle.dump(params, f, -1) from common.util import most_similar, analogy import pickle pkl_file = 'cbow_params.pkl' # pkl_file = 'skipgram_params.pkl' with open(pkl_file, 'rb') as f: params = pickle.load(f) word_vecs = params['word_vecs'] word_to_id = params['word_to_id'] id_to_word = params['id_to_word'] # most similar task querys = ['you', 'year', 'car', 'toyota'] for query in querys: most_similar(query, word_to_id, id_to_word, word_vecs, top=5) # analogy task print('-'*50) analogy('king', 'man', 'queen', word_to_id, id_to_word, word_vecs) analogy('take', 'took', 'go', word_to_id, id_to_word, word_vecs) analogy('car', 'cars', 'child', word_to_id, id_to_word, word_vecs) analogy('good', 'better', 'bad', word_to_id, id_to_word, word_vecs)[query] you we: 0.89990234375 i: 0.89208984375 why: 0.8662109375 else: 0.8447265625 really: 0.83447265625 [query] year month: 0.94189453125 summer: 0.86669921875 week: 0.8662109375 spring: 0.8232421875 decade: 0.6748046875 [query] car truck: 0.79541015625 supercomputer: 0.77734375 portable: 0.77392578125 cap: 0.75927734375 machine: 0.75634765625 [query] toyota weyerhaeuser: 0.8603515625 hewlett-packard: 0.84912109375 packaging: 0.84619140625 alberta: 0.841796875 occidental: 0.841796875 -------------------------------------------------- [analogy] king:man = queen:? woman: 5.2578125 veto: 4.9375 thing: 4.90234375 wife: 4.6171875 know: 4.421875 [analogy] take:took = go:? was: 3.837890625 're: 3.724609375 years: 3.564453125 a.m: 3.48046875 seems: 3.37890625 [analogy] car:cars = child:? a.m: 6.11328125 daffynition: 4.58984375 i: 4.390625 rape: 4.2109375 years: 3.9921875 [analogy] good:better = bad:? 
more: 4.99609375 less: 4.2421875 than: 4.07421[...]Load Datasales = pd.read_csv('../data/external/sales_train.csv.gz') shops = pd.read_csv('../data/external/shops.csv') items = pd.read_csv('../data/external/items.csv') item_cats = pd.read_csv('../data/external/item_categories.csv') sales = sales[sales['shop_id'].isin([26, 27, 28])]Get Feature Matrixdef downcast_dtypes(df): ''' Changes column types in the dataframe: `float64` type to `float32` `int64` type to `int32` ''' # Select columns to downcast float_cols = [c for c in df if df[c].dtype == "float64"] int_cols = [c for c in df if df[c].dtype == "int64"] # Downcast df[float_cols] = df[float_cols].astype(np.float32) df[int_cols] = df[int_cols].astype(np.int32) return dfCreate Grid# Create "grid" with columns index_cols = ['shop_id', 'item_id', 'date_block_num'] # For every month we create a grid from all shops/items combinations from that month grid = [] for block_num in sales['date_block_num'].unique(): cur_shops = sales.loc[sales['date_block_num'] == block_num, 'shop_id'].unique() cur_items = sales.loc[sales['date_block_num'] == block_num, 'item_id'].unique() grid.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])),dtype='int32')) # Turn the grid into a dataframe grid = pd.DataFrame(np.vstack(grid), columns = index_cols,dtype=np.int32) # Groupby data to get shop-item-month aggregates gb = sales.groupby(index_cols,as_index=False).agg({'item_cnt_day': 'sum'}).rename(columns = {'item_cnt_day':'target'}) # Join it to the grid all_data = pd.merge(grid, gb, how='left', on=index_cols).fillna(0) # Same as above but with shop-month aggregates gb = sales.groupby(['shop_id', 'date_block_num'],as_index=False).agg({'item_cnt_day': 'sum'}).rename(columns = {'item_cnt_day':'target_shop'}) all_data = pd.merge(all_data, gb, how='left', on=['shop_id', 'date_block_num']).fillna(0) # Same as above but with item-month aggregates gb = sales.groupby(['item_id', 'date_block_num'],as_index=False).agg({'item_cnt_day': 'sum'}).rename(columns = {'item_cnt_day':'target_item'}) all_data = pd.merge(all_data, gb, how='left', on=['item_id', 'date_block_num']).fillna(0) # Downcast dtypes from 64 to 32 bit to save memory all_data = downcast_dtypes(all_data) del grid, gb gc.collect();Create Features# List of columns that we will use to create lags cols_to_rename = list(all_data.columns.difference(index_cols)) shift_range = [1, 2, 3, 4, 5, 12] for month_shift in shift_range: train_shift = all_data[index_cols + cols_to_rename].copy() train_shift['date_block_num'] = train_shift['date_block_num'] + month_shift foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x train_shift = train_shift.rename(columns=foo) all_data = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0) del train_shift # Don't use old data from year 2013 all_data = all_data[all_data['date_block_num'] >= 12] # List of all lagged features fit_cols = [col for col in all_data.columns if col[-1] in [str(item) for item in shift_range]] # We will drop these at fitting stage to_drop_cols = list(set(list(all_data.columns)) - (set(fit_cols)|set(index_cols))) + ['date_block_num'] # Category for each item item_category_mapping = items[['item_id','item_category_id']].drop_duplicates() all_data = pd.merge(all_data, item_category_mapping, how='left', on='item_id') all_data = downcast_dtypes(all_data) gc.collect();Train/Test Split# Save `date_block_num`, as we can't use them as features, but will need them to split the dataset into parts dates = 
all_data['date_block_num'] last_block = dates.max() print(f'Test `date_block_num` is {last_block}') dates_train = dates[dates < last_block] dates_test = dates[dates == last_block] X_train = all_data.loc[dates < last_block].drop(to_drop_cols, axis=1) X_test = all_data.loc[dates == last_block].drop(to_drop_cols, axis=1) y_train = all_data.loc[dates < last_block, 'target'].values y_test = all_data.loc[dates == last_block, 'target'].values print(dates_train.unique()) print(dates_test.unique())[12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32] [33]First Level Models Test meta-features# Run linear regression on numeric columns and # get predictions for the last month lr = LinearRegression() lr.fit(X_train.values, y_train) pred_lr = lr.predict(X_test.values) print(f'Test R-squared for linreg is {r2_score(y_test, pred_lr)}') ## Run LightGBM lgb_params = { 'feature_fraction': 0.75, 'metric': 'rmse', 'nthread':1, 'min_data_in_leaf': 2**7, 'bagging_fraction': 0.75, 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2**7, 'num_leaves': 2**7, 'bagging_freq':1, 'verbose':0, 'force_row_wise':True } model = lgb.train(lgb_params, lgb.Dataset(X_train, label=y_train), 100) pred_lgb = model.predict(X_test) print(f'Test R-squared for LightGBM is {r2_score(y_test, pred_lgb)}') ## Concatenate test predictions to get test meta-features X_test_level2 = np.c_[pred_lr, pred_lgb]Train meta-featuresdates_train_level2 = dates_train[dates_train.isin([27, 28, 29, 30, 31, 32])] # That is how we get target for the 2nd level dataset y_train_level2 = y_train[dates_train.isin([27, 28, 29, 30, 31, 32])] print(f'shape of y_train_level2: {y_train_level2.shape}') # And here we create 2nd level feeature matrix, init it with zeros first X_train_level2 = np.zeros([y_train_level2.shape[0], 2]) # Now fill `X_train_level2` with metafeatures for cur_block_num in [27, 28, 29, 30, 31, 32]: print(cur_block_num, end='') ''' 1. Split `X_train` into parts Remember, that corresponding dates are stored in `dates_train` 2. Fit linear regression 3. Fit LightGBM and put predictions 4. Store predictions from 2. and 3. in the right place of `X_train_level2`. You can use `dates_train_level2` for it Make sure the order of the meta-features is the same as in `X_test_level2` ''' X_train_block = all_data.loc[dates < cur_block_num].drop(to_drop_cols, axis=1) X_test_block = all_data.loc[dates == cur_block_num].drop(to_drop_cols, axis=1) y_train_block = all_data.loc[dates < cur_block_num, 'target'].values y_test_block = all_data.loc[dates == cur_block_num, 'target'].values print(': X_train_block.shape={}'.format(X_train_block.shape), end='') print(', X_test_block.shape={}'.format(X_test_block.shape), end='') print(', Total Size={}'.format(X_train_block.shape[0] + X_test_block.shape[0]), end='') print() lr.fit(X_train_block.values, y_train_block) X_train_level2[dates_train_level2 == cur_block_num, 0] = lr.predict(X_test_block.values) model = lgb.train(lgb_params, lgb.Dataset(X_train_block, label=y_train_block), 100) X_train_level2[dates_train_level2 == cur_block_num, 1] = model.predict(X_test_block) plt.scatter(X_train_level2[:, 0], X_train_level2[:, 1])EnsemblingWhen the meta-features are created, we can ensemble our first level models. Simple Convex MixWe need to find an optimal $\alpha$. And it is very easy, as it is feasible to do grid search. 
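Concretely, for a candidate $\alpha$ the mixed prediction is

$$\hat{y}_{\text{mix}} = \alpha\,\hat{y}_{\text{lr}} + (1-\alpha)\,\hat{y}_{\text{lgb}},$$

and we keep the $\alpha$, searched over an evenly spaced grid between 0 and 1, that maximizes the $R^2$ of $\hat{y}_{\text{mix}}$ against the level-2 training target.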
Next, find the optimal $\alpha$ out of `alphas_to_try` array.alphas_to_try = np.linspace(0, 1, 1001) r2_scores = np.array([r2_score(y_train_level2, np.dot(X_train_level2, [alpha, 1 - alpha])) for alpha in alphas_to_try]) best_alpha = alphas_to_try[r2_scores.argmax()] r2_train_simple_mix = r2_scores.max() print(f'Best alpha: {best_alpha}; Corresponding r2 score on train: {r2_train_simple_mix}')Best alpha: 0.762; Corresponding r2 score on train: 0.6271958302095351Now use the $\alpha$ you've found to compute predictions for the test settest_preds = best_alpha * pred_lr + (1 - best_alpha) * pred_lgb r2_test_simple_mix = r2_score(y_test, test_preds) print(f'Test R-squared for simple mix is {r2_test_simple_mix}')Test R-squared for simple mix is 0.7812102964363891StackingA more advanced ensembling technique. Fit a linear regression model to the meta-features. Use the same parameters as in the model above.lr.fit(X_train_level2, y_train_level2) print(f'Coefficient: {lr.coef_}') print(f'Normalized Coefficient: {lr.coef_ / lr.coef_.sum()}') train_preds = lr.predict(X_train_level2) r2_train_stacking = r2_score(y_train_level2, train_preds) test_preds = lr.predict(np.vstack((pred_lr, pred_lgb)).T) r2_test_stacking = r2_score(y_test, test_preds) print(f'Train R-squared for stacking is {r2_train_stacking}') print(f'Test R-squared for stacking is {r2_test_stacking}')Train R-squared for stacking is 0.632092200807368 Test R-squared for stacking is 0.771341806729652Pandas for panel data 1 Overview 2 Slicing and reshaping dataimport pandas as pd #set_option(para,value)函数 pd.set_option('display.max_columns', 6) # 显示6列 #格式化浮点数,显示两位小数 pd.options.display.float_format = '{:,.2f}'.format realwage = pd.read_csv('https://github.com/QuantEcon/QuantEcon.lectures.code/raw/master/pandas_panel/realwage.csv') #realwage.tail() realwage.head() realwage = realwage.pivot_table(values='value', index='Time', columns=['Country', 'Series', 'Pay period']) realwage.head() #to_datetime函数处理日期格式 realwage.index=pd.to_datetime(realwage.index) type(realwage.index) type(realwage.columns) realwage.columns.names #选取美国的数据 realwage['United States'].head() #.stack() 内层的行转换为列 .unstack() 内层的列转换成行 对行列进行变化 realwage.stack().head() #指定stack的层级 realwage.stack(level='Country').head() realwage['2015'].stack(level=(1,2)).head() # 第2和第3层转为行 #选择一年,一个特定的层级,transpose转置 realwage['2015'].stack(level=(1,2)).transpose().head() # 第2、3层转为行、转置 #用2015汇率测算的,不同国家在不同年份每小时最低工资,xs.根据标签选取行或者列 realwage_f = realwage.xs(('Hourly', 'In 2015 constant prices at 2015 USD exchange rates'), level=('Pay period', 'Series'), axis=1) realwage_f.head()3 Merging dataframes and filling NaNsworlddata = pd.read_csv('https://github.com/QuantEcon/QuantEcon.lectures.code/raw/master/pandas_panel/countries.csv', sep=';') worlddata.head() #选取国家和所在大陆 worlddata=worlddata[['Country (en)','Continent']] worlddata=worlddata.rename(columns={'Country (en)':'Country'}) worlddata.head() #merge worlddata和realwage_f这两个dataframe,pandas默认的是行合并 realwage_f.transpose().head() #merge有四种形式,left join ,right join ,outer join,inner join,默认为inner join merged = pd.merge(realwage_f.transpose(), worlddata, how='left', left_index=True, right_on='Country') merged.head() #判断是否有缺失值 .isnull() merged[merged['Continent'].isnull()] #创建一个包含这些缺失国家和对应所在大陆的dictionary missing_continents = {'Korea': 'Asia', 'Russian Federation': 'Europe', 'Slovak Republic': 'Europe'} merged['Country'].map(missing_continents) #.fillna()用来填补缺失值 merged['Continent'] = merged['Continent'].fillna(merged['Country'].map(missing_continents)) # Check for whether 
continents were correctly mapped merged[merged['Country'] == 'Korea'] #将美洲国家汇总起来 replace = ['Central America', 'North America', 'South America'] for country in replace: merged['Continent'].replace(to_replace=country, value='America', inplace=True) #.sort_index() merged = merged.set_index(['Continent', 'Country']).sort_index() merged.head() merged.columns #to_datetime() merged.columns = pd.to_datetime(merged.columns) merged.columns = merged.columns.rename('Time') merged.columns merged = merged.transpose() merged.head()4 Grouping and summarizing data#Groupby分组函数:mean() merged.mean().head(10) import matplotlib.pyplot as plt import matplotlib matplotlib.style.use('seaborn') #内嵌绘图 %matplotlib inline merged.mean().sort_values(ascending=False).plot(kind='bar', title="Average real minimum wage 2006 - 2016") #Set country labels country_labels = merged.mean().sort_values(ascending=False).index.get_level_values('Country').tolist() plt.xticks(range(0, len(country_labels)), country_labels) plt.xlabel('Country') plt.show() merged.mean(axis=1).head() merged.mean(axis=1).plot() plt.title('Average real minimum wage 2006 - 2016') plt.ylabel('2015 USD') plt.xlabel('Year') plt.show() merged.mean(level='Continent', axis=1).head() merged.mean(level='Continent', axis=1).plot() plt.title('Average real minimum wage') plt.ylabel('2015 USD') plt.xlabel('Year') plt.show() merged = merged.drop('Australia', level='Continent', axis=1) merged.mean(level='Continent', axis=1).plot() plt.title('Average real minimum wage') plt.ylabel('2015 USD') plt.xlabel('Year') plt.show() merged merged.stack().head() merged.stack().describe() #group分组函数,生成一个groupby对象 grouped = merged.groupby(level='Continent', axis=1) grouped #.size()返回一个含有分组大小的Series grouped.size() import seaborn as sns continents = grouped.groups.keys() #.kdeplot用于核密度估计,shade=True控制阴影 for continent in continents: sns.kdeplot(grouped.get_group(continent)['2015'].unstack(), label=continent, shade=True) plt.title('Real minimum wages in 2015') plt.xlabel('US dollars') plt.show() groupedExercisesemploy = pd.read_csv('https://github.com/QuantEcon/QuantEcon.lectures.code/raw/master/pandas_panel/employ.csv') employ = employ.pivot_table(values='Value', index=['DATE'], columns=['UNIT','AGE', 'SEX', 'INDIC_EM', 'GEO']) employ.index = pd.to_datetime(employ.index) # ensure that dates are datetime format employ.head() employ.columns.names #.unique()函数去除其中重复的元素,并按元素由小到大返回一个无重复元素的元组或列表 for name in employ.columns.names: print(name, employ.columns.get_level_values(name).unique()) #swaplevel调整索引级别,.sort_index()按行或者列排序 employ.columns = employ.columns.swaplevel(0,-1) employ = employ.sort_index(axis=1) #.tolist()矩阵转换成列表 geo_list = employ.columns.get_level_values('GEO').unique().tolist() countries = [x for x in geo_list if not x.startswith('Euro')] employ = employ[countries] employ.columns.get_level_values('GEO').unique() employ_f = employ.xs(('Percentage of total population', 'Active population'), level=('UNIT', 'INDIC_EM'), axis=1) employ_f.head() employ_f = employ_f.drop('Total', level='SEX', axis=1) box = employ_f['2015'].unstack().reset_index() #箱体图,showfliers:是否显示异常值,x="AGE"指定绘图数据 sns.boxplot(x="AGE", y=0, hue="SEX", data=box, palette=("husl"), showfliers=False) plt.xlabel('') plt.xticks(rotation=35) plt.ylabel('Percentage of population (%)') plt.title('Employment in Europe (2015)') #bbox_to_anchor:表示legend的位置,前一个表示左右,后一个表示上下 plt.legend(bbox_to_anchor=(1,0.5)) plt.show()NLP - Cleaning the dataset# Reading in the data import pandas as pd train = 
pd.read_csv('/Users/helenabelloff/Desktop/NLP/train.csv') train.head() # Now we can remove hyperlinks using some regex code # NOTE: we can include some hyperlinks after our initial RF run to see if accuracy improves # Ex: www.Bloomberg/blah blah, www.bbc/blah because those are probably links to REAL disaster stories # But let's remove them all for now and see what happens # ALSO: Let's remove them before tokenization def hyperlink_remove(text): hyperlink_remove = train['text'].str.replace('http\S+|http.\S+', '', case=False) return hyperlink_remove train['hyperlink_remove'] = hyperlink_remove(train['text']) # Removing punctuation import string string.punctuation def remove_punct(text): nopunct = "".join([char for char in text if char not in string.punctuation]) return nopunct train['text_no_punct'] = train['hyperlink_remove'].apply(lambda x: remove_punct(x)) # Remove digits import string train['text_no_punct'] = train['text_no_punct'].str.replace(r"\d","", regex= True) # Text to lower def text_lower(text): text = text.lower() return text train['text_no_punct_lower'] = train['text_no_punct'].apply(lambda x: text_lower(x)) # Getting rid of accents etc. import unicodedata train['text_no_punct_lower'] = train['text_no_punct_lower'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8') # Trying to standardize words from itertools import product import itertools train['text_no_punct_lower'] = train['text_no_punct_lower'].apply(lambda x:''.join(''.join(s)[:2] for _, s in itertools.groupby(x))) # Tokenizing import re def tokenize(text): tokens = re.split('\W+', text) return tokens train['tokenized_text'] = train['text_no_punct_lower'].apply(lambda x: tokenize(x.lower())) # Remove stopwords like "the", "for", "are" # These don't really do anything for us # This is a list that I looked up and then I added some stuff after I realized that things like "am" and "pm" # wouldn't add anything to the classification analysis stopword = {'ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during', 'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours', 'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from', 'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through', 'don', 'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while', 'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them', 'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because', 'what', 'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you', 'herself', 'has', 'just', 'where', 'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being', 'if', 'theirs', 'my', 'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than', 'amp', 'wa', 'am', 'pm', 'im', 'leh', 'ind', 'inciweb', 'ina'} # I've opted to not remove the word "near" because near is a common word found in real disaster tweets # (as indicated by my wordclouds) def remove_stopwords(tokenized_list): text = [word for word in tokenized_list if word not in stopword] return text train['tokenized_nostopwords'] = train['tokenized_text'].apply(lambda x: remove_stopwords(x)) # Need to remove stems like "ing" and "ly" - destemming import nltk port = nltk.PorterStemmer() def stemming(tokenized_text): text = [port.stem(word) for word in 
tokenized_text] return text train['tokenized_destemmed'] = train['tokenized_nostopwords'].apply(lambda x: stemming(x)) # Lemmatizing basically does the same thing as stemming, but it takes an informed analysis with # the context of the word in mind # Stemming will just chop off the ending of the word with no context # Lemmatizing will analyze the root word: Ex: "Entitled", "Entitling" --> "Entitle" # I will ultimately use the lemmatizer import nltk lemmatizer = nltk.WordNetLemmatizer() def lemmatizing(tokenized_text): text = [lemmatizer.lemmatize(word) for word in tokenized_text] return text train['token_lemmatized'] = train['tokenized_nostopwords'].apply(lambda x: lemmatizing(x)) def join_text(text): text = ' '.join(text) return text train['token_lemmatized'] = train['token_lemmatized'].apply(lambda x: join_text(x))Text to csvfile = open('./../Naive_Bayes/tweets/p_n_n_tweets.lv.txt','r',encoding='utf8') Lines = file.readlines() count = 0 # Strips the newline character for line in Lines: count += 1 count df['message_lv_tilde']=pd.Series(dtype='float64') df.head() i = 0 for line in Lines: df['message_lv_tilde'][i] = line i += 1 df.head() df.iloc[174643]['message_lv_tilde'] df.to_csv('./../Naive_Bayes/tweets/df_tilde_lv.csv', index=False)df2.headway_score.mean() df.Data loader testingimport os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) os.chdir('../') print(os.getcwd()) import numpy as np import matplotlib.pyplot as plt import logging %load_ext autoreload %autoreload 2 from IPython.core.display import display, HTML display(HTML("")) def loadConfig(filename): import yaml with open(filename, 'r') as f: config = yaml.load(f) return config # load config logging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) %env TMPDIR=tmp/ # filename = 'configs/magicpoint_kitti_train.yaml' # filename = 'configs/superpoint_coco_train_heatmap.yaml' # filename = 'configs/superpoint_kitti_train_heatmap.yaml' # filename = 'configs/magicpoint_coco_export.yaml' # filename = 'configs/magicpoint_shapes.yaml' # filename = 'configs/magicpoint_shapes_pair.yaml' filename = 'configs/magicpoint_repeatability_heatmap.yaml' config = loadConfig(filename) print("config: ", config) ## test set from utils.loader import dataLoader_test as dataLoader task = config['data']['dataset'] data = dataLoader(config, dataset=task) test_set, test_loader = data['test_set'], data['test_loader'] print(f"{len(test_set)}") from utils.loader import dataLoader task = config['data']['dataset'] print("task: ", task) data = dataLoader(config, dataset=task, warp_input=True) # data train_loader, val_loader = data['train_loader'], data['val_loader'] logging.info('== train split size %d in %d batches, val split size %d in %d batches'%\ (len(train_loader)*config['model']['batch_size'], len(train_loader), len(val_loader)*config['model']['batch_size'], len(val_loader))) # val # for i, sample in enumerate(val_loader): # if i > 1: # print(list(sample)) # break # train print("load training samples") for i, sample in enumerate(train_loader): if i > 1: print(list(sample)) break from datasets.data_tools import np_to_tensortrain set (not loader)train_set = data['train_set'] sample = train_set[0] def unsqueeze_dict(sample, dim=0): for i in list(sample): if type(element) is torch.Tensor: sample[i] = sample[i].unsqueeze(dim) unsqueeze_dict(sample, dim=0)visualize dataimport torch # print shape entries = list(sample) for i in 
entries: element = sample[i] # print(type(element)) if type(element) is torch.Tensor: print("shape of ", i, " ", element.shape) img, labels_2D, mask_2D = sample['image'], sample['labels_2D'], sample['valid_mask'] sample['labels_2D_gaussian'].dtype def img_overlap(img_r, img_g, img_gray): # img_b repeat img = np.concatenate((img_gray, img_gray, img_gray), axis=0) img[0, :, :] += img_r[0, :, :] img[1, :, :] += img_g[0, :, :] img[img > 1] = 1 img[img < 0] = 0 img = img.transpose([1,2,0]) return img def toNumpy(tensor): return tensor.detach().cpu().numpy() result_overlap = img_overlap(toNumpy(1 - mask_2D[0,:,:,:]), toNumpy(labels_2D[0, :, :, :]), toNumpy(img[0, :, :, :])) print(result_overlap.shape) plt.imshow(result_overlap) plt.show()(384, 1248, 3)Test gaussian# gauss_map = sample['warped_labels_gaussian'] # gauss_map = sample['warped_labels_gaussian'] # gauss_map = sample['warped_labels'] gauss_map = sample['labels_2D_gaussian'] plt.imshow(toNumpy(gauss_map[0,0,:,:])) print("gaussian: ", toNumpy(gauss_map[0,0,:,:])) print("kernel size: ", config['data']['gaussian_label']['params']['GaussianBlur']['sigma']) plt.show() # gauss_map = sample['labels_2D'] # - sample['warped_labels_bi'] maps = ['labels_2D', 'labels_2D_gaussian', 'warped_labels', 'warped_labels_gaussian'] for m in maps: gauss_map = sample[m] plt.imshow(toNumpy(gauss_map[0,0,:,:])) plt.show() # gauss_map = sample['warped_labels_gaussian'] - sample['warped_labels'] gauss_map = sample['labels_2D_gaussian'] - sample['labels_2D'] # gauss_map = sample['warped_labels'] plt.imshow(toNumpy(gauss_map[0,0,:,:])) print("gaussian: ", toNumpy(gauss_map[0,0,:,:]).max()) # print("kernel size: ", config['data']['gaussian_label']['sigma']) plt.show() from utils.utils import inv_warp_image_batch labels_unwarp = inv_warp_image_batch(sample['warped_labels'], sample['homographies'], mode = 'nearest') # labels_unwarp = inv_warp_image_batch(sample['warped_labels_bi'], sample['homographies'], mode = 'nearest') # gauss_map = labels_unwarp - sample['labels_2D'] # gauss_map = sample['labels_2D'] - labels_unwarp gauss_map = labels_unwarp # gauss_map = sample['warped_labels'] plt.imshow(toNumpy(gauss_map[0,0,:,:])) print("gaussian: ", toNumpy(gauss_map[0,0,:,:]).sum()) # print("kernel size: ", config['data']['gaussian_label']['sigma']) plt.show() labels_ununwarp = inv_warp_image_batch(labels_unwarp, sample['inv_homographies'], mode = 'nearest') # labels_unwarp = inv_warp_image_batch(sample['warped_labels_bi'], sample['homographies'], mode = 'nearest') # gauss_map = labels_unwarp - sample['labels_2D'] # gauss_map = sample['labels_2D'] - labels_unwarp gauss_map = labels_ununwarp - sample['warped_labels'] # gauss_map = sample['warped_labels'] plt.imshow(toNumpy(gauss_map[0,0,:,:])) print("gaussian: ", toNumpy(gauss_map[0,0,:,:]).sum()) # print("kernel size: ", config['data']['gaussian_label']['sigma']) plt.show()gaussian: -1.0check gt patches predictiondevice='cuda:0' # uses labels_2D, heatmap # heatmap = gauss_map to_floatTensor = lambda x: torch.tensor(x).type(torch.FloatTensor) # for synthetic data def get_data(mode='coco'): global labels_2D, heatmap if mode == 'coco': labels_2D = to_floatTensor(sample['warped_labels']) heatmap = to_floatTensor(sample['warped_labels_gaussian']) # heatmap = to_floatTensor(sample['warped_labels_bi']) else: labels_2D = to_floatTensor(sample['labels_2D']) heatmap = to_floatTensor(sample['labels_2D_gaussian']) get_data(mode='coco') ## crop patches from labels map from utils.losses import pts_to_bbox from utils.losses import 
_roi_pool patch_size=5 points = labels_2D[...].nonzero() rois = pts_to_bbox(points[:,2:], patch_size=patch_size) rois = torch.cat((points[:,:1], rois), dim=1) print("heatmap: ", heatmap.dtype) print("heatmap: ", heatmap.max()) print("rois: ", rois.dtype) patches = _roi_pool(heatmap.to(device), rois.to(device), patch_size=patch_size) print("patches: ", patches.shape) ## softargmax ## norm the patches import torch.nn as nn def norm_patches(patches): patches = patches.view(-1, 1, patch_size*patch_size) d = torch.sum(patches, dim=-1).unsqueeze(-1) + 1e-6 patches = patches/d patches = patches.view(-1, 1, patch_size, patch_size) print("patches: ", patches.shape) return patches patches = norm_patches(patches) # print patches def draw_center(patch, pos): patch[...,pos,pos] = 1 return patch from utils.var_dim import toNumpy for i in range(10): img_id = i # print("res (x, y): ", points_res[i]) # plt.imshow(draw_center(toNumpy(patches[img_id,0,:,:]), 4)) p = toNumpy(patches[img_id,0,:,:]) plt.imshow(p) # print("patches[img_id,0,:,:]: ", p) print("patches[img_id,0,:,:] max: ", p.max()) print("patches[img_id,0,:,:] sum: ", p.sum()) plt.show() labels_warped_res = sample['warped_res'] print("labels_warped_res: ", labels_warped_res.shape) # labels_warped_res = sample['warped_labels'] # labels_warped_res = sample['labels_res'] def ext_from_points(labels_res, points): labels_res = labels_res.transpose(1,2).transpose(2,3).unsqueeze(1) points_res = labels_res[points[:,0],points[:,1],points[:,2],points[:,3],:] # tensor [N, 2] return points_res points_res = ext_from_points(labels_warped_res, points) print("points_res: ", points_res.shape) # points_res = label_to_points(labels_warped, points) # print("labels_warped: ", points_res.shape) ## log on patches def do_log(patches): patches[patches==0] = patches[patches==0] + 1e-6 patches_log = torch.log(patches) return patches_log # patches_log = do_log(patches) patches_log = do_log(patches) # patches_log = patches from utils.losses import soft_argmax_2d ## point residual print("points_res: ", points_res.shape) print("points_res: ", points_res[:10]) # print("points_res sum: ", points_res.sum()) ## no norm dxdy = soft_argmax_2d(patches_log, normalized_coordinates=False) # tensor [B, N, patch, patch] dxdy = dxdy.squeeze(1) # tensor [N, 2] dxdy = dxdy-patch_size//2 print("dxdy: ", dxdy.shape) print("dxdy: ", dxdy[:10]) num = 500 print("errors: ", abs(dxdy[:num].cpu() - points_res[:num].cpu()).mean(dim=0)) print("var: ", abs(dxdy[:num].cpu() - points_res[:num].cpu()).std(dim=0)) # dxdy = soft_argmax_2d(patches_log, normalized_coordinates=True) # tensor [B, N, patch, patch] dxdy = dxdy.squeeze(1) # tensor [N, 2] print("dxdy: ", dxdy.shape) print("dxdy: ", dxdy[:10]) print("errors: ", abs(dxdy[:num].cpu() - points_res[:num].cpu()).mean(dim=0)) print("var: ", abs(dxdy[:num].cpu() - points_res[:num].cpu()).std(dim=0)) patch_test = torch.zeros((1,1,5,5)) patch_test[0,0,2,2] = 1 patch_test[0,0,2,3] = 1 patches_log = do_log(patch_test) dxdy = soft_argmax_2d(patches_log, normalized_coordinates=False) print("dxdy: ", dxdy) plt.imshow(patch_test[0,0].numpy()) def test_soft_argmax(input): batch_size, channels, height, width = input.shape # x: torch.Tensor = input.view(batch_size, channels, -1) # compute softmax with max substraction trick # exp_x = torch.exp(x - torch.max(x, dim=-1, keepdim=True)[0]) exp_x = input eps = 1e-6 exp_x_sum = torch.tensor( 1.0) / (exp_x.sum(dim=-1, keepdim=True) + eps) # create coordinates grid pos_y, pos_x = create_meshgrid(input, 
self.normalized_coordinates) pos_x = pos_x.reshape(-1) pos_y = pos_y.reshape(-1) # compute the expected coordinates expected_y: torch.Tensor = torch.sum( (pos_y * exp_x) * exp_x_sum, dim=-1, keepdim=True) expected_x: torch.Tensor = torch.sum( (pos_x * exp_x) * exp_x_sum, dim=-1, keepdim=True) output: torch.Tensor = torch.cat([expected_x, expected_y], dim=-1) return output.view(batch_size, channels, 2) # BxNx2 out_argmax = test_soft_argmax(patches)test labels 3Dfrom utils.d2s import DepthToSpace, SpaceToDepth labels_2D = sample['labels_2D_gaussian'] space2depth = SpaceToDepth(8) labels_3D = space2depth(labels_2D) print("labels_3D: ", labels_3D.shape) labels_sum = labels_3D[2].sum(dim=0) print("labels_3D sum: ", labels_sum.shape) print("labels_3D sum: ", labels_sum)labels_3D: torch.Size([16, 64, 15, 20]) labels_3D sum: torch.Size([15, 20]) labels_3D sum: tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.8863, 0.0824, 0.0000, 0.0000, 0.9843, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.9804, 0.0000, 0.0000, 0.0157, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.8824, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.7882, 0.0275, 0.0000, 0.0000, 0.9961, 0.0000, 0.0000, 0.0980, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.9882, 0.0000, 0.0000, 0.1608, 0.0039, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, [...]test on train_set test residual map# syn # labels_warped_res = sample['labels_res'] # warped_labels = sample['labels_2D'] # warped_img = sample['image'] # coco labels_warped_res = sample['warped_res'] warped_labels = sample['warped_labels'] warped_img = sample['warped_img'] img_id = 0 print("labels_warped_res: ", labels_warped_res.sum()) print("labels_warped_res: ", labels_warped_res.shape) res_img = torch.norm(labels_warped_res, dim=1) plt.imshow(toNumpy(res_img[img_id,:,:])) plt.show() # warped_labels plt.imshow(toNumpy(warped_labels[img_id,0,:,:])) plt.show() # warped image plt.imshow(toNumpy(warped_img[img_id,0,:,:])) plt.show() def subpixel_loss(labels_2D, labels_res, pred_heatmap, batch_size, patch_size=7): """ """ # def pts_to_bbox(points, patch_size): """ input: points: (y, x) output: bbox: (x1, y1, x2, y2) """ shift_l = patch_size / 2 shift_r = patch_size - shift_l # bbox = torch.stack((points-shift_l, points+shift_r), dim=1) pts_l = points-shift_l pts_r = points+shift_r+1 bbox = torch.stack((pts_l[:,1], pts_l[:,0], pts_r[:,1], pts_r[:,0]), dim=1) return bbox pass # roi pooling def _roi_pool(pred_heatmap, rois): from utils.roi_pool import RoIPool # noqa: E402 m = RoIPool(patch_size, 1.0) patches = m(pred_heatmap, rois.float()) return patches # soft argmax def _soft_argmax(patches): from models.SubpixelNet import SubpixelNet as subpixNet dxdy = subpixNet.soft_argmax_2d(patches) # tensor [B, N, patch, patch] dxdy = dxdy.squeeze(1) # tensor [N, 2] return dxdy def print_var(points): print("points: ", points.shape) print("points: ", points) pass points = labels_2D[...].nonzero() labels_res = labels_res.transpose(1,2).transpose(2,3).unsqueeze(1) rois = pts_to_bbox(points[:,2:], patch_size) # filter out?? 
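# Note on the roi layout: `points` comes from labels_2D.nonzero(), so each row is
# (batch, channel, y, x); pts_to_bbox turns the (y, x) part into a patch-sized
# (x1, y1, x2, y2) box, and the concatenation below prepends the batch index, giving
# rows of the form (batch_idx, x1, y1, x2, y2), i.e. the torchvision-style layout that
# RoIPool is assumed to expect here (utils.roi_pool itself is not shown in this notebook).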
rois = torch.cat((points[:,:1], rois), dim=1) points_res = labels_res[points[:,0],points[:,1],points[:,2],points[:,3],:] # tensor [N, 2] # print_var(rois) # print_var(labels_res) # print_var(points) # print("points max: ", points.max(dim=0)) # print_var(labels_2D) # print_var(points_res) patches = _roi_pool(pred_heatmap, rois) # get argsoft max dxdy = _soft_argmax(patches) loss = (points_res - dxdy) # loss = torch.norm(loss, p=2, dim=-1) # loss = loss.sum() # print("loss: ", loss) return patches, points_res, dxdy, loss pass patch_size = 8 device='cuda:0' outs = subpixel_loss(warped_labels.to(device), labels_warped_res.to(device), warped_img.to(device), 8, patch_size=patch_size) patches, points_res, dxdy = outs[0], outs[1], outs[2] for i in range(10): img_id = i def draw_center(patch): patch[...,3,3] = 1 return patch print("softmax dxdy (y, x): ", dxdy[i]) print("points_res (y, x): ", points_res[i]) # plt.imshow(toNumpy(patches[img_id,0,:,:])) plt.imshow(toNumpy(draw_center(patches[img_id,0,:,:]))) plt.show() outs = subpixel_loss(warped_labels.to(device), labels_warped_res.to(device), res_img.unsqueeze(1).to(device), 8, patch_size=patch_size) patches, points_res = outs[0], outs[1] for i in range(10): img_id = i plt.imshow(toNumpy(patches[img_id,0,:,:])) plt.show() from utils.losses import subpixel_loss as loss_f loss = loss_f(warped_labels.to(device), labels_warped_res.to(device), warped_labels.to(device), patch_size=11) print("loss: ", loss) from utils.losses import subpixel_loss_no_argmax as loss_f loss = loss_f(warped_labels.to(device), labels_warped_res.to(device), warped_labels.to(device), patch_size=11) print("loss: ", loss) def get_model(path, name): mod = __import__('{}'.format(path), fromlist=['']) return getattr(mod, name) get_model('utils.losses', 'subpixel_loss_no_argmax') from models.SubpixelNet import SubpixelNet net = SubpixelNet(subpixel_channel=1)Test predicting flow on pixelsprint("labels_2D: ", labels_2D.shape) print("labels_2D sum: ", labels_2D.sum()) print("warped_img: ", warped_img.shape) img_warp = warped_img # extract the patches from labels label_idx = labels_2D[...].nonzero() from utils.losses import extract_patches patches = extract_patches(label_idx.to(device), img_warp.to(device), patch_size=32) # tensor [N, patch_size, patch_size] # patches = extract_patches(label_idx.to(device), labels_2D.to(device), patch_size=15) # tensor [N, patch_size, patch_size] print("patches: ", patches.shape) def label_to_points(labels_res, points): labels_res = labels_res.transpose(1,2).transpose(2,3).unsqueeze(1) points_res = labels_res[points[:,0],points[:,1],points[:,2],points[:,3],:] # tensor [N, 2] return points_res points_res = label_to_points(labels_warped_res, label_idx) print("points_res: ", points_res.shape) for i in range(10): img_id = i plt.imshow(toNumpy(patches[img_id,0,:,:])) plt.show()Testingfrom utils.loader import get_module train_model_frontend = get_module('', 'Train_model_subpixel') '{}.{}'.format('path', 'name') import importlib name = 'Train_model_subpixel' mod = importlib.import_module(name) getattr(mod, name)Visualize warped images (for joint training)# entries = ['image', 'labels_2D', 'valid_mask', 'overlay'] # entries = ['warped_img', 'warped_labels', 'warped_valid_mask', 'overlay'] entries = ['image', 'warped_img'] cols = len(entries) scale = 5 # show images images_num = config['model']['batch_size'] col_row_ratio = 2 iter_max = 2 rows = images_num + 1 task_folder = ['matching'] print("load training samples") for it, sample in enumerate(train_loader): # 
break if max iteration print("iter: ", it) if it > iter_max: print(list(sample)) break plt.figure(figsize=(cols*col_row_ratio*scale, rows*scale)) count = 1 for i in range(images_num): for j in range(cols): # exp_path = Path(base_path, folder[j], prediction, task_folder[0]) # path = exp_path / (str(i) + 'm.png') # image = load_as_float(path) # print(entries[j]) if entries[j] == 'overlay': img, labels_2D, mask_2D = sample[entries[0]], sample[entries[1]], sample[entries[2]] image = img_overlap(toNumpy(1 - mask_2D[i,:,:,:]), toNumpy(labels_2D[i, :, :, :]), toNumpy(img[i, :, :, :])) else: image = sample[entries[j]][i,0,:,:] image = image.numpy() # print("image: ", image.shape) # print("count: ", count) # print("rows: ", rows) # print("cols: ", cols) plt.subplot(rows, cols, count) count += 1 plt.axis('off') plt.title(entries[j] + '/' + str(i)) # print("image: ", image) plt.imshow(image, cmap='gray') plt.tight_layout() plt.show() print("load validation samples") for it, sample in enumerate(val_loader): # break if max iteration print("iter: ", it) if it > iter_max: print(list(sample)) break plt.figure(figsize=(cols*col_row_ratio*scale, rows*scale)) count = 1 for i in range(images_num): for j in range(cols): # exp_path = Path(base_path, folder[j], prediction, task_folder[0]) # path = exp_path / (str(i) + 'm.png') # image = load_as_float(path) # print(entries[j]) if entries[j] == 'overlay': img, labels_2D, mask_2D = sample[entries[0]], sample[entries[1]], sample[entries[2]] image = img_overlap(toNumpy(1 - mask_2D[i,:,:,:]), toNumpy(labels_2D[i, :, :, :]), toNumpy(img[i, :, :, :])) else: image = sample[entries[j]][i,0,:,:] image = image.numpy() # print("image: ", image.shape) # print("count: ", count) # print("rows: ", rows) # print("cols: ", cols) plt.subplot(rows, cols, count) count += 1 plt.axis('off') plt.title(entries[j] + '/' + str(i)) # print("image: ", image) plt.imshow(image, cmap='gray') plt.tight_layout() plt.show()Visualize images w/o homography augmentation# entries = ['image', 'labels_2D', 'valid_mask', 'overlay'] entries = ['warped_img', 'warped_labels', 'warped_valid_mask', 'overlay'] cols = len(entries) scale = 10 # show images count = 1 images_num = 2 col_row_ratio = 3 rows = images_num plt.figure(figsize=(cols*col_row_ratio*scale, rows*scale)) task_folder = ['matching'] for i in range(images_num): for j in range(cols): # exp_path = Path(base_path, folder[j], prediction, task_folder[0]) # path = exp_path / (str(i) + 'm.png') # image = load_as_float(path) print(entries[j]) if entries[j] == 'overlay': img, labels_2D, mask_2D = sample[entries[0]], sample[entries[1]], sample[entries[2]] image = img_overlap(toNumpy(1 - mask_2D[i,:,:,:]), toNumpy(labels_2D[i, :, :, :]), toNumpy(img[i, :, :, :])) else: image = sample[entries[j]][i,0,:,:] image = image.numpy() plt.subplot(rows, cols, count) count += 1 plt.axis('off') plt.title(entries[j] + '/' + str(i)) plt.imshow(image, cmap='gray') plt.tight_layout() plt.show() mask_2D.shapeHomography Adaptation# data loading from utils.loader import dataLoader_test as dataLoader # data = dataLoader(config, dataset='hpatches') export_task = config['data']['export_folder'] data = dataLoader(config, dataset=task, export_task=export_task) test_set, test_loader = data['test_set'], data['test_loader'] # @torch.no_grad() from tqdm import tqdm from utils.utils import img_overlap, toNumpy device = 'cpu' count = 1 images_num = 2 col_row_ratio = 1 rows = 10 cols = 10 scale = 5 iter_max = 0 plt.figure(figsize=(cols*col_row_ratio*scale, rows*scale)) plot_inputs = 
True for i, sample in tqdm(enumerate(test_loader)): img, mask_2D = sample['image'], sample['valid_mask'] img = img.transpose(0,1) img_2D = img.numpy().squeeze() mask_2D = mask_2D.transpose(0,1) mask_2D_numpy = mask_2D.numpy() inv_homographies, homographies = sample['homographies'], sample['inv_homographies'] img, mask_2D, homographies, inv_homographies = img.to(device), mask_2D.to(device), \ homographies.to(device), inv_homographies.to(device) def plotImg(rows, cols, count, image, name=''): plt.subplot(rows, cols, count) count += 1 plt.axis('off') plt.title(name + '/' + str(i)) print("image: ", image.shape) plt.imshow(image) plt.tight_layout() print("img_2D: ", img_2D.shape) print("mask_2D: ", mask_2D.shape) if plot_inputs: for j in range(img.shape[0]): image = img_overlap((1 - mask_2D_numpy[j,:,:]), np.zeros_like(img_2D[j,:,:]), img_2D[j,:,:]) plotImg(rows, cols, count, image.transpose([1,2,0]),'warped-') count += 1 def homography_adaptation(heatmap, inv_homographies, mask_2D, config): from utils.utils import inv_warp_image_batch ## multiply heatmap with mask_2D heatmap = heatmap*mask_2D heatmap = inv_warp_image_batch(heatmap, inv_homographies[0,:,:,:], device=device, mode='bilinear') ##### check mask_2D = inv_warp_image_batch(mask_2D, inv_homographies[0,:,:,:], device=device, mode='bilinear') heatmap = torch.sum(heatmap, dim=0) mask_2D = torch.sum(mask_2D, dim=0) return heatmap/mask_2D pass image = homography_adaptation(img, inv_homographies, mask_2D, config) print("image: ", image.shape) # plotImg(rows, cols, count, toNumpy(image), name='') if i>iter_max: break # plotImg(rows, cols, count, toNumpy(image).squeeze(), name='') image_np = toNumpy(image).squeeze() plt.imshow(image_np) image image m = mask_2D_numpy[j,:,:] image = img_overlap((1 - mask_2D_numpy[j,:,:]), np.zeros_like(img_2D[j,:,:]), img_2D[j,:,:]) print("m: ", m.shape) print("image: ", image.shape) plt.imshow(image.transpose([1,2,0])) plt.show() # show homography adaptation images # show images images_num = config['data']['homography_adaptation']['num'] col_row_ratio = 2 iter_max = 20 rows = images_num + 1 task_folder = ['matching'] print("load training samples") for it, sample in enumerate(train_loader): # break if max iteration print("iter: ", it) if it > iter_max: print(list(sample)) break plt.figure(figsize=(cols*col_row_ratio*scale, rows*scale)) count = 1 for i in range(images_num): for j in range(cols): # exp_path = Path(base_path, folder[j], prediction, task_folder[0]) # path = exp_path / (str(i) + 'm.png') # image = load_as_float(path) # print(entries[j]) if entries[j] == 'overlay': img, labels_2D, mask_2D = sample[entries[0]], sample[entries[1]], sample[entries[2]] image = img_overlap(toNumpy(1 - mask_2D[i,:,:,:]), toNumpy(labels_2D[i, :, :, :]), toNumpy(img[i, :, :, :])) else: image = sample[entries[j]][i,0,:,:] image = image.numpy() # print("image: ", image.shape) # print("count: ", count) # print("rows: ", rows) # print("cols: ", cols) plt.subplot(rows, cols, count) count += 1 plt.axis('off') plt.title(entries[j] + '/' + str(i)) # print("image: ", image) plt.imshow(image, cmap='gray') plt.tight_layout() plt.show() try: path = config['pretrained'] print('==> Loading pre-trained network.') print('path: ', path) # This class runs the SuperPoint network and processes its outputs. 
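# Note: nms_dist, conf_thresh and nn_thresh (and save_output used further down) are not
# defined in this excerpt; presumably they are set in an earlier cell or derived from the
# loaded config before this cell runs, otherwise the constructor call below raises a NameError.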
fe = SuperPointFrontend_torch(weights_path=path, nms_dist=nms_dist, conf_thresh=conf_thresh, nn_thresh=nn_thresh, cuda=False, device=device) print('==> Successfully loaded pre-trained network.') fe.net_parallel() print(path) # save to files save_file = save_output / "export.txt" with open(save_file, "a") as myfile: myfile.write("load model: " + path + '\n') except Exception: passCompound Video Player Widget Most everything in this notebook is a work in progress.import os import IPython import ipywidgets # import nutmeg from jpy_video import Video, TimeCode, compound # Display cells full width txt = """ """ IPython.display.display(IPython.display.HTML(data=txt))Setupf = '/home/pierre/Projects/GoProHelper/notebooks/data/GOPR8802.intra.mp4' os.path.isfile(f) fps = 59.9 wid = compound.VideoPlayer(f, 1/fps) wid.display() wid.wid_video. wid.wid_timecode.layout.top wid.wid_info wid.wid_timecode._model_module_versionComponents# HTML5 video widget wid_video = Video(f) wid_video.set_property('controls', False) # Timecode wid_timecode = TimeCode() # Slider wid_slider = ipywidgets.FloatSlider(step=1/fps, continuous_update=True, readout=False) wid_slider.layout.width='500pt' # wid_button = ipywidgets.Button(icon='play') # http://fontawesome.io/icon/pause/ # self.wid_slider = ipywidgets.FloatSlider(min=0, max=60, step=timebase, # continuous_update=True, orientation='horizontal', # readout=False, # slider_color='blue') # self.wid_slider.layout.width = '50%'Assemblewid_controls = ipywidgets.HBox(children=[wid_timecode, wid_slider]) wid_outer = ipywidgets.VBox(children=[wid_video, wid_controls]) # Link widgets at front end ipywidgets.jslink((wid_video, 'current_time'), (wid_slider, 'value')) ipywidgets.jsdlink((wid_video, 'current_time'), (wid_timecode, 'timecode'))Event Handlers# def handle_any(wid, **event): # """Respond to any event type # """ # update_timecode(wid_time, wid_video.properties.currentTime) def handle_displayed(wid, **event): """Do stuff that can only be done after widget is displayed """ wid.set_property('controls', False) def handle_loaded_metadata(wid, **event): """Function to be called when sufficient video metadata has been loaded at the frontend """ pass # print(wid.properties) def handle_duration_change(wid, **event): """Update anything that depends on video duration """ wid_slider.max = wid.properties.duration wid_video.on_displayed(handle_displayed) # wid_video.on_event(handle_any) wid_video.on_event(handle_loaded_metadata, 'loadedmetadata') wid_video.on_event(handle_duration_change, 'loadedmetadata') wid_video.on_event(handle_duration_change, 'durationchange')Trusted Notebook" width="500 px" align="left"> Beginner's GuideThis is meant as a companion for the SDK to the different composer examples in the [Beginner's Guide](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=introduction). All examples run with the local default simulation backend. You can import your Qconfig and configure the API if you would like to run on a real device. All images are from the Beginner's Guide in the referenced sections. This does not include the detailed text of the Beginner's Guide. It is designed to show how the composer examples can be executed with the SDK.*** Contributorsimport os import sys from IPython.display import Image from qiskit import QuantumProgram from qiskit.tools.visualization import plot_histogramHelper functionsThese lines of code are used throughout the tutorial as helper functions. 
These are used to display local images for the composer references, and execute and plot circuits in the quantum program.def show_image(img_name): """Display an image in the notebook. This is set to the default path of the root of the tutorials repo images directory in the sub-folder 'intro_img'. Args: img_name (str): file name to display """ return Image(filename=os.path.join("..", "images", "intro_img", img_name)) def execute_and_plot(qp, circuits): """Execute and plot the histograms with default settings. Args: qp: QuantumProgram containing the circuits circuits (list): list of circuits to execute """ results = qp.execute(circuits) for circuit in circuits: print(circuit) plot_histogram(results.get_counts(circuit))Quantum Program SetupWe're starting with a single qubit and classical register.qp = QuantumProgram() n = 1 # number of qubits q = qp.create_quantum_register("q", n) c = qp.create_classical_register("c", n)Single-Qubit GatesComposer examples from the [Beginner's Guide](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=005-Single-Qubit_Gates~2F001-Single-Qubit_Gates). X-gateshow_image("single_q_x_gate.png") single_x = qp.create_circuit("single_x", [q], [c]) single_x.x(q[0]) single_x.measure(q[0], c[0]) execute_and_plot(qp, ["single_x"])single_xCreating superpositionComposer examples from the [creating superposition](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=005-Single-Qubit_Gates~2F002-Creating_superposition) section. H-gateshow_image("single_q_h_gate_0.png") single_h = qp.create_circuit("single_h", [q], [c]) single_h.h(q[0]) single_h.measure(q[0], c[0]) execute_and_plot(qp, ["single_h"]) show_image("single_q_h_gate_1.png") single_xh = qp.create_circuit("single_xh", [q], [c]) single_xh.x(q[0]) single_xh.h(q[0]) single_xh.measure(q[0], c[0]) execute_and_plot(qp, ["single_xh"]) show_image("single_q_h_gate_2.png") # This is the |+> state single_hh = qp.create_circuit("single_hh", [q], [c]) single_hh.h(q[0]) single_hh.h(q[0]) single_hh.measure(q[0], c[0]) execute_and_plot(qp, ["single_hh"]) show_image("single_q_h_gate_3.png") # This is the |-> state single_xhh = qp.create_circuit("single_xhh", [q], [c]) single_xhh.x(q[0]) single_xhh.h(q[0]) single_xhh.h(q[0]) single_xhh.measure(q[0],c[0]) execute_and_plot(qp, ["single_xhh"])single_xhhIntroducing qubit phaseThese are the composer examples from [introducing qubit phase](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=005-Single-Qubit_Gates~2F005-Introducing_qubit_phase). 
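The probabilities quoted in the comments of the circuits below all come from one formula: prepare a superposition with H, rotate the phase by an angle $\varphi$ about the Z axis (built here from combinations of the Z, S and T gates), and measure in the X basis (a final H before measurement). Then

$$\Pr(0) = \cos^2\!\left(\frac{\varphi}{2}\right) = \frac{1+\cos\varphi}{2},$$

which gives $1$, $\approx 0.85$, $0.5$, $\approx 0.15$ and $0$ for $\varphi = 0, \pi/4, \pi/2, 3\pi/4, \pi$.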
Z-gateshow_image("single_q_z_gate_0.png") single_z = qp.create_circuit("single_z", [q], [c]) single_z.z(q[0]) single_z.measure(q[0],c[0]) execute_and_plot(qp, ["single_z"]) show_image("single_q_z_gate_1.png") single_zh = qp.create_circuit("single_zh", [q], [c]) single_zh.h(q[0]) single_zh.z(q[0]) single_zh.measure(q[0],c[0]) execute_and_plot(qp, ["single_zh"]) show_image("single_q_z_gate_tbl.png") # 0 rotation around z, Pr(0) = 1.0 phs_0 = qp.create_circuit("phs_0", [q], [c]) phs_0.h(q[0]) phs_0.h(q[0]) # for x-basis measurement phs_0.measure(q[0], c[0]) # pi/4 rotation around z, Pr(0) = 0.85 phs_pi4 = qp.create_circuit("phs_pi4", [q], [c]) phs_pi4.h(q[0]) phs_pi4.t(q[0]) phs_pi4.h(q[0]) # for x-basis measurement phs_pi4.measure(q[0], c[0]) # pi/2 rotation around z, Pr(0) = 0.5 phs_pi2 = qp.create_circuit("phs_pi2", [q], [c]) phs_pi2.h(q[0]) phs_pi2.s(q[0]) # Alternate example gate: #phs_pi2.sdg(q[0]) #rotation -pi/2 using sdg instead of s phs_pi2.h(q[0]) # for x-basis measurement phs_pi2.measure(q[0], c[0]) # 3pi/4 rotation around z, Pr(0) = 0.15 phs_3pi4 = qp.create_circuit("phs_3pi4", [q], [c]) phs_3pi4.h(q[0]) phs_3pi4.s(q[0]) phs_3pi4.t(q[0]) phs_3pi4.h(q[0]) # for x-basis measurement phs_3pi4.measure(q[0], c[0]) # pi rotation around z, Pr(0) = 0 phs_pi = qp.create_circuit("phs_pi", [q], [c]) phs_pi.h(q[0]) phs_pi.z(q[0]) phs_pi.h(q[0]) # for measurement phs_pi.measure(q[0], c[0]) execute_and_plot(qp, ["phs_0", "phs_pi4", "phs_pi2", "phs_3pi4", "phs_pi"])phs_0Multi qubit gatesComposer examples from [multi qubit gates](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=006-Multi-Qubit_Gates~2F001-Multi-Qubit_Gates) section. We set up new registers that have two qubits and classical components using the same quantum program.n = 2 # number of qubits q2 = qp.create_quantum_register("q2", n) c2 = qp.create_classical_register("c2", n) show_image("multi_q_0.png") # |00> -- |00> cnot_00 = qp.create_circuit("cnot_00", [q2], [c2]) cnot_00.cx(q2[0], q2[1]) cnot_00.measure(q2[0], c2[0]) cnot_00.measure(q2[1], c2[1]) execute_and_plot(qp, ["cnot_00"]) show_image("multi_q_1.png") # |01> -- |11> cnot_01 = qp.create_circuit("cnot_01", [q2], [c2]) cnot_01.x(q2[0]) cnot_01.cx(q2[0], q2[1]) cnot_01.measure(q2[0], c2[0]) cnot_01.measure(q2[1], c2[1]) execute_and_plot(qp, ["cnot_01"]) show_image("multi_q_2.png") # |10> -- |10> cnot_10 = qp.create_circuit("cnot_10", [q2], [c2]) cnot_10.x(q2[1]) cnot_10.cx(q2[0], q2[1]) cnot_10.measure(q2[0], c2[0]) cnot_10.measure(q2[1], c2[1]) execute_and_plot(qp, ["cnot_10"]) show_image("multi_q_3.png") # |11> -- |01> cnot_11 = qp.create_circuit("cnot_11", [q2], [c2]) cnot_11.x(q2[0]) cnot_11.x(q2[1]) cnot_11.cx(q2[0], q2[1]) cnot_11.measure(q2[0], c2[0]) cnot_11.measure(q2[1], c2[1]) execute_and_plot(qp, ["cnot_11"])cnot_11EntanglementThese are the [entanglemnt section](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=007-Entanglement~2F001-Entanglement) composer examples.show_image("ent_q_0.png") e0 = qp.create_circuit("e0", [q2], [c2]) e0.h(q2[0]) # apply H-gate for superposition to q0 e0.cx(q2[0], q2[1]) # apply CNOT control from q0 to q1 e0.measure(q2[0], c2[0]) e0.measure(q2[1], c2[1]) execute_and_plot(qp, ["e0"]) show_image("ent_q_1.png") e1 = qp.create_circuit("e1", [q2], [c2]) e1.h(q2[0]) # apply H-gate on q0 for superposition e1.x(q2[1]) # apply x-gate on q1 e1.cx(q2[0], q2[1]) # apply CNOT control from q0 to q1 e1.measure(q2[0], c2[0]) e1.measure(q2[1], c2[1]) execute_and_plot(qp, ["e1"])e1Task 
1: 1.Write a function to compute 5/0 and use try/except to catch the exceptions.def trycatchfunc(res): try: res = 5/0 except: print("\nZero divide error\n") finally: print("\nIn mathematics, no number can be divided by zero\n") return res res = 0 trycatchfunc(res)Zero divide error In mathematics, no number can be divided by zero2.Implement a Python program to generate all sentences where subject is in ["Americans","Indians"] and verb is in ["Play", "watch"] and the object is in ["Baseball","cricket"].Hint: Subject,Verb and Object should be declared in the program as shown below.subjects=["Americans ","Indians"]verbs=["play","watch"]objects=["Baseball","Cricket"]Output should come as below:Americans play Baseball.Americans play Cricket.Americans watch Baseball.Americans watch Cricket.Indians play Baseball.Indians play Cricket.Indians watch Baseball.Indians watch Cricket.subjects=["Americans ","Indians "] verbs=["play ","watch "] objects=["Baseball","Cricket"] for i in subjects: for j in verbs: for k in objects: print(i,j,k)Americans play Baseball Americans play Cricket Americans watch Baseball Americans watch Cricket Indians play Baseball Indians play Cricket Indians watch Baseball Indians watch CricketTask 2: 1.Write a function so that the columns of the output matrix are powers of the input vector.The order of the powers is determined by the increasing boolean argument. Specifically, whenincreasing is False, the i-th output column is the input vector raised element-wise to the powerof N - i - 1. [ vandermonde matrix ]import numpy as np[column_stack] Stack 1-D arrays as columns into a 2-D array.n = int(input("Enter n: ")) li = [] for i in range(n): temp = int(input()) li.append(temp) arr = np.array(li) arr type(resarr) np.column_stack([arr**(n-i-1) for i in range(n)])Migrating from Spark to BigQuery via Dataproc -- Part 1* [Part 1](01_spark.ipynb): The original Spark code, now running on Dataproc (lift-and-shift).* [Part 2](02_gcs.ipynb): Replace HDFS by Google Cloud Storage. This enables job-specific-clusters. (cloud-native)* [Part 3](03_automate.ipynb): Automate everything, so that we can run in a job-specific cluster. (cloud-optimized)* [Part 4](04_bigquery.ipynb): Load CSV into BigQuery, use BigQuery. (modernize)* [Part 5](05_functions.ipynb): Using Cloud Functions, launch analysis every time there is a new file in the bucket. (serverless) Copy data to HDFSThe Spark code in this notebook is based loosely on the [code](https://github.com/dipanjanS/data_science_for_all/blob/master/tds_spark_sql_intro/Working%20with%20SQL%20at%20Scale%20-%20Spark%20SQL%20Tutorial.ipynb) accompanying [this post](https://opensource.com/article/19/3/apache-spark-and-dataframes-tutorial) by . I am using it to illustrate migrating a Spark analytics workload to BigQuery via Dataproc.The data itself comes from the 1999 KDD competition. Let's grab 10% of the data to use as an illustration.!wget http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz !hadoop fs -put kddcup* / !hadoop fs -ls /Found 4 items drwx------ - mapred hadoop 0 2019-07-02 22:05 /hadoop -rw-r--r-- 2 root hadoop 2144903 2019-07-02 22:16 /kddcup.data_10_percent.gz drwxrwxrwt - hdfs hadoop 0 2019-07-02 22:05 /tmp drwxrwxrwt - hdfs hadoop 0 2019-07-02 22:05 /userReading in dataThe data are CSV files. 
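As an aside (assuming Spark 2.0 or later on the Dataproc cluster, and using the `spark` session created in the next cell), the same gzipped file can also be pulled straight into a DataFrame with the built-in CSV reader; a minimal sketch, with no column names since the KDD file ships without a header:

# Sketch: load the gzipped CSV directly as a DataFrame.
# Gzip is not splittable, so this initially lands in a single partition.
df_raw = spark.read.csv("hdfs:///kddcup.data_10_percent.gz", header=False, inferSchema=True)
df_raw.show(5)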
In Spark, these can be read using textFile and splitting rows on commas.from pyspark.sql import SparkSession, SQLContext, Row spark = SparkSession.builder.appName("kdd").getOrCreate() sc = spark.sparkContext data_file = "hdfs:///kddcup.data_10_percent.gz" raw_rdd = sc.textFile(data_file).cache() raw_rdd.take(5) csv_rdd = raw_rdd.map(lambda row: row.split(",")) parsed_rdd = csv_rdd.map(lambda r: Row( duration=int(r[0]), protocol_type=r[1], service=r[2], flag=r[3], src_bytes=int(r[4]), dst_bytes=int(r[5]), wrong_fragment=int(r[7]), urgent=int(r[8]), hot=int(r[9]), num_failed_logins=int(r[10]), num_compromised=int(r[12]), su_attempted=r[14], num_root=int(r[15]), num_file_creations=int(r[16]), label=r[-1] ) ) parsed_rdd.take(5)Spark analysisOne way to analyze data in Spark is to call methods on a dataframe.sqlContext = SQLContext(sc) df = sqlContext.createDataFrame(parsed_rdd) connections_by_protocol = df.groupBy('protocol_type').count().orderBy('count', ascending=False) connections_by_protocol.show()+-------------+------+ |protocol_type| count| +-------------+------+ | icmp|283602| | tcp|190065| | udp| 20354| +-------------+------+Another way is to use Spark SQLdf.registerTempTable("connections") attack_stats = sqlContext.sql(""" SELECT protocol_type, CASE label WHEN 'normal.' THEN 'no attack' ELSE 'attack' END AS state, COUNT(*) as total_freq, ROUND(AVG(src_bytes), 2) as mean_src_bytes, ROUND(AVG(dst_bytes), 2) as mean_dst_bytes, ROUND(AVG(duration), 2) as mean_duration, SUM(num_failed_logins) as total_failed_logins, SUM(num_compromised) as total_compromised, SUM(num_file_creations) as total_file_creations, SUM(su_attempted) as total_root_attempts, SUM(num_root) as total_root_acceses FROM connections GROUP BY protocol_type, state ORDER BY 3 DESC """) attack_stats.show() %matplotlib inline ax = attack_stats.toPandas().plot.bar(x='protocol_type', subplots=True, figsize=(10,25))Matplotlib Exercise 1 Imports%matplotlib inline import matplotlib.pyplot as plt import numpy as npLine plot of sunspot data Download the `.txt` data for the "Yearly mean total sunspot number [1700 - now]" from the [SILSO](http://www.sidc.be/silso/datafiles) website. Upload the file to the same directory as this notebook.import os assert os.path.isfile('yearssn.dat')Use `np.loadtxt` to read the data into a NumPy array called `data`. Then create two new 1d NumPy arrays named `years` and `ssc` that have the sequence of year and sunspot counts.data=np.loadtxt('yearssn.dat') year = data[:,0] ssc = data[:,1] assert len(year)==315 assert year.dtype==np.dtype(float) assert len(ssc)==315 assert ssc.dtype==np.dtype(float)Make a line plot showing the sunspot count as a function of year.* Customize your plot to follow Tufte's principles of visualizations.* Adjust the aspect ratio/size so that the steepest slope in your plot is *approximately* 1.* Customize the box, grid, spines and ticks to match the requirements of this data.f = plt.figure(figsize=(10,5)) plt.plot(year,ssc) plt.xlabel('Year') plt.ylabel('Sunspot Count') plt.xlim(1700, 2015) plt.ylim(0, 200) plt.tick_params(axis='y', direction='out', length=5) plt.box(False) assert True # leave for gradingDescribe the choices you have made in building this visualization and how they make it effective. I made my plot longer in the horizontal direction in order to make the years more discernable. I labeled the plot so that my data is understandable. I changed the limits of my axes so that the data fills the chart. 
I also changed the directions of the y ticks to the outside so they don't overlap onto my line. Now make 4 subplots, one for each century in the data set. This approach works well for this dataset as it allows you to maintain mild slopes while limiting the overall width of the visualization. Perform similar customizations as above:* Customize your plot to follow Tufte's principles of visualizations.* Adjust the aspect ratio/size so that the steepest slope in your plot is *approximately* 1.* Customize the box, grid, spines and ticks to match the requirements of this data.x = plt.figure(figsize=(15,15)) plt.subplot(4,1,1) plt.plot(year[0:99], ssc[0:99]) plt.ylabel("Sunspot Count") plt.box(False) plt.subplot(4,1,2) plt.plot(year[100:199], ssc[100:199]) plt.box(False) plt.subplot(4,1,3) plt.plot(year[200:299], ssc[200:299]) plt.box(False) plt.subplot(4,1,4) plt.plot(year[300:], ssc[300:]) plt.xlabel("Year") plt.box(False) plt.tight_layout() assert True # leave for grading**ANALIZA STANOVANJ** UvodV tej datoteki je predstavljena analiza podatkov o oddaji stanovanj, zbranih s spletne strani [Nepremičnine](https://www.nepremicnine.net/).Obravnavali bomo vpliv različnih dejavnikov na višino najemnine. Zbrani podatki o stanovanjih so:* id stanovanja* kvadratura* tip (število sob)* leto* cena (mesečna najemnina)* regija* upravna enota* občinaimport pandas as pd Stanovanja = pd.read_csv("obdelani-podatki\podatki.csv", index_col='id') slo_stanovanja = Stanovanja[(Stanovanja.regija != 'Šibeniško-kninska') & (Stanovanja.regija != 'Istrska') & (Stanovanja.regija != 'Primorsko-goranska')] slo_stanovanjaZgornja razpredelnica prikazuje vse zbrane podatke, ki jih analiziramo v tej datoteki. Vprašanja in hipoteze* Najdrazja najmenina na kvadraturo je na obali, takoj za tem pa ji sledi Ljubljana.* V katerih regijah se oddajajo največja stanovanja?* V kateri regiji je največ aktivnih oglasov?* Starejša stanovanja se oddajajo po višji ceni kot mlajša.* Kakšen vpliv ima tip stanovanja (število sob) na višino najemnine?* Stanovanja označena kot primerna za študente so dražja v primerjavi z ostalimi. Mesečna najmenina na $m^2$ po regijahV prvi točki nas zanima kako se mesečne najmenine aktivnih oglasov spreminjajo glede na regije. Podana hipoteza pravi, da so najemnine najvišje na južno primorskem, sledila pa naj bi ji ljubljanska regija. Za začetek si poglejmo kakšne so povprečne najemnine na trenutnem trgu v Sloveniji.sum(slo_stanovanja.cena) // 1710 sum(slo_stanovanja.kvadratura) // 1710Povprečna mesečna najemnina stanovanja na strani *nepremičnine.net* je **719€**, povprečna kvadratura pa **62 $m^2$**.Stanovanja['cena_kvad'] = (Stanovanja.cena // Stanovanja.kvadratura) slo_stanovanja = Stanovanja[(Stanovanja.regija != 'Šibeniško-kninska') & (Stanovanja.regija != 'Istrska') & (Stanovanja.regija != 'Primorsko-goranska')] sum(slo_stanovanja.cena_kvad) // 1710Kvadratni meter se v Sloveniji v povprečju oddaja za **11€**. V naslednji dveh tabelah so prikazane cene kvadratur najdražjih ter najcenejših 10 oglaševanih stanovanj.slo_stanovanja[['cena_kvad', 'kvadratura', 'cena', 'regija']].sort_values('cena_kvad', ascending=False).head(10) slo_stanovanja[['cena_kvad', 'kvadratura', 'cena', 'regija']].sort_values('cena_kvad', ascending=False).tail(10)Med najdražjimi stanovanji se pojavljajo tista iz ljubljanske regije, kar je v nasprotju s hipotezo. Dalje si bomo ogledali povprečja najemnin po regijah. 
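(A brief cross-check, assuming the `slo_stanovanja` frame defined above: besides the per-listing mean of `cena_kvad` used in the next cell, an area-weighted average, i.e. total rent divided by total floor area within each region, is less sensitive to many small but expensive listings.)

# Area-weighted average rent per m^2 by region: sum of rents / sum of areas.
po_regijah_utezeno = (slo_stanovanja.groupby('regija')
    .apply(lambda g: g['cena'].sum() / g['kvadratura'].sum())
    .sort_values(ascending=False))
po_regijah_utezeno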
Omejimo se na stanovanja, ki imajo najmenino nižjo od 60€ /$m^2$.slo_stanovanja2 = slo_stanovanja[['cena_kvad', 'kvadratura', 'cena', 'regija']][slo_stanovanja.cena_kvad < 60].sort_values('cena_kvad', ascending=False) Stanovanja_po_regijah = slo_stanovanja2.groupby('regija') Stanovanja_po_regijah.mean()[['cena_kvad', 'kvadratura', 'cena']].sort_values('cena_kvad', ascending=False)V nasprotju s hipotezi je cena kvadratnega metra višja v Ljubljani(mesto) kot ob obali, vendar so na južno-primorskem najemnine višje kot v okolici Ljubljane, zato lahko povzamemo, da je naša hipoteza le delno veljala. Mesto Ljubljana in južna primorska sta edini regiji, ki po naših podatkih segata nad slovensko povprečje cene kvadrature, ki je **11€ / $m^2$**.Za boljšo predstavo si poglejmo še gref, ki kaže najemnino na kvadraturo v odvisnosti od velikosti stanovanja.ax = slo_stanovanja2.plot.scatter(y='cena_kvad', x='kvadratura', color='Grey', label='ostalo') slo_stanovanja2[slo_stanovanja2.regija == 'LJ-mesto'].plot.scatter(y='cena_kvad', x='kvadratura', color='Yellow', label='LJ-mesto', ax=ax) slo_stanovanja2[slo_stanovanja2.regija == ''].plot.scatter(y='cena_kvad', x='kvadratura', color='Green', label='J.Primorska', ax=ax) slo_stanovanja2[slo_stanovanja2.regija == 'Posavska'].plot.scatter(y='cena_kvad', x='kvadratura', color='Blue', label='Posavska', ax=ax) slo_stanovanja2[slo_stanovanja2.regija == 'Dolenjska'].plot.scatter(y='cena_kvad', x='kvadratura', color='Red', label='Dolenjska', ax=ax) slo_stanovanja2[slo_stanovanja2.regija == 'Koroška'].plot.scatter(y='cena_kvad', x='kvadratura', color='Orange', label='Koroška', ax=ax, figsize=(15,7))Iz grafa je lepo vidno, da so cene v Ljubljani(*rumena*) ter ob obali(*zelena*) veliko višje kot drugod po Sloveniji. Opazimo tudi, da se cena najema kvadrature niža z velikostjo stanovanj. Povprečna površina stanovanja po regijahOpazovali bomo kakšna je povprečna površina stanovanj po posameznih regijah. V prejšnjem poglavju smo ugotovili, da je povprečna kvadratura vseh stanovanj **62 $m^2$**.Stanovanja_po_regijah.mean()[['kvadratura']].sort_values('kvadratura', ascending=False)Največja so stanovanja na severno-primorskem ter v Ljubljani. Na dno lestvice spadajo koroška in notranjska regija V prejšnjem poglavju smo opazili, da cena kvadrature pada ze velikostjo stanovanj. Zanima nas ali obstaja povezava med višino povprečne najemnine kvadrature ter povprečno velikostjo stanovanja v regiji.Stanovanja_po_regijah.mean()[['cena_kvad', 'kvadratura']].sort_values('kvadratura', ascending=False) Stanovanja_po_regijah.mean()[['cena_kvad', 'kvadratura']].sort_values('kvadratura', ascending=True).plot.barh(figsize=(14,10))Iz grafa opazimo, da se v mestu Ljubljana oddajajo med največjimi in tudi najdražjimi stanovanji. Opazna razlika med velikostjo in ceno je na obali, kjer so v oddaji draga in majhna stanovanja. Sklepamo, da je razlog za to oddaja stanovanj v namen turizma.Iz podatkov je razvidno, da višina najemnin ne vpliva na velikost stanovanj v oddaji. Število aktivnih oglasov po regijahPričakujemo, da je največ aktivnih oglasov v regijah kjer je velika ponudba in povpraševanje, torej kjer je tudi višja cena najemnin, tj. 
po_regijah = slo_stanovanja[['regija', 'upravna']].groupby('regija').count().sort_values('upravna', ascending=False) po_regijah explode = (0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1) plt = po_regijah.plot.pie(y='upravna', figsize=(10, 10), shadow=True, startangle=0, labeldistance=None, explode=explode) plt.legend(loc="upper left") Our hypothesis holds: the most active listings are in Ljubljana and on the coast. The chart also shows that supply across the Slovenian regions is very uneven. The reason is most likely the uneven population distribution of Slovenia and perhaps differences in housing standards. Demand for apartments is highest in Ljubljana, which also has the highest population density; other regions are predominantly rural, where people mostly live in houses, so there are fewer active listings there. Rents in the City Municipality of Ljubljana We have seen that location has a large effect on rents, so regional differences could influence our results. In the remaining analysis we therefore restrict ourselves to a single region: the Ljubljana region, which also has the most active listings. Effect of the number of rooms on the rent To begin, let us see which apartment types (by number of rooms) are offered most often in Ljubljana. stanovanja_lj = Stanovanja[Stanovanja.regija == 'LJ-mesto'] stanovanja_sobe = stanovanja_lj[(stanovanja_lj.tip != 'apartma') & (stanovanja_lj.tip != '1-sobni nadstandardni apartma') & (stanovanja_lj.tip != 'nadstandardna soba') & (stanovanja_lj.tip != 'stanovanje') & (stanovanja_lj.tip != '2- sobno')] stanovanja_sobe[stanovanja_sobe.regija == 'LJ-mesto'].groupby('tip').count()[['upravna']].sort_values('upravna', ascending=False) The most advertised are 3-room and 2-room apartments, most likely because they suit most four-member families. There are also many listings for rooms and studio apartments; we assume this is because Ljubljana is the university centre of Slovenia, so many students look for accommodation there. The fewest active listings are for apartments with more than 4 rooms. We are interested in how the apartment type affects the price per square metre. Based on the number of active listings per type, we expect the price per square metre to be highest for 4.5- and 5-room apartments, since demand for them is lowest and we suspect they include luxury apartments, which are of course more expensive. We expect the lowest prices for rooms and studios, since these are most likely aimed at students, who have limited financial means. preurejen = stanovanja_sobe[stanovanja_sobe.regija == 'LJ-mesto'] tip = preurejen.groupby('tip').mean()[['cena_kvad']].sort_values('cena_kvad', ascending=False) tip Contrary to the hypothesis, studios and rooms are rented out at the highest prices, even though by our assumption they should be the cheapest. They are followed by apartments with 5 or more rooms, while the cheapest are, surprisingly, 4.5-room apartments at **10.75€ / $m^2$**, which is even below the Slovenian average (see section 1). custom_dict = {'soba': 0, 'garsonjera': 1, '1,5-sobno': 2, '1-sobno': 3, '2-sobno': 4,'2,5-sobno': 5,'3-sobno': 6,'3,5-sobno': 7, '4-sobno': 8, '4,5-sobno': 9, '5 in večsobno': 10} tip.sort_index(key=lambda x: x.map(custom_dict)).plot.bar(figsize=(10,8)) The chart shows that the price falls somewhat as the number of rooms increases, which can be linked to the earlier observation that the price per square metre falls with apartment size. This does not hold for apartments with 5 or more rooms, which are noticeably more expensive than the other multi-room apartments. Again, we conclude that the reason is that these are mostly luxury apartments aimed at wealthier tenants.
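As an aside, the room-count ordering used for the bar chart above can also be expressed with an ordered categorical index instead of the `custom_dict` sort key; a minimal sketch, assuming the `tip` table built above:
import pandas as pd
room_order = ['soba', 'garsonjera', '1,5-sobno', '1-sobno', '2-sobno', '2,5-sobno',
              '3-sobno', '3,5-sobno', '4-sobno', '4,5-sobno', '5 in večsobno']
tip_ordered = tip.copy()
tip_ordered.index = pd.CategoricalIndex(tip_ordered.index, categories=room_order, ordered=True)
tip_ordered.sort_index().plot.bar(figsize=(10, 8))   # same ordering as the custom_dict sort above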
Effect of apartment age on the rent We now consider how rents depend on the age of the apartments. Our hypothesis is that older apartments are on average cheaper, since they are less energy efficient, harder to maintain and usually less accessible (no lifts, parking, etc.). stanovanja_lj[stanovanja_lj.leto > 1800].groupby('leto').mean()[['cena_kvad']].plot(figsize=(14, 5)) We analysed apartments in the City Municipality of Ljubljana built after 1800. From the chart we cannot draw conclusions about a dependence between rents and apartment age, but we do see that older apartments are on average not cheaper than newer ones. Student apartments It is well known that students in Slovenia have great difficulty finding housing. In this section we compare the active listings of student apartments with the Slovenian average. **NOTE:** In the analysis we take into account that, because of the epidemic, there are currently not many active listings aimed at students, and that in general more apartments are available to students in the summer months, i.e. after the end of the academic year. stanovanja = pd.read_csv("obdelani-podatki\podatki.csv") studenti = pd.read_csv("obdelani-podatki\studenti.csv") The table below lists the apartments that are marked as student apartments on *nepremicnine.net*. stanovanja['cena_kvad'] = (stanovanja.cena) // stanovanja.kvadratura studentska_stanovanja = pd.merge(stanovanja, studenti) studentska_stanovanja['cena_kvad_s'] = (studentska_stanovanja.cena // studentska_stanovanja.kvadratura) studentska_stanovanja sum(studentska_stanovanja.cena_kvad) // 29 The average rent per square metre for a student apartment is **13€**. This is above the Slovenian average (**11€ / $m^2$**) and equal to the average price in Ljubljana, even though not all of these apartments are from the Ljubljana region. This means that the prices of student apartments are at the very top of Slovenian rents. We have data on student apartments for the following regions: Ljubljana (city), Ljubljana (surroundings), Gorenjska, Savinjska. As we found in the first section, rents differ strongly between regions, so for each region we separately compare the average price of all apartments with the average price of student apartments. ax = po_regijah_stud = studentska_stanovanja.groupby('regija').mean()[['cena_kvad_s']].plot.bar(color='Orange', align='edge', width=0.3, edgecolor='Black') po_regijah = stanovanja[(stanovanja.regija == 'LJ-mesto') | (stanovanja.regija == 'Gorenjska') | (stanovanja.regija == 'Savinjska') | (stanovanja.regija == 'LJ-okolica')].groupby('regija').mean()[['cena_kvad']].plot.bar(color='LightBlue', ax = ax, figsize=(14, 5), width=0.3, edgecolor='Black') Importance Sampling and Monte Carlo for the Heterogeneous Agent Model# Load general packages import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy import interpolate # Import user generated functions import functions as funcs ## Uncomment for nicer plots ## plt.rc('font', family='serif') plt.rc('text', usetex=True) plt.rc('axes', axisbelow=True)The autoreload extension is already loaded. To reload it, use: %reload_ext autoreloadLoad model data and plot Load the data and save the wealth distribution.
The distributions is generated from an external matlab file 'model.m'.# Load the matlab generated wealth distribution df_g = pd.read_csv(r'ga_var.csv', header=None) df_a = pd.read_csv(r'a_var.csv', header=None) # Save as a numpy array g_bottom = df_g.to_numpy() w_bottom = df_a.to_numpy() g_bottom = np.array(g_bottom).ravel() # Remove outer brackets w_bottom = np.array(w_bottom).ravel() # Define variables alpha = 1.5 mu = -0.3 np.random.seed(214)Manually add a right tail to the wealth distribution# Normalization constant for upper tail C = g_bottom[-1] / 2000**(-alpha-1) # Grid for tail x values tail_vals = np.linspace(w_bottom[-1],1_000_000, 100000) # Compute top g_top = C*tail_vals**(-alpha-1) # alpha value 1.5Combine the two distributions and define grid of wealth levels associated with $g(w)$g = np.append(g_bottom[:-1], g_top) w = np.append(w_bottom[:-1], tail_vals)Plot the distribution of wealth# Standard plot fig = plt.figure(figsize=(6,4)) ax = fig.add_subplot(1,1,1) ax.grid(b=True, which = 'major', linestyle='-', linewidth=0.5, color='0.9', zorder=0) ax.tick_params(axis='both', bottom=True, top=True, left=True, right=True, direction='in', which='both') ax.plot(w, g, color = '0.4', zorder = 2, label = r'$g(a)$') ax.set_xlim(-0.3,10) ax.set_ylim(0,0.35) ax.set_xlabel(r'Wealth, $w$') ax.set_ylabel(r'Density, $g(w)$') # ax.legend(frameon = True, edgecolor = 'k', facecolor = 'white', framealpha=1, fancybox=False) plt.savefig('../../tex/figs/model.pdf') # Log-log plot fig = plt.figure(figsize=(6,4)) ax = fig.add_subplot(1,1,1) ax.grid(b=True, which = 'major', linestyle='-', linewidth=0.5, color='0.9', zorder=0) ax.tick_params(axis='both', bottom=True, top=True, left=True, right=True, direction='in', which='both') ax.plot(w, g, color = '0.4', zorder = 2, label = r'$g(a)$') ax.set(xscale='log', yscale = 'log') ax.set_xlim(1e-2,1e+3) ax.set_ylim(1e-8,1) ax.set_xlabel(r'Wealth, $w$') ax.set_ylabel(r'Density, $g(w)$') # ax.legend(frameon = True, edgecolor = 'k', facecolor = 'white', framealpha=1, fancybox=False) plt.savefig('../../tex/figs/model_log.pdf')Simulations Draw from the asset distribution using rejection sampling.First generate the envelope density, $\tilde{g}(w)$, then find $c = \max \frac{g(w)}{\tilde{g}(w)} + \varepsilon$ for some small $\varepsilon$.# Generate envelope density density_pareto = funcs.den_pareto2(w, 1.5, np.min(w)) # Find the ratio of the densities over the domain c = g/density_pareto # Find the maximal value and add small epsilon c_max = np.max(c) + 1e-5 print(c_max)1.04383186071944To draw random numbers from these distributions i need to interpolate between values.I use a linear interpolater for simplicity.f = interpolate.interp1d(w,g, bounds_error = False, fill_value = 0) h = interpolate.interp1d(w,density_pareto, bounds_error = False, fill_value = 0)Run the rejection sampling algorithm just to test if everything works.# Test function N = 50_000 sample = funcs.act_rejct(N,c_max,h,f) fig = plt.figure(figsize=(6,4)) ax = fig.add_subplot(1,1,1) ax.grid(b=True, which = 'major', linestyle='-', linewidth=0.5, color='0.9', zorder=0) ax.tick_params(axis='both', bottom=True, top=True, left=True, right=True, direction='in', which='both') ax.hist(sample, bins = np.linspace(np.min(w),2,500), density = True) ax.plot(w,g) ax.set_xlim(np.min(w),2) plt.show()Looks fine **Run Monte Carlo experiment**# Parameters N = 10_000 reps = 10_000 # Initialize G_new = np.empty(shape=reps) # Improved estimator G_np = np.empty(shape=reps) # Non-parametric estimator # Loop over reps for j in 
range(reps): # Draw sample using accept-reject sample = funcs.act_rejct(N,c_max,h,f) ## Compute nonparametric gini ## G_np[j] = funcs.gini(sample) ## Compute improved gini ## # The (1-p)'th percentile P = 0.0025 u = np.percentile(sample,100*(1-P)) # Compute s x_low = sample[samplePlot the resulting distribution of Gini coefficient estimatorsfig = plt.figure(figsize=(6,4)) ax = fig.add_subplot(1,1,1) ax.grid(b=True, which = 'major', linestyle='-', linewidth=0.5, color='0.9', zorder=0) ax.tick_params(axis='both', bottom=True, top=True, left=True, right=True, direction='in', which='both') ax.hist(G_new, density = True, bins = 150, color = 'red', alpha = 0.7, zorder = 2, label='Semiparametric', histtype='stepfilled') ax.hist(G_np, density = True, bins = 150, color = 'blue', alpha = 0.7, zorder = 2, label='Nonparametric', histtype='stepfilled') # ax.set_ylim(0,100) # ax.set_xlim(0.65,1) ax.set_xlabel(r'Estimated Gini coefficient') ax.set_ylabel(r'Density') ax.legend(frameon = True, edgecolor = 'k', facecolor = 'white', framealpha=1, fancybox=False) plt.savefig('../../tex/figs/model_estimator.pdf')Attention Modules#export MASK_VAL = -5e4 SELF_ATTN_MASK_VAL = -1e4Attention Projection#export class AttnInProj(Module): """Computes q, k, v from input x and [optional] context""" def __init__(self, d_model:int, bias:bool=False): self.to_q = nn.Linear(d_model, d_model, bias=bias) self.to_k = nn.Linear(d_model, d_model, bias=bias) self.to_v = nn.Linear(d_model, d_model, bias=bias) def forward(self, x, context=None): context = ifnone(context, x) q = self.to_q(x) k, v = self.to_k(context), self.to_v(context) return q, k, v bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) proj = AttnInProj(d) q1, k1, v1 = proj(x) assert (bs, sl, d) == q1.size() == k1.size() == v1.size() q1.shape, k1.shape, v1.shape q2, k2, v2 = proj(x, context) assert (bs, sl, d) == q2.size() assert k2.size() == v2.size() == context.size() assert all_equal(q1, q2) assert not all_equal(k1, k2) q2.shape, k2.shape, v2.shape #export class AttnInProjV2(Module): """Computes q, k, v from input x and [optional] context""" def __init__(self, d_model:int, bias:bool=False): self.to_q = nn.Linear(d_model, d_model, bias=bias) self.to_kv = nn.Linear(d_model, 2*d_model, bias=bias) def forward(self, x, context=None): context = ifnone(context, x) q = self.to_q(x) k, v = self.to_kv(context).chunk(2, -1) return q, k, v bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) proj = AttnInProj(d) q1, k1, v1 = proj(x) assert (bs, sl, d) == q1.size() == k1.size() == v1.size() q1.shape, k1.shape, v1.shape q2, k2, v2 = proj(x, context) assert (bs, sl, d) == q2.size() assert k2.size() == v2.size() == context.size() assert all_equal(q1, q2) assert not all_equal(k1, k2) q2.shape, k2.shape, v2.shapeShared Query-Key Attention Projection#export class SharedQKAttnInProj(Module): """Computes q, k, v from input x and [optional] context""" def __init__(self, d_model:int, bias:bool=False): self.to_qk = nn.Linear(d_model, d_model, bias=bias) self.to_v = nn.Linear(d_model, d_model, bias=bias) def forward(self, x, context=None): context = ifnone(context, x) qk = self.to_qk(x) v = self.to_v(context) return qk, qk, v bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) shared_proj = SharedQKAttnInProj(d) q1, k1, v1 = shared_proj(x) assert (bs, sl, d) == q1.size() == k1.size() == v1.size() assert q1 is k1 q1.shape, k1.shape, v1.shape #hide #TODO: Figure out tests for when using 
SharedQKAttnInProj and contextScaled Dot Product Attention#export #TODO make sure store_attention works class ScaledDotProdAttention(Module): """ Computes scaled dot-product attnetion given q, k, v """ def __init__(self, d_model, n_heads, causal=False, dropout=0., shared_qk=False, store_attention:bool=False): store_attr() self.scale = (d_model//n_heads)**-0.5 self.dropout = nn.Dropout(dropout) def forward(self, q, k, v, attn_mask=None): n, device = q.size(1), q.device q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.n_heads), (q, k, v)) # classic dot-product attention dots = torch.einsum('bhid,bhjd->bhij', q*self.scale, k) if exists(attn_mask): dots.masked_fill_(~attn_mask, MASK_VAL) del attn_mask if self.shared_qk: m = torch.arange(n) dots[:, :, m, m] = SELF_ATTN_MASK_VAL if self.causal: i, j = torch.triu_indices(n, n, 1) dots[:,:,i,j] = MASK_VAL attn = F.softmax(dots, -1) if self.store_attention: self.attention = attn.detach().cpu() attn = self.dropout(attn) out = torch.einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return outScaled dot-product attention is calculated as:$$\textbf {Attention}(Q,K,V) = \textbf {softmax}({QK^T\over\sqrt d_k})V $$q = torch.randn(bs, sl, d) k = torch.randn(bs, sl, d) v = torch.randn(bs, sl, d) attn_func = ScaledDotProdAttention(d, 4) out = attn_func(q, k, v) assert out.size() == (bs,sl,d) out.shape # Test shared_qk attn_func = ScaledDotProdAttention(d, 4, shared_qk=True) out = attn_func(q, k, v) assert out.size() == (bs,sl,d) out.shape #hide attn_func = ScaledDotProdAttention(d, 4) mask = torch.ones(1,sl,sl).bool() out = attn_func(q, k, v, attn_mask=mask) assert out.size() == (bs,sl,d) #slow q = torch.randn(bs, sl, d).cuda() k = torch.randn(bs, sl, d).cuda() v = torch.randn(bs, sl, d).cuda() attn_func = ScaledDotProdAttention(d, 4, shared_qk=True) out = attn_func(q, k, v)Attention container#export class Attention(Module): """ Standard attention module using scaled dot-product attention """ def __init__(self, d_model:int, n_heads:int = 8, causal:bool = False, mask:Tensor = None, dropout:float=0.1, out_dropout:float=None, bias:bool=False, shared_qk:bool=False, store_attention:bool=False): store_attr('causal, mask, n_heads, bias, shared_qk') out_dropout = ifnone(out_dropout, dropout) if shared_qk: self.in_proj = SharedQKAttnInProj(d_model, bias=bias) else: self.in_proj = AttnInProjV2(d_model, bias=bias) self.attn = ScaledDotProdAttention(d_model, n_heads, causal=causal, dropout=dropout, shared_qk=shared_qk, store_attention=store_attention) self.out_proj = nn.Linear(d_model, d_model, bias=bias) self.dropout = nn.Dropout(out_dropout) self._init() def forward(self, x, context = None, mask = None, context_mask = None): q, k, v = self.in_proj(x, context) if self.shared_qk: k = F.normalize(k, 2, dim=-1).type_as(k) attn_mask = self._make_attn_mask(mask, context_mask, x, context) out = self.attn(q, k, v, attn_mask) out = self.out_proj(out) return self.dropout(out) def _init(self): [nn.init.xavier_uniform_(w) for w in self.parameters() if w.dim()>1] if self.bias: [nn.init.constant_(b, 0) for b in self.parameters() if b.dim()==1] def _make_attn_mask(self, mask, context_mask, x, context): if any(map(exists, (mask, context_mask))): b, n, _, device = *x.size(), x.device q_mask = default(mask, lambda: torch.ones((b, n), device = device).bool()) k_mask = q_mask if not exists(context) else context_mask k_mask = default(k_mask, lambda: torch.ones((b, context.shape[-2]), device = device).bool()) q_mask = 
rearrange(q_mask, 'b i -> b () i ()') k_mask = rearrange(k_mask, 'b j -> b () () j') return q_mask * k_mask else: return None #attn_mask is None if both mask and context_mask are None bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) attn = Attention(d) out = attn(x) assert (bs, sl, d) == out.size() out.shape out = attn(x, context) assert (bs, sl, d) == out.size() out.shape # test shared_qk bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) attn = Attention(d, shared_qk=True) out = attn(x) assert (bs, sl, d) == out.size() out.shape e_msg = "Causal masking error" attn = Attention(d, causal=True, dropout=0) x1 = torch.randn(bs, sl, d) out1 = attn(x1) x2 = x1.clone() x2[:, sl//2:, :] = torch.randn(bs, sl//2, d) out2 = attn(x2) # all elements in first half are equal despite second half is defferent assert all_equal(out1[:, :sl//2], out2[:, :sl//2]), e_msg assert not (out1[:, sl//2:] == out2[:, sl//2:]).any(), e_msg e_msg = "Masking error" attn = Attention(d, causal=False, dropout=0) x1 = torch.randn(bs, sl, d) mask = torch.ones(bs, sl) # mask out second half of input mask[:, sl//2:] = 0 mask = mask.bool() out1 = attn(x1, mask=mask) x2 = x1.clone() x2[:, sl//2:, :] = torch.randn(bs, sl//2, d) out2 = attn(x2, mask=mask) # all elements are equal, masked values do not effect result assert all_equal(out1[:, :sl//2], out2[:, :sl//2]), e_msg out1 = attn(x1) out2 = attn(x2) assert not (out1[:, :sl//2] == out2[:, :sl//2]).any() e_msg = "Context masking error" attn = Attention(d, causal=False, dropout=0) x = torch.randn(bs, sl, d) context = torch.randn(bs, sl, d) context_mask = torch.ones(bs, sl) # mask out second half of context context_mask[:, sl//2:] = 0 context_mask = context_mask.bool() out1 = attn(x, context, context_mask=context_mask) context2 = context.clone() context2[:, sl//2:, :] = torch.randn(bs, sl//2, d) out2 = attn(x, context2, context_mask=context_mask) # all elements are equal, masked values do not effect result assert all_equal(out1, out2), e_msg # all output values are different for different context out1 = attn(x, context) out2 = attn(x, context2) assert not (out1 == out2).any() # check stored attention matrix torch.manual_seed(842) bs = 4 sl = 16 csl = sl + 16 d = 64 x = torch.rand(bs, sl, d) context = torch.rand(bs, csl, d) mask = torch.ones(bs, sl) mask[:, -5:] = 0 context_mask = torch.ones(bs, csl) context_mask[:, -10:] = 0 mask, context_mask = mask.bool(), context_mask.bool() attn = Attention(d, store_attention=True) out = attn(x, context, mask=mask, context_mask=context_mask) attention = attn.attn.attention assert (bs, sl, d) == out.size() assert attention.size() == (bs, attn.attn.n_heads, sl, csl) # zeros for masked keys and "don't cares" for masked queries plt.matshow(attention[0,0]); #hide #skip # check stored attention matrix torch.manual_seed(842) bs = 4 sl = 16 d = 64 x = torch.rand(bs, sl, d) mask = torch.ones(bs, sl) mask[:, -5:] = 0 mask = mask.bool() attn = Attention(d, store_attention=True, causal=True) out = attn(x, mask=mask) attention = attn.attn.attention assert (bs, sl, d) == out.size() assert attention.size() == (bs, attn.attn.n_heads, sl, sl) # zeros for masked keys and "don't cares" for masked queries plt.matshow(attention[0,0]);Memory efficient attention Customized `_checkpoint` and `_ChunkedAttnCptFunction` to handle non-tensor args. 
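Before the customised autograd function, a rough idea of what chunking buys: only one chunk of the attention matrix is materialised at a time, and that chunk is recomputed during the backward pass via gradient checkpointing. A minimal single-head sketch using the stock `torch.utils.checkpoint` (no masking, no shared-QK handling; the classes below extend this idea to handle non-tensor arguments):
import torch
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

def chunked_attention(q, k, v, n_chunks=4):
    # q, k, v: [batch, seq_len, dim]; softmax(q k^T / sqrt(dim)) v, computed chunk-by-chunk over queries
    scale = q.size(-1) ** -0.5
    def attend(q_chunk, k, v):
        dots = torch.einsum('bid,bjd->bij', q_chunk * scale, k)        # [batch, chunk, seq_len]
        return torch.einsum('bij,bjd->bid', F.softmax(dots, dim=-1), v)
    # checkpoint discards each chunk's attention matrix and recomputes it on backward
    outs = [checkpoint(attend, q_chunk, k, v) for q_chunk in q.chunk(n_chunks, dim=1)]
    return torch.cat(outs, dim=1)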
See https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html for source implementation.#export class _ChunkedAttnCptFunction(torch.autograd.Function): @staticmethod def forward(ctx, run_function, preserve_rng_state, qc, k, v, i, csz, self, l, attn_mask): # check_backward_validity((qc,k,v)) ctx.run_function = run_function ctx.preserve_rng_state = preserve_rng_state ctx.extra = (i, csz, self, l, attn_mask) if preserve_rng_state: ctx.fwd_cpu_state = torch.get_rng_state() # Don't eagerly initialize the cuda context by accident. # (If the user intends that the context is initialized later, within their # run_function, we SHOULD actually stash the cuda state here. Unfortunately, # we have no way to anticipate this will happen before we run the function.) ctx.had_cuda_in_fwd = False if torch.cuda._initialized: ctx.had_cuda_in_fwd = True ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(qc,k,v) ctx.save_for_backward(qc, k, v) with torch.no_grad(): outputs = run_function(qc, k, v, i, csz, self, l, attn_mask) return outputs @staticmethod def backward(ctx, *args): if not torch.autograd._is_checkpoint_valid(): raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible") inputs = ctx.saved_tensors # Stash the surrounding rng state, and mimic the state that was # present at this time during forward. Restore the surrounding state # when we're done. rng_devices = [] if ctx.preserve_rng_state and ctx.had_cuda_in_fwd: rng_devices = ctx.fwd_gpu_devices with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state): if ctx.preserve_rng_state: torch.set_rng_state(ctx.fwd_cpu_state) if ctx.had_cuda_in_fwd: set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states) detached_inputs = detach_variable(inputs) with torch.enable_grad(): outputs = ctx.run_function(*detached_inputs, *ctx.extra) if isinstance(outputs, torch.Tensor): outputs = (outputs,) torch.autograd.backward(outputs, args) grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp for inp in detached_inputs) return (None, None) + grads + tuple([None]*5) #export def _checkpoint(function, *args, **kwargs): "Same as torch.utils.checkpoint.checkpoint bu allows kwargs" preserve = kwargs.pop('preserve_rng_state', True) assert not kwargs return _ChunkedAttnCptFunction.apply(function, preserve, *args) #export #Not working, use ChunkedDotProdAttention class MemEfficientAttention(Module): """ Memory efficient and very time inefficient attention for long seqences O(L) memory complexity but uses python loop to compute attention for 1 query at a time """ def __init__(self, d_model, n_heads, causal=False, dropout=0., shared_qk=False, store_attention:bool=False): store_attr() self.scale = (d_model//n_heads)**-0.5 self.dropout = nn.Dropout(dropout) def forward(self, q, k, v, attn_mask=None): b,n,d,l, device = *q.size(), k.size(1), q.device q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.n_heads), (q, k, v)) q = q*self.scale outs = [] if self.store_attention: self.attention = torch.zeros(b, d//self.n_heads, n,l) for i in range(n): dot = torch.einsum('bhd, bhjd -> bhj', q[:,:,i,:], k) #pading masking if exists(attn_mask): dot.masked_fill_(~attn_mask[:,:,i,:], MASK_VAL) #shared qk masking if self.shared_qk: dot[:,:,i] = SELF_ATTN_MASK_VAL #causal masking if self.causal: dot[:,:,i+1:] = MASK_VAL attn = F.softmax(dot, -1) # if self.store_attention: self.attention[:,:,i,:] = attn.detach().cpu() attn = self.dropout(attn) outs.append(torch.einsum('bhj, bhjd -> bhd', 
attn, v)) del attn_mask out = torch.stack(outs, dim=2) out = rearrange(out, 'b h n d -> b n (h d)') return out #hide bs = 4 sl = 128 d = 64 q = torch.rand(bs, sl, d) k = torch.rand(bs, sl, d) v = torch.rand(bs, sl, d) #hide # assert torch.all(MemEfficientAttention(d,4)(q,k,v) == ScaledDotProdAttention(d,4)(q,k,v)) assert (MemEfficientAttention(d,4)(q,k,v) - ScaledDotProdAttention(d,4)(q,k,v)).max() < 1e-5 #export def _chunked_attn(qc, k, v, i, csz, self, l, attn_mask): dots = torch.einsum('bhid, bhjd -> bhij', qc*self.scale, k) #pading masking if exists(attn_mask): raise NotImplementedError #shared qk masking if self.shared_qk: ii, jj = torch.arange(csz), torch.arange(i*csz, (i+1)*csz) dots[:,:,ii,jj] = SELF_ATTN_MASK_VAL #causal masking if self.causal: ii, jj = torch.triu_indices(csz, l, offset=i*csz+1) dots[:,:,ii,jj] = MASK_VAL attn = F.softmax(dots, -1) # if self.store_attention: self.attention[:,:,i,:] = attn.detach().cpu() attn = self.dropout(attn) return torch.einsum('bhij, bhjd -> bhid', attn, v) #export #TODO make store_attention work class ChunkedDotProdAttention(Module): """ Memory efficient and time inefficient attention for long seqences O(L) memory complexity if `n_chunks == seq_len` but uses python loop to compute attention for chunks of queries at a time """ def __init__(self, d_model, n_heads, causal=False, dropout=0., shared_qk=False, n_chunks=1, store_attention:bool=False): store_attr() self.scale = (d_model//n_heads)**-0.5 self.dropout = nn.Dropout(dropout) def forward(self, q, k, v, attn_mask=None): b,n,d,l, device = *q.size(), k.size(1), q.device csz = math.ceil(n/self.n_chunks) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.n_heads), (q, k, v)) qs = q.chunk(self.n_chunks, dim=2) outs = [] if self.store_attention: self.attention = torch.zeros(b, d//self.n_heads, n,l) for i, qc in enumerate(qs): res = _checkpoint(_chunked_attn, qc, k, v, i, csz, self, l, attn_mask) outs.append(res) del attn_mask out = torch.cat(outs, dim=2) out = rearrange(out, 'b h n d -> b n (h d)') return out q = torch.randn(bs, sl, d) k = torch.randn(bs, sl, d) v = torch.randn(bs, sl, d) attn_func = ChunkedDotProdAttention(d, 4, n_chunks=8, causal=True, shared_qk=True) out = attn_func(q, k, v) assert out.size() == (bs,sl,d) out.shape #hide # assert torch.all(ChunkedDotProdAttention(d,4, n_chunks=8)(q,k,v) == ScaledDotProdAttention(d,4)(q,k,v)) assert (ChunkedDotProdAttention(d,4, n_chunks=8)(q,k,v) - ScaledDotProdAttention(d,4)(q,k,v)).max() < 1e-5 #hide (ChunkedDotProdAttention(d,4, n_chunks=8)(q,k,v) - ScaledDotProdAttention(d,4)(q,k,v)).max() #export class ChunkedAttention(Module): """ Standard attention module using scaled dot-product attention """ def __init__(self, d_model:int, n_heads:int = 8, causal:bool = False, mask:Tensor = None, dropout:float=0.1, out_dropout:float=None, bias:bool=False, shared_qk:bool=False, n_chunks:int=1, store_attention:bool=False): store_attr('causal, mask, n_heads, bias, shared_qk') out_dropout = ifnone(out_dropout, dropout) if shared_qk: self.in_proj = SharedQKAttnInProj(d_model, bias=bias) else: self.in_proj = AttnInProjV2(d_model, bias=bias) self.attn = ChunkedDotProdAttention(d_model, n_heads, causal=causal, dropout=dropout, shared_qk=shared_qk, store_attention=store_attention, n_chunks=n_chunks) self.out_proj = nn.Linear(d_model, d_model, bias=bias) self.dropout = nn.Dropout(out_dropout) self._init() def forward(self, x, context = None, mask = None, context_mask = None): q, k, v = self.in_proj(x, context) if self.shared_qk: k = 
F.normalize(k, 2, dim=-1).type_as(k) attn_mask = None#self._make_attn_mask(mask, context_mask, x, context) out = self.attn(q, k, v, attn_mask) out = self.out_proj(out) return self.dropout(out) def _init(self): [nn.init.xavier_uniform_(w) for w in self.parameters() if w.dim()>1] if self.bias: [nn.init.constant_(b, 0) for b in self.parameters() if b.dim()==1] def _make_attn_mask(self, mask, context_mask, x, context): if any(map(exists, (mask, context_mask))): b, n, _, device = *x.size(), x.device q_mask = default(mask, lambda: torch.ones((b, n), device = device).bool()) k_mask = q_mask if not exists(context) else context_mask k_mask = default(k_mask, lambda: torch.ones((b, context.shape[-2]), device = device).bool()) q_mask = rearrange(q_mask, 'b i -> b () i ()') k_mask = rearrange(k_mask, 'b j -> b () () j') return q_mask * k_mask else: return None #attn_mask is None if both mask and context_mask are None #memory-efficient attention bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) cattn = ChunkedAttention(d, n_chunks=10) out = cattn(x) assert (bs, sl, d) == out.size() out.shape def time_fwd_bwd(f, x): loss = f(x).sum() loss.backward() #hide #skip attn = Attention(d) %timeit time_fwd_bwd(attn, x) #hide #skip # 1 chunk cattn1 = ChunkedAttention(d) %timeit time_fwd_bwd(cattn1, x) #hide #skip %timeit time_fwd_bwd(cattn, x) #hide #skip # 1 query at a time cattnf = ChunkedAttention(d, n_chunks=sl) %timeit time_fwd_bwd(cattnf, x)157 ms ± 8.03 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)Additive Attention#export class AdditiveInProj(Module): """Computes q, k, v from input x and [optional] context""" def __init__(self, d_model:int, bias:bool=False): self.to_q = nn.Linear(d_model, d_model, bias=bias) self.to_k = nn.Linear(d_model, d_model, bias=bias) self.to_v = nn.Linear(d_model, d_model, bias=bias) def forward(self, x, context=None): b, _, d = x.size() context = ifnone(context, torch.empty(b, 0, d, dtype=x.dtype, device=x.device)) kv_input = torch.cat([x, context], dim=-2) q = self.to_q(x) k, v = self.to_k(kv_input), self.to_v(kv_input) return q, k, v bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) proj = AdditiveInProj(d) q1, k1, v1 = proj(x) assert (bs, sl, d) == q1.size() == k1.size() == v1.size() q1.shape, k1.shape, v1.shape q2, k2, v2 = proj(x, context) assert (bs, sl, d) == q2.size() assert k2.size() == v2.size() == (bs, x.size(1)+context.size(1), d) assert all_equal(q1, q2) assert not all_equal(k1, k2) q2.shape, k2.shape, v2.shape #export #TODO: add support for shared_qk for additive attn class AdditiveAttention(Attention): """ Additive attention module """ def __init__(self, d_model:int, n_heads:int = 8, causal:bool = True, dropout:float=0.1, out_dropout:float=None, bias:bool=False, shared_qk:bool = False, store_attention:bool=False): store_attr('causal, n_heads, bias, shared_qk') out_dropout = ifnone(out_dropout, dropout) self.in_proj = AdditiveInProj(d_model, bias=bias) self.attn = ScaledDotProdAttention(d_model, n_heads, causal=causal, dropout=dropout, store_attention=store_attention) self.out_proj = nn.Linear(d_model, d_model, bias=bias) self.dropout = nn.Dropout(out_dropout) self._init() def _make_attn_mask(self, mask, context_mask, x, context): b, n, _, device = *x.size(), x.device if any(map(exists, (mask, context_mask))): q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) self_mask = q_mask[:, None, :, None] * q_mask[:, None, None, :] if exists(context): k_mask = default(context_mask, lambda: torch.ones((b, 
context.shape[-2]), device=device).bool()) cross_mask = q_mask[:, None, :, None] * k_mask[:, None, None, :] else: cross_mask = torch.empty(0, dtype=self_mask.dtype, device=device) return torch.cat([self_mask, cross_mask], dim=-1) else: return None #attn_mask is None if both mask and context_mask are None bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) context = torch.randn(bs, sl-16, d) attn = AdditiveAttention(d) out = attn(x) assert (bs, sl, d) == out.size() out.shape #hide #TODO: tests for additive attention out = attn(x, context) assert (bs, sl, d) == out.size() out.shape #hide #TODO: add tests with input mask e_msg = "Causal masking error" attn = AdditiveAttention(d, causal=True, dropout=0) x1 = torch.randn(bs, sl, d) out1 = attn(x1) x2 = x1.clone() x2[:, sl//2:, :] = torch.randn(bs, sl//2, d) out2 = attn(x2) # all elements in first half are equal despite second half is defferent assert all_equal(out1[:, :sl//2], out2[:, :sl//2]), e_msg assert not (out1[:, sl//2:] == out2[:, sl//2:]).any(), e_msg e_msg = "Masking error" attn = AdditiveAttention(d, causal=False, dropout=0) x1 = torch.randn(bs, sl, d) mask = torch.ones(bs, sl) # mask out second half of input mask[:, sl//2:] = 0 mask = mask.bool() out1 = attn(x1, mask=mask) x2 = x1.clone() x2[:, sl//2:, :] = torch.randn(bs, sl//2, d) out2 = attn(x2, mask=mask) # all elements are equal, masked values do not effect result assert all_equal(out1[:, :sl//2], out2[:, :sl//2]), e_msg out1 = attn(x1) out2 = attn(x2) assert not (out1[:, :sl//2] == out2[:, :sl//2]).any() e_msg = "Context masking error" attn = Attention(d, causal=False, dropout=0) x = torch.randn(bs, sl, d) context = torch.randn(bs, sl, d) context_mask = torch.ones(bs, sl) # mask out second half of context context_mask[:, sl//2:] = 0 context_mask = context_mask.bool() out1 = attn(x, context, context_mask=context_mask) context2 = context.clone() context2[:, sl//2:, :] = torch.randn(bs, sl//2, d) out2 = attn(x, context2, context_mask=context_mask) # all elements are equal, masked values do not effect result assert all_equal(out1, out2), e_msg # all output values are different for different context out1 = attn(x, context) out2 = attn(x, context2) assert not (out1 == out2).any() #hide #skip # check stored attention matrix torch.manual_seed(842) bs = 4 sl = 16 csl = sl + 16 d = 64 x = torch.rand(bs, sl, d) context = torch.rand(bs, csl, d) mask = torch.ones(bs, sl) mask[:, -5:] = 0 context_mask = torch.ones(bs, csl) context_mask[:, -10:] = 0 mask, context_mask = mask.bool(), context_mask.bool() attn = AdditiveAttention(d, store_attention=True) out = attn(x, context, mask=mask, context_mask=context_mask) attention = attn.attn.attention # assert (bs, sl, d) == out.size() # assert attention.size() == (bs, attn.attn.n_heads, sl, csl) # zeros for masked keys and "don't cares" for masked queries plt.matshow(attention[0,0]);LSH Attention LSH attention from Reformer: [The Efficient Transformer](https://arxiv.org/abs/2001.04451). Based on [lucidrains/reformer-pytorch](https://github.com/lucidrains/reformer-pytorch/), but simpliefied and refactored. 
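The core of the scheme is the angular hashing step: project each vector onto a handful of random directions and let the bucket id be the index of the largest signed projection, so vectors pointing in similar directions tend to share a bucket. A rough sketch of just that step (the `hash_vectors` method below adds hashing dropout, per-round bucket offsets and caching):
import torch

def lsh_buckets(vecs, n_buckets, n_hashes=8):
    # vecs: [batch, seq_len, dim] -> bucket ids per hash round: [batch, n_hashes, seq_len]
    assert n_buckets % 2 == 0
    dim = vecs.size(-1)
    rotations = torch.randn(dim, n_hashes, n_buckets // 2, device=vecs.device)
    rotated = torch.einsum('bsd,dhn->bhsn', vecs, rotations)   # random projections per hash round
    rotated = torch.cat([rotated, -rotated], dim=-1)           # each direction yields two opposite buckets
    return rotated.argmax(dim=-1)                              # bucket id = closest signed direction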
Uses shared keys and queries, but requires both to be passed as input (even though they are identical).#export class LSHAttention(Module): """ LSH attention module: """ def __init__( self, dropout = 0., # attention matrix dropout bucket_size = 64, # at least 64 suggested in trax n_hashes = 8, # papers sugests 8 causal = False, allow_duplicate_attention = False, # as in the paper attend_across_buckets = False, # as in the paper drop_for_hash_rate = 0.0, # unsure of default, not mentioned in paper return_attn = False, seed = None, # for reproducibility **kwargs): if dropout >= 1.0 or drop_for_hash_rate >=1.0: raise ValueError('Dropout rates must be lower than 1.') store_attr(but=['dropout', 'drop_for_hash_rate']) # fastcore - store attibutes self.dropout = nn.Dropout(dropout) self.dropout_for_hash = nn.Dropout(drop_for_hash_rate) self._cache = {} # cache buckets for reversible network, required to make Reformer work at depth @cache_method_decorator('_cache', 'buckets', reexecute=True) def hash_vectors(self, n_buckets, vecs): # 0. We need an even number of buckets: assert n_buckets % 2 == 0 # 1. account for the input shapes. vecs = [bs, sl, dim] batch_size, seqlen, dim = vecs.shape device = vecs.device rotations_shape = (dim, self.n_hashes, n_buckets // 2) # 2. Calculate hash bucket id via random rotations, concatenation and argmax # note: we copy rotations accross batch dimension (see exploration notebook for details). if self.seed is not None: torch.manual_seed(self.seed) random_rotations = repeat(torch.randn(rotations_shape,device=device), 'd nh nb -> bs d nh nb', bs=batch_size) dropped_vecs = self.dropout_for_hash(vecs) rotated_vecs = torch.einsum('bsd,bdhn->bhsn', dropped_vecs, # [bs, sl, dim] random_rotations) # [bs, dim, n_hashes, n_buckets//2] # rotated vecs: [bs, n_hashes, sl, n_buckets//2] rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1) # [bs, n_hashes, sl, n_buckets] buckets = torch.argmax(rotated_vecs, dim=-1) # [bs, n_hashes, sl] # 3. Next we add offsets so that bucket numbers from different hashing rounds don't overlap. # We also reshape the buckets so that each hash round is concatenated along the -1 dim offsets = torch.arange(self.n_hashes,device=device) # list of [0,1,2,..n_hashes-1] offsets = rearrange(offsets * n_buckets, 'nh -> 1 nh 1') # [1, n_hashes, 1] buckets = rearrange(buckets+offsets, 'bs nh sl -> bs (nh sl)') # [bs, (n_hashes*sl)] return buckets def forward(self, q, k, v, attn_mask = None, **kwargs): batch_size, seqlen, dim, device = *q.shape, q.device # caching is_reverse = kwargs.pop('_reverse', False) depth = kwargs.pop('_depth', None) # We will have an even number of buckets, and our attention chunks needs to fit completely within a seqlen assert seqlen % (self.bucket_size * 2) == 0, f'Sequence length ({seqlen}) needs to be divisible by target bucket size x 2 - {self.bucket_size * 2}' # get the hash buckets for our q,k input vectors n_buckets = seqlen // self.bucket_size buckets = self.hash_vectors(n_buckets, q, key_namespace=depth, fetch=is_reverse, set_cache=self.training) # We use the same vector as both a query and a key. assert int(buckets.shape[1]) == self.n_hashes * seqlen # Create an index that reflexts both bucket id and sequence id. This let's us sort q, k according # to both simultaneously. Repeated across the batch dimension. 
ticker = repeat(torch.arange((self.n_hashes * seqlen),device=device), 'l -> bs l', bs=batch_size) buckets_and_t = seqlen * buckets + (ticker % seqlen) buckets_and_t = buckets_and_t.detach() # [bs, seqlen*n_hashes] # Hash-based sort ("s" at the start of variable names means "sorted") sbuckets_and_t, sticker = sort_key_val(buckets_and_t, ticker, dim=-1) # [bs, seqlen*n_hashes] _, undo_sort = sticker.sort(dim=-1) # indexes to undo sortings del ticker sbuckets_and_t = sbuckets_and_t.detach() # no need to store gradiens for indexes sticker = sticker.detach() undo_sort = undo_sort.detach() st = (sticker % seqlen) # index of [0..seqlen-1] for each hash round sq = batched_index_select(q, st) # get the sorted q, [bs, seqlen*n_hashes, dim] sk = batched_index_select(k, st) # get the sorted k, [bs, seqlen*n_hashes, dim] sv = batched_index_select(v, st) # get the sorted v, [bs, seqlen*n_hashes, dim] # Reshape to include a n_chunks axis. n_chunks = self.n_hashes * n_buckets bq_t = bkv_t = rearrange(st, 'bs (n s) -> bs n s', n=n_chunks) # [bs, n_chunks, chunk_size] bq = rearrange(sq, 'bs (n s) d -> bs n s d', n=n_chunks) # [bs, n_chunks, chunk_size, dim] bk = rearrange(sk, 'bs (n s) d -> bs n s d', n=n_chunks) # [bs, n_chunks, chunk_size, dim] bv = rearrange(sv, 'bs (n s) d -> bs n s d', n=n_chunks) # [bs, n_chunks, chunk_size, dim] # Hashing operates on unit-length vectors. Unnormalized query vectors are # fine because they effectively provide a learnable temperature for the # attention softmax, but normalizing keys is needed so that similarity for # the purposes of attention correctly corresponds to hash locality. bk = F.normalize(bk, p=2, dim=-1) # Allow each chunk to attend within itself, and also one chunk back. Chunk # boundaries might occur in the middle of a sequence of items from the # same bucket, so this increases the chances of attending to relevant items. # Note: no look_back for queries bk = look_one_back(bk) # [bs, n_chunks, chunk_size*2, dim] bv = look_one_back(bv) # [bs, n_chunks, chunk_size*2, dim] bkv_t = look_one_back(bkv_t) # Dot-product attention. dots = torch.einsum('bnsd,bnzd->bnsz', bq, # [bs, n_chunks, chunk_size, dim] bk # [bs, n_chunks, chunk_size*2, dim] ) * (dim ** -0.5) # dots: [bs, n_chunks, chunk_size, chunk_size*2] masked_value = max_neg_value(dots) # Input mask for padding in variable lengthed sequences if attn_mask is not None: attn_mask = F.pad(attn_mask, (0, seqlen - attn_mask.shape[1]), value=True) mq = attn_mask.gather(1, st).reshape((batch_size, n_chunks, -1)) mkv = look_one_back(mq) mask = mq[:, :, :, None] * mkv[:, :, None, :] dots.masked_fill_(~mask, masked_value) del mask # Causal masking if self.causal: mask = bq_t[:, :, :, None] < bkv_t[:, :, None, :] dots.masked_fill_(mask, masked_value) del mask # Mask out attention to self except when no other targets are available. self_mask = bq_t[:, :, :, None] == bkv_t[:, :, None, :] dots.masked_fill_(self_mask, SELF_ATTN_MASK_VAL) del self_mask # Mask out attention to other hash buckets. if not self.attend_across_buckets: bq_buckets = bkv_buckets = torch.reshape(sbuckets_and_t // seqlen, (batch_size, n_chunks, -1)) bkv_buckets = look_one_back(bkv_buckets) bucket_mask = bq_buckets[:, :, :, None] != bkv_buckets[:, :, None, :] dots.masked_fill_(bucket_mask, masked_value) del bucket_mask # Don't double-count query-key pairs across multiple rounds of hashing. # There are two possible strategies here. 
(1) The default is to count how # many times a query-key pair is repeated, and to lower its log-prob # correspondingly at each repetition. if not self.allow_duplicate_attention: locs1 = undo_sort // bq_t.shape[-1] locs2 = (locs1 + 1) % n_chunks if not self.attend_across_buckets: locs1 = buckets * n_chunks + locs1 locs2 = buckets * n_chunks + locs2 locs = torch.cat([ torch.reshape(locs1, (batch_size, self.n_hashes, seqlen)), torch.reshape(locs2, (batch_size, self.n_hashes, seqlen)), ], 1).permute((0, 2, 1)) slocs = batched_index_select(locs, st) b_locs = torch.reshape(slocs, (batch_size, n_chunks, -1, 2 * self.n_hashes)) b_locs1 = b_locs[:, :, :, None, :self.n_hashes] bq_locs = b_locs1.expand(b_locs.shape[:3] + (2, self.n_hashes)) bq_locs = torch.reshape(bq_locs, b_locs.shape) bkv_locs = look_one_back(b_locs) dup_counts = (bq_locs[:, :, :, None, :] == bkv_locs[:, :, None, :, :]) # for memory considerations, chunk summation of last dimension for counting duplicates dup_counts = chunked_sum(dup_counts, chunks=(self.n_hashes * batch_size)) dup_counts = dup_counts.detach() assert dup_counts.shape == dots.shape dots = dots - torch.log(dup_counts + 1e-9) del dup_counts # Softmax. dots_logsumexp = torch.logsumexp(dots, dim=-1, keepdim=True) dots = torch.exp(dots - dots_logsumexp).type_as(dots) dropped_dots = self.dropout(dots) # calculate self-attention (attn * values) bo = torch.einsum('bnsz,bnzd->bnsd', dropped_dots, # [bs, n_chunks, chunk_size, chunk_size*2] bv) # [bs, n_chunks, chunk_size*2, dim] # bo: [bs, n_chunks, chunk_size, dim] # unchunk, unsort and reshape self-attention so = rearrange(bo, 'b n s d -> b (n s) d') # [bs, seqlen*n_hashes, dim] o = batched_index_select(so, undo_sort) # [bs, seqlen*n_hashes, dim] o = rearrange(o, 'b (nh sl) d -> b nh sl d', nh=self.n_hashes) # [bs, n_hashes, seqlen, dim] # unchunk, unsort and reshape logits slogits = rearrange(dots_logsumexp, 'bs n s 1 -> bs (n s)') # [bs, seqlen*n_hashes] logits = slogits.gather(1, undo_sort) # [bs, seqlen*n_hashes] logits = rearrange(logits, 'bs (nr sl) -> bs nr sl 1', nr=self.n_hashes) # [bs, n_hashes, seqlen, 1] # average probabilites across hash rounds (dim 1) and get weighted attention probs = torch.exp(logits - torch.logsumexp(logits, dim=1, keepdim=True)) # [bs, n_rounds, seqlen, 1] out = torch.sum(o * probs, dim=1) # [bs, seqlen, dim] # return unsorted attention weights - empty otherwise attn = torch.empty(0, device=device) if self.return_attn: attn_unsort = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :]) attn_unsort = attn_unsort.view(batch_size * self.n_hashes, -1).long() unsorted_dots = torch.zeros(batch_size * self.n_hashes, seqlen * seqlen, device=device) unsorted_dots.scatter_add_(1, attn_unsort, dots.view_as(attn_unsort)) del attn_unsort unsorted_dots = unsorted_dots.reshape(batch_size, self.n_hashes, seqlen, seqlen) attn = torch.sum(unsorted_dots * probs, dim=1) # return output, attention matrix, and bucket distribution return out, attn, bucketsTest LSH-attention layer. Note: `d_model` is infered from input. 
Assumes shared key and query, but accepts both as input.bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) shared_proj = SharedQKAttnInProj(d) q, k, v = shared_proj(x) lsh_attn = LSHAttention() out, _, _ = lsh_attn(q, k, v) assert (bs, sl, d) == out.size() out.shape # test that seeting seed gives identical results lsh_attn = LSHAttention(seed=123) lsh_attn1 = LSHAttention(seed=123) assert all_equal(lsh_attn(q, k, v), lsh_attn1(q,k,v))LSH-self-attention Performs multihead `LSHAttention`#export class LSHSelfAttention(Module): def __init__(self, d_model, n_heads = 8, bucket_size = 64, # reccomended default from paper/lucid n_hashes = 8, # reccomended default from paper/lucid causal = False, bias:bool=False, attend_across_buckets = False, allow_duplicate_attention = False, # Penalize multiple qk-v pairs in same attention chunk or not return_attn = False, # Not implemented yet seed = None, # for reproducibility dropout = 0., # dropout for LSH-Attention attention matrix dropout_hash = 0., # dropout for hashing algorithm out_dropout = 0.): # a final dropout on output assert (d_model % n_heads) == 0, 'dimensions must be divisible by number of heads' store_attr('n_heads, bias') self.in_proj = SharedQKAttnInProj(d_model, bias=bias) self.out_proj = nn.Linear(d_model, d_model, bias=bias) self.lsh_attn = LSHAttention(bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, attend_across_buckets = attend_across_buckets, allow_duplicate_attention = allow_duplicate_attention, return_attn = return_attn, dropout = dropout, dropout_hash = dropout_hash, seed=seed) self.out_dropout = nn.Dropout(out_dropout) self._init() def forward(self, x, context = None, mask = None, context_mask = None, **kwargs): device, dtype = x.device, x.dtype bs, sl, d_model = x.shape # project keys, queries and valuess q, k, v = self.in_proj(x, context) # [bs, sl(+csl), d_model] # split off head dimension for q, k and v. Resulting shapes are: [nh, bs, sl, dim_head] q, k, v = map(lambda t: rearrange(t, 'bs sl (nh dh) -> nh bs sl dh', nh=self.n_heads), (q, k, v)) #create masks: attn_mask = self._make_attn_mask(mask, context_mask, x, context) # run lsh per head (iterate through 0th dim i.e. 
the n_head dim), concatenate and rearrange lsh_results = L([self.lsh_attn(q_h, k_h, v_h, attn_mask, **kwargs) for q_h, k_h, v_h in zip(q, k, v)]) out = lsh_results.itemgot(0) # split tuple (output, attn, buckets) out = torch.cat([head for head in out], dim=0) # concatenate [n_heads*bs, sl, dh] out = rearrange(out, '(nh bs) sl dh -> bs sl (nh dh)', bs=bs) # [bs, sl, dim_heads] (dim_heads = head_dim * n_heads) # pass through final feed forward and maybe dropout out = self.out_proj(out) # [bs, sl, dim] return self.out_dropout(out) # Note: masks are reused per head and should be of size bs, sl def _make_attn_mask(self, mask, context_mask, x, context): if any(map(exists, (mask, context_mask))): context_lenght = context.shape[-2] if context is not None else 0 # context.shape[-2] is sl dim (0 if none) default_mask = torch.tensor([True], device=x.device) i_mask = default(mask, default_mask.expand(bs, sl)) c_mask = default(context_mask, default_mask.expand(bs, context_lenght)) attn_mask = torch.cat((i_mask, c_mask), dim=1) return attn_mask else: return None #attn_mask is None if both mask and context_mask are None def _init(self): [nn.init.xavier_uniform_(w) for w in self.parameters() if w.dim()>1] if self.bias: [nn.init.constant_(b, 0) for b in self.parameters() if b.dim()==1] bs = 4 sl = 128 d = 64 x = torch.randn(bs, sl, d) attn = LSHSelfAttention(d, seed=123, out_dropout=0.1, dropout=0.1, dropout_hash=0.1) assert all_equal(attn(x), attn(x)) out = attn(x) assert (bs, sl, d) == out.size() out.shapeTesting causal masking Note that unlike the testing for the standard transformer, we can't draw new vectors for our change input since this would impact the clustering of the vectors in the LSH-algorithm. If we instead scale by a constant factor, the angular based clustering is not affected, even though the values have changed.e_msg = "Causal masking error" attn = LSHSelfAttention(d, causal=True, dropout=0, seed=123) x1 = torch.randn(bs, sl, d) out1 = attn(x1) x2 = x1.clone() x2[:, sl//2:, :] = x2[:, sl//2:, :]*2 out2 = attn(x2) # all elements in first half are equal despite second half is defferent assert torch.allclose(out1[:, :sl//2], out2[:, :sl//2]), e_msg assert not (out1[:, sl//2:] == out2[:, sl//2:]).any(), e_msgTesting maskinge_msg = "Masking error" attn = LSHSelfAttention(d, causal=False, dropout=0, seed=123) x1 = torch.randn(bs, sl, d) mask = torch.ones(bs, sl) # mask out second half of input mask[:, sl//2:] = 0 mask = mask.bool() out1 = attn(x1, mask=mask) x2 = x1.clone() x2[:, sl//2:, :] = x2[:, sl//2:, :]*2 out2 = attn(x2, mask=mask) # all elements are equal in the first half assert all_equal(out1[:, :sl//2], out2[:, :sl//2]), e_msg # masked values does not affect result for the first half of the input out1 = attn(x1) out2 = attn(x2) assert not (out1[:, :sl//2] == out2[:, :sl//2]).any()Testing context maskinge_msg = "Context masking error" attn = LSHSelfAttention(d, causal=False, dropout=0, seed=123) x = torch.randn(bs, sl, d)Passing in context=x should not alter the result, as compared to no context:out0 = attn(x,) out1 = attn(x, context=x) assert all_equal(out0, out1)Mask second half of contextcontext = x.clone() # cloning x for context context_mask = torch.ones(bs, sl).bool() context_mask[:, sl//2:] = False out1 = attn(x, context, context_mask=context_mask) context2 = context.clone() context2[:, -1:, :] = context2[:, -1:, :]*2 # scaling to not affect clustering, relevant here? 
#context2[:, sl//2:, :] = torch.randn(bs, sl//2, d) # new random data out2 = attn(x, context2, context_mask=context_mask) out1[0], out2[0] (out1==out2).sum() # all elements are equal, masked values do not effect result #assert all_equal(out1, out2), e_msg # all output values are different for different context out1 = attn(x, context) out2 = attn(x, context2) #assert not (out1 == out2).any()Reformer Attention Reformer attention calculates multihead attention with shared keys and queries, and allows switching between full `Attention` or `LSHAttention` at creation, but not during inference or training.#export class ReformerAttention(Module): """ Reformer attention container. Switch between FullSharedQKAttention and LSHAttention. """ def __init__(self, d_model:int, n_heads:int = 8, causal:bool = False, mask:Tensor = None, dropout:float=0.1, out_dropout:float=None, bias:bool=False, store_attention:bool=False, lsh_attention:bool = True, n_hashes:int = 8, bucket_size:int = 64): store_attr('causal, mask, n_heads, bias, lsh_attention') out_dropout = ifnone(out_dropout, dropout) if lsh_attention: self.attn = LSHSelfAttention(d_model, n_heads = n_heads, bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, dropout=dropout, return_attn=store_attention) else: self.attn = Attention(d_model, n_heads, causal=causal, shared_qk=True, dropout=dropout, store_attention=store_attention) self.dropout = nn.Dropout(out_dropout) self._init() def forward(self, x, context = None, mask = None, context_mask = None): out = self.attn(x, mask, context_mask) return self.dropout(out) def _init(self): [nn.init.xavier_uniform_(w) for w in self.parameters() if w.dim()>1] if self.bias: [nn.init.constant_(b, 0) for b in self.parameters() if b.dim()==1] bs = 4 sl = 128 d = 512 x = torch.randn(bs, sl, d) attn_lsh = ReformerAttention(d, lsh_attention=True) out = attn_lsh(x) assert (bs, sl, d) == out.size() out.shape attn_full = ReformerAttention(d, lsh_attention=False) out = attn_full(x) assert (bs, sl, d) == out.size() out.shapeThe state dicts of full and lsh attention are identical:[(k, v.shape) for k, v in attn_lsh.state_dict().items()] [(k, v.shape) for k, v in attn_full.state_dict().items()] #export class ReformerAttentionV2(Module): """ Reformer attention container. Take on making it switchable on the fly. Switch between FullSharedQKAttention and LSHAttention. """ def __init__(self, d_model:int, n_heads:int = 8, causal:bool = False, attn_mask:Tensor = None, dropout:float=0.1, out_dropout:float=None, bias:bool=False, store_attention:bool=False, use_lsh:bool = True, n_hashes:int = 8, bucket_size:int = 64, seed:int=None): store_attr('causal, attn_mask, n_heads, bias, use_lsh') out_dropout = ifnone(out_dropout, dropout) self.in_proj = SharedQKAttnInProj(d_model, bias=bias) self.lsh_attn = LSHAttention(bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, return_attn=store_attention, dropout=dropout, seed=seed) self.full_attn = ScaledDotProdAttention(d_model, n_heads, causal=causal, dropout=dropout, shared_qk=True, store_attention=store_attention) self.out_proj = nn.Linear(d_model, d_model, bias=bias) self.dropout = nn.Dropout(out_dropout) self._init() def forward(self, x, context=None, mask=None, context_mask=None, **kwargs): #doesn't support cross attention for now? 
assert context is None, "sharedQK doesn't support cross attention yet" q, k, v = self.in_proj(x) # use LSH attn_mask = self._make_attn_mask(mask, context_mask, x, context) if self.use_lsh: bs = x.size(0) q, k, v = map(lambda t: rearrange(t, 'bs sl (nh dh) -> nh bs sl dh', nh=self.n_heads), (q, k, v)) # run lsh per head (iterate through 0th dim i.e. the n_head dim), concatenate and rearrange # Note: masks are reused per head lsh_results = L([self.lsh_attn(q_h, k_h, v_h, attn_mask, **kwargs) for q_h, k_h, v_h in zip(q, k, v)]) out = lsh_results.itemgot(0) # split tuple (output, attn, buckets) out = torch.cat([head for head in out], dim=0) # concatenate [n_heads*bs, sl, dh] out = rearrange(out, '(nh bs) sl dh -> bs sl (nh dh)', bs=bs) # [bs, sl, dim_heads] (dim_heads = head_dim * n_heads) # use full attention else: out = self.full_attn(q, k, v, attn_mask) out = self.out_proj(out) return self.dropout(out) def _init(self): [nn.init.xavier_uniform_(w) for w in self.parameters() if w.dim()>1] if self.bias: [nn.init.constant_(b, 0) for b in self.parameters() if b.dim()==1] #TODO: add attn_mask generation def _make_attn_mask(self, mask, context_mask, x, context): b, n, _, device = *x.size(), x.device if any(map(exists, (mask, context_mask))): q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) self_mask = q_mask[:, None, :, None] * q_mask[:, None, None, :] if exists(context): k_mask = default(context_mask, lambda: torch.ones((b, context.shape[-2]), device=device).bool()) cross_mask = q_mask[:, None, :, None] * k_mask[:, None, None, :] else: cross_mask = torch.empty(0, dtype=self_mask.dtype, device=device) return torch.cat([self_mask, cross_mask], dim=-1) else: return None #attn_mask is None if both mask and context_mask are NoneReformerAttentionV2 containes both `LSHAttention` and `ScaledDotProdAttention` and which one to use is determined by `self.lsh_attention` flag.Proposed TODOs:- [x] rename `self.lsh_attention` to `self.use_lsh` to avoid confusion with `self.lsh_attn` which is a module- [x] synchronize mask naming across all Attention modules: input_mask->attn_mask; minor renaming in LSH modules to make it consistent with `Attention`- [x] add masking support to ReformerAttentionV2- [ ] add masking tests- [ ] synchronize `store_attention` functionality- [x] test switchable attention module with synthetic taskbs = 4 sl = 128 d = 256 x = torch.randn(bs, sl, d) attn = ReformerAttentionV2(d, use_lsh=True) out = attn(x) assert (bs, sl, d) == out.size() out.shape # switch to full attention attn.use_lsh = False out = attn(x) assert (bs, sl, d) == out.size() out.shapeState dict remanes unchanged[(k, v.shape) for k, v in attn.state_dict().items()] #hide from nbdev.export import notebook2script; notebook2script()Converted 00_core.ipynb. Converted 01_layers.ipynb. Converted 02_attention.ipynb. Converted 03_transformer.ipynb. Converted 04_reformer.ipynb. Converted 05_tokenizers.ipynb. Converted 06_data.ipynb. Converted 07_metrics.ipynb. Converted 08_optimizers.ipynb. Converted 09_tracking.ipynb. Converted 10_experiment.synthetic-task.ipynb. Converted 10a_experiment.synthetic-task-comparison.ipynb. Converted 10b_experiment.synthetic-task-minimal.ipynb. Converted 10c_experiment.synthetic-task-analysis.ipynb. Converted 11a_experiment.enwik8_baseline.ipynb. Converted 11b_experiment.enwik8_sharedQK.ipynb. Converted 11c_experiment.enwik8_reversible.ipynb. Converted 12_experiment.speed-lsh_synthetic-task.ipynb. Converted 13_experiment.enwik8-n_hashes.ipynb. 
Converted 14_experiment.enwik8-n_layers.ipynb. Converted 20_experiment-script.ipynb. Converted 21_experiment-configs.ipynb. Converted 50_exploration.LSH.ipynb. Converted 51_exploration.LSH-report.ipynb. Converted 52_exploration.memory_and_timing.ipy[...]Testing Conductor Fields in Presence of MR Notebook provides examples of:- Setting up multiple, overlapping mesh refinement patches- Making manual lineout plots of field data- Making a manual plot of conductors on domain (see ConductorPlots for automatic functionality)- Use of lineout widget for field/potential plots Simulation DescriptionSimulation domain is x=120 nm by z=10,000 nm. The grid is included as a single post which is grid_z=100 nm by grid_x=12 nm. The conductor at the top of the grid is z=10 by x=12 nm, only this conductor influences the field solve, however, the whole grid structure will act as a particle sink, as will the cathode and anode. Grid Voltage = 20.0 VAnode Voltage = -0.5 VLineouts are plotted in representation of the simulation domain along with mesh refinement patches. Example line plots are shown for Ex(x) and Ez(z) on parent solver and child MR patches.% matplotlib notebook from __future__ import division import sys del sys.argv[1:] # Necessry to run 'from warp import *' in IPython notebook without conflict. from warp import * import numpy as np import matplotlib.pyplot as plt import os import pickle from re import findall from datetime import datetime from scipy.special import erfinv from warp.data_dumping.openpmd_diag import ParticleDiagnostic from rswarp.diagnostics import FieldDiagnostic from rswarp.diagnostics.plotting import FieldLineout from rswarp.utilities.file_utils import cleanupPrevious from warp.particles.singleparticle import TraceParticle # Plotting imports import matplotlib.animation as anim import matplotlib.lines as mlines import types import matplotlib as mpl from matplotlib import gridspec import matplotlib.patches as patches import h5py # Constants imports from scipy.constants import e, m_e, c, k kb_eV = 8.6173324e-5 # Bolztmann constant in eV/K kb_J = k # Boltzmann constant in J/K m = m_eDiagnostic DirectoriesdiagDir = 'diags/xzsolver/hdf5' field_base_path = 'diags/fields/' child0 = 'diags/fields/child0' diagFDir = {'magnetic':'diags/fields/magnetic','electric':'diags/fields/electric', 'child0': child0} # Cleanup previous files cleanupPrevious(diagDir,diagFDir)Grid Parameters Mesh Refinementmr_flag = True # Turn MR on/off level = 2 # Can set level = 2 for 2 patches, else 1 patch applied mesh_factor = 1 # Scaling for number of cells on parent mesh # GLOBAL USER PARAMETERS # Injection strategy top.inject = 0 # 0 no injection # 1 means constant; 2 means space-charge limited injection; 4 means thermionic; # 6 means user-specified; 5 and 8 are balances between thermionic and SC-limited # Voltages GRID_VOLTAGE = 20.0 ANODE_VOLTAGE = -0.5 # Cathode settings PLATE_SPACING = 10e-6 #plate spacing CHANNEL_WIDTH = 120e-9 #width of simulation box # Dimensions X_MAX = CHANNEL_WIDTH*0.5 X_MIN = -1.*X_MAX Y_MAX = CHANNEL_WIDTH*0.5 Y_MIN = -1.*Y_MAX Z_MIN = 0. Z_MAX = PLATE_SPACING # Grid parameters (Parent Grid) NUM_X = 12 * mesh_factor NUM_Y = 12 * mesh_factor NUM_Z = 1000 * mesh_factor # z step size dz = (Z_MAX - Z_MIN)/NUM_Z Z_PART_MIN = dz / 8. 
# starting particle z value top.dt = 2.5e-15Solver Geometryw3d.solvergeom = w3d.XZgeomGrid and Boundary Conditions# Set boundary conditions # Longitudinal conditions overriden by conducting plates w3d.bound0 = neumann w3d.boundnz = dirichlet w3d.boundxy = periodic # Particles boundary conditions top.pbound0 = absorb top.pboundnz = absorb top.pboundxy = periodic # Set grid boundaries w3d.xmmin = X_MIN w3d.xmmax = X_MAX w3d.zmmin = 0. w3d.zmmax = Z_MAX # Set grid counts w3d.nx = NUM_X w3d.nz = NUM_Z parent_zmesh = np.linspace(0,Z_MAX,NUM_Z+1) #holds the z-axis grid points in an arrayField Solver# Set up fieldsolver f3d.mgtol = 1e-6 # Multigrid solver convergence tolerance, in volts. 1 uV is default in Warp. if mr_flag: solverE = MRBlock2D() registersolver(solverE) solverE.mgverbose = -1 else: solverE = MultiGrid2D() registersolver(solverE) solverE.mgverbose = -1 # Don't print solver convergence informationMesh Refinement PatchesCurrent length of MR patch set to 2*column length. This is a bit arbritrary, could be reduced in length possibly.# Define grid parameters, will be needed to set MR patch column_width = 12e-9 column_length = 90e-9 grid_length = 10e-9 if mr_flag: guardx = 0 # No guard cells in x since it extends to mesh bounds guardz = 2 refinement_level = 10 child0 = solverE.addchild(mins=[w3d.xmmin, 0., PLATE_SPACING - 2 * column_length], maxs=[w3d.xmmax, 0., PLATE_SPACING], nguard=[guardx, 0, guardz],refinement=[refinement_level, 1, refinement_level]) child0.mgverbose = -1 # Need separate call to prevent child solver printout if level == 2: guardx = 0 # No guard cells in x since it extends to mesh bounds guardz = 2 refinement_level = 10 xmin_child2 = -2 child1 = child0.addchild(mins=[-20e-9, 0., PLATE_SPACING - 2 * column_length], maxs=[20e-9, 0., PLATE_SPACING], nguard=[guardx, 0, guardz],refinement=[refinement_level, 1, refinement_level]) child1.mgverbose = -1 # Need separate call to prevent child solver printoutConducting boundaries and Scrapers These must be installed after registering the field solver.# Create source conductors source = ZPlane(zcent=w3d.zmmin,zsign=-1.,voltage=0.) solverE.installconductor(source, dfill=largepos) # Create ground plate plate = ZPlane(voltage=ANODE_VOLTAGE, zcent=PLATE_SPACING) solverE.installconductor(plate, dfill=largepos) # Install Grid Support Column column = Box(column_width, 1., column_length, voltage=0., xcent=0., ycent=0.0, zcent=PLATE_SPACING - column_length / 2.) # Install Grid grid = Box(column_width, 10., grid_length, voltage=GRID_VOLTAGE, xcent=0., ycent=0.0, zcent=PLATE_SPACING - column_length - grid_length / 2.) 
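# Note: only the grid itself is installed in the field solver below. The support column
# is deliberately not installed, so it does not influence the field solve and acts purely
# as a particle sink through the ParticleScraper, consistent with the simulation
# description above (only the conductor at the top of the grid affects the solve).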
solverE.installconductor(grid) # Setup the particle scraper scraper = ParticleScraper([source, plate, column, grid], lcollectlpdata=True) #print (PLATE_SPACING - 2 * column_length) * w3d.nz / (w3d.zmmax - w3d.zmmin) - guardzParticle, Lost Particle, and Field Diagnostics Not installing particle diagnstostics right now while testing MR fields.particleperiod = 100 particle_diagnostic_0 = ParticleDiagnostic(period = particleperiod, top = top, w3d = w3d, species = {species.name: species for species in listofallspecies}, comm_world=comm_world, lparallel_output=False, write_dir = diagDir[:-4]) fieldperiod = 100 efield_diagnostic_0 = FieldDiagnostic.ElectrostaticFields(solver=solverE, top=top, w3d=w3d, comm_world=comm_world, period=fieldperiod) installafterstep(efield_diagnostic_0.write) if mr_flag: efield_diagnostic_1 = FieldDiagnostic.ElectrostaticFields(solver=child0, top=top, w3d=w3d, write_dir='diags/fields/child0', comm_world=comm_world, period=fieldperiod) installafterstep(efield_diagnostic_1.write) if level == 2: efield_diagnostic_2 = FieldDiagnostic.ElectrostaticFields(solver=child1, top=top, w3d=w3d, write_dir='diags/fields/child1', comm_world=comm_world, period=fieldperiod) installafterstep(efield_diagnostic_2.write)Generate PIC code and Run Simulation# Prevent GIST from starting upon setup top.lprntpara = false top.lpsplots = false top.verbosity = 0 # Reduce solver verbosity package("w3d") generate() step(100)*** particle simulation package W3D runningAnalysisdef set_patch(artist, xcorner, ycorner, xlength, ylength, set_facecolor='grey',set_edgecolor='grey',set_alpha=1., scale=1): artist.add_patch( patches.Rectangle( (xcorner * scale, ycorner * scale), xlength * scale, ylength * scale, alpha=set_alpha, facecolor=set_facecolor, edgecolor=set_edgecolor ) )Set lineout positions# Linouts parallel to z x_positions = [-2e-08, 0.0, 4e-08] # Lineouts parallel to x z_positions = [9.9e-06, 9.91e-06, 9.92e-06]Convert position to array indexdef return_index(lbound, ubound, cells, position): """ Give the position of a node on a 1D mesh this function will return the corresponding index of that node in an array that holds the node positions. lbound: Lower bound of mesh domain. ubound: Upper bound of mesh domain. cells: Number of cells along axis in mesh domain. position: Position of mesh node to find corresponding index for. returns Integer """ index = (position - lbound) * cells / (ubound - lbound) return int(index)Plot positions where lineouts will be shownfig = plt.figure(figsize=(12,6)) # Set up plotting region gs = gridspec.GridSpec(1, 15) ax1 = plt.subplot(gs[0,0:8]) ax2 = plt.subplot(gs[0,8:12]) ax3 = plt.subplot(gs[0,12:13]) ax1.set_title("Simulation Region with Mesh Refinement") ax2.set_title("Zoomed in on\n Mesh Refinement Region") # Set plot scales and bounds scale = 1e6 ax1.set_xlim(Z_MIN * scale, Z_MAX * scale) ax1.set_ylim(X_MIN * scale, X_MAX * scale) ax1.set_xlabel('z ($\mu$m)') ax1.set_ylabel('x ($\mu$m)') ax2.set_xlim(9.80, 10.0) ax2.set_ylim(X_MIN * scale, X_MAX * scale) ax2.set_xlabel('z ($\mu$m)') ax2.set_yticklabels([]) xc = PLATE_SPACING - (column_length + grid_length) yc = -column_width / 2. 
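# In the figures below the horizontal axis is z and the vertical axis is x, so (xc, yc)
# is the lower-left (z, x) corner of the grey 'Post + Grid' rectangle; set_patch()
# rescales these values to microns before drawing.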
# Add patches to represent conductors/dielectric regions set_patch(ax1, xc, yc, (column_length + grid_length), column_width, set_facecolor='grey',set_edgecolor='grey',set_alpha=1., scale=scale) set_patch(ax2, xc, yc, (column_length + grid_length), column_width, set_facecolor='grey',set_edgecolor='grey',set_alpha=1., scale=scale) xc1 = PLATE_SPACING - 2 * column_length yc1 = X_MIN set_patch(ax1, xc1, yc1, 2 * (column_length + grid_length), (X_MAX - X_MIN), set_facecolor='#2ca02c',set_edgecolor='#2ca02c',set_alpha=0.4, scale=scale) set_patch(ax2, xc1, yc1, 2 * (column_length + grid_length), (X_MAX - X_MIN), set_facecolor='#2ca02c',set_edgecolor='#2ca02c',set_alpha=0.4, scale=scale) xc2 = PLATE_SPACING - 2 * column_length - guardz * (w3d.zmmax / w3d.nz) yc2 = X_MIN set_patch(ax1, xc2, yc1, (xc1 - xc2), (X_MAX - X_MIN), set_facecolor='#1f77b4',set_edgecolor='#1f77b4',set_alpha=0.4, scale=scale) set_patch(ax2, xc2, yc1, (xc1 - xc2), (X_MAX - X_MIN), set_facecolor='#1f77b4',set_edgecolor='#1f77b4',set_alpha=0.4, scale=scale) set_patch(ax1, xc1, -20e-9, 2 * (column_length + grid_length), 40e-9, set_facecolor='#9467bd',set_edgecolor='#9467bd',set_alpha=0.6, scale=scale) set_patch(ax2, xc1, -20e-9, 2 * (column_length + grid_length), 40e-9, set_facecolor='#9467bd',set_edgecolor='#9467bd',set_alpha=0.6, scale=scale) # Plot lines on plot that represent where the lineout plots of the fields are coming from # Plot lines parallel to z for xpos, cx in zip(x_positions, ['b', 'r', 'g']): ax1.plot(solverE.zmesh * scale, np.ones_like(solverE.zmesh) * xpos * scale, c=cx) # Plot lines parallel to x for zpos, cz in zip(z_positions, ['b', 'r', 'g']): ax2.plot(np.ones_like(solverE.xmesh) * zpos * scale, solverE.xmesh * scale, c=cz) mr_0 = patches.Patch(color='#2ca02c', label='MR x10') mr_1 = patches.Patch(color='#9467bd', label='MR x100') guard1 = patches.Patch(color='#1f77b4', label='Guard Cells') grid = patches.Patch(color='grey', label='Post + Grid') e1 = mlines.Line2D([], [], color='b',label='Field Position #1') e2 = mlines.Line2D([], [], color='r',label='Field Position #2') e3 = mlines.Line2D([], [], color='g',label='Field Position #3') ax3.legend(handles=[mr_0, mr_1, guard1, grid, e1, e2, e3],fontsize=10,loc=6) ax3.axis('off') plt.tight_layout() plt.show()Above: User positions line along which they want to see the electric field data (E_x or E_z) or the potential. 
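As a quick sanity check of this mapping, here is a minimal sketch that reuses return_index, the lineout positions, and the parent solver defined above (the same call signature is used in the comparison plots below); the z-lineouts should land on parent cells 990, 991, 992 and the x-lineouts on cells 4, 6, 10, matching the 'Parent Cell' labels used in those plots.

for zpos in z_positions:
    print(zpos, '->', return_index(solverE.zmmin, solverE.zmmax, np.size(solverE.zmesh), zpos))
for xpos in x_positions:
    print(xpos, '->', return_index(solverE.xmmin, solverE.xmmax, np.size(solverE.xmesh), xpos))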
Load Field/Potential Diagnostic Data From Files# Load parent fields num_steps = '0' * (5 - len(str(top.it))) +str(top.it) fielddata_path = 'diags/fields/electric/data{}.h5'.format(num_steps) fielddata_file = os.path.splitext(os.path.split(fielddata_path)[1])[0] step_number = int(findall(r'\d+', fielddata_file)[0]) data_efield = h5py.File(fielddata_path, 'r') Ex = data_efield['data/%s/meshes/E' % (step_number)][0] Ey = data_efield['data/%s/meshes/E' % (step_number)][1] Ez = data_efield['data/%s/meshes/E' % (step_number)][2] phi = data_efield['data/%s/meshes/phi'% (step_number)] E = data_efield['data/%s/meshes/E' % (step_number)] # Load child fields num_steps = '0' * (5 - len(str(top.it))) +str(top.it) fielddata_path = 'diags/fields/child0/data{}.h5'.format(num_steps) fielddata_file = os.path.splitext(os.path.split(fielddata_path)[1])[0] step_number = int(findall(r'\d+', fielddata_file)[0]) data_efield = h5py.File(fielddata_path, 'r') child0_Ex = data_efield['data/%s/meshes/E' % (step_number)][0] child0_Ey = data_efield['data/%s/meshes/E' % (step_number)][1] child0_Ez = data_efield['data/%s/meshes/E' % (step_number)][2] child0_phi = data_efield['data/%s/meshes/phi'% (step_number)] child0_E = data_efield['data/%s/meshes/E' % (step_number)] # Load child fields num_steps = '0' * (5 - len(str(top.it))) +str(top.it) fielddata_path = 'diags/fields/child1/data{}.h5'.format(num_steps) fielddata_file = os.path.splitext(os.path.split(fielddata_path)[1])[0] step_number = int(findall(r'\d+', fielddata_file)[0]) data_efield = h5py.File(fielddata_path, 'r') child1_Ex = data_efield['data/%s/meshes/E' % (step_number)][0] child1_Ey = data_efield['data/%s/meshes/E' % (step_number)][1] child1_Ez = data_efield['data/%s/meshes/E' % (step_number)][2] child1_phi = data_efield['data/%s/meshes/phi'% (step_number)] child1_E = data_efield['data/%s/meshes/E' % (step_number)]Fields Plots of field data along the [Lineout Locations](locations)First plot $E_x$ field as a function of position along the x-axis at several locations along the z-axis.Second plot $E_z$ field as a function of position along the z-axis at several locations along the x-axis.fig1 = plt.figure(figsize=(12,6)) plt.xlabel("x (nm)") plt.ylabel("Ex (V/m)") plt.title("Comparison of $E_x$ on Parent and Child Grid") scale = 1e9 for zpos, c, cell in zip(z_positions, ['b', 'r', 'g'], [990, 991, 992]): index = return_index(solverE.zmmin, solverE.zmmax, np.size(solverE.zmesh), zpos) plt.plot(solverE.xmesh * scale, Ex[:, index], c=c, label='Parent Cell:' + '{}'.format(cell)) for zpos, c, cell in zip(z_positions, ['b', 'r', 'g'], [990, 991, 992]): index = return_index(child0.zmmin, child0.zmmax, np.size(child0.zmesh), zpos) plt.plot(child0.xmesh * scale, child0_Ex[:, index], '--',c=c, label='Parent Cell:' + '{}'.format(cell)) plt.legend(loc='best', fontsize=10) plt.show() fig = plt.figure(figsize=(12,6)) plt.xlabel("z ($\mu$m)") plt.ylabel("Ez (V/m)") plt.title("Comparison of $E_z$ on Parent and Child Grid") scale = 1e6 for xpos, c, cell in zip(x_positions, ['b', 'r', 'g'], [4, 6, 10]): index = return_index(solverE.xmmin, solverE.xmmax, np.size(solverE.xmesh), xpos) plt.plot(solverE.zmesh * scale, Ez[index, :], c=c, label='Parent Cell:' + '{}'.format(cell)) for xpos, c, cell in zip(x_positions, ['b', 'r', 'g'], [4, 6, 10]): index = return_index(child0.xmmin, child0.xmmax, np.size(child0.xmesh), xpos) plt.plot(child0.zmesh * scale, child0_Ez[index, :], '--',c=c, label='Parent Cell:' + '{}'.format(cell)) plt.xlim(9.80, 10.0) plt.legend(loc='best', fontsize=10) 
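# Reminder: in both comparison figures the dashed curves come from the child (MR) solver
# and the solid curves from the parent solver.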
plt.show()Interactive Field Plots# Lineout Axis: Axis the line intersects # Lineout Intercept: Position the line intersects # Field data: Choose to view Ex, Ez, or Potential along the line plot1 = FieldLineout(solverE, E, phi) plot1()Lambda School Data Science*Unit 2, Sprint 3, Module 1*--- Define ML problemsYou will use your portfolio project dataset for all assignments this sprint. AssignmentComplete these tasks for your project, and document your decisions.- [ ] Choose your target. Which column in your tabular dataset will you predict?- [ ] Is your problem regression or classification?- [ ] How is your target distributed? - Classification: How many classes? Are the classes imbalanced? - Regression: Is the target right-skewed? If so, you may want to log transform the target.- [ ] Choose your evaluation metric(s). - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy? - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics?- [ ] Choose which observations you will use to train, validate, and test your model. - Are some observations outliers? Will you exclude them? - Will you do a random split or a time-based split?- [ ] Begin to clean and explore your data.- [ ] Begin to choose which features, if any, to exclude. Would some features "leak" future information?If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset.Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393)# Choose your target. Which column in your tabular dataset will you predict? df1 = pd.read_csv('vehicles_trimmed.csv') # Choose target target = 'price' # Is your problem regression or classification? # Regression, predicting a continuous variable with many levels # Target distribution sns.distplot(df1['price']) print(np.median(df1['price'])) y.describe() # Very right skewed but the 0s are holding it back even more. # Let's look at logs y = df1['price'] y_log = np.log1p(y) sns.distplot(y_log) # Chop out some outliers df2 = df1[(df1['price'] >= np.percentile(df1['price'], 10)) & (df['price'] <= np.percentile(df1['price'], 99.95))] df2['price'].describe() y = np.log1p(df2['price']) sns.distplot(y) y = y.replace(0, np.NaN) sns.distplot(y) # Now it's left skewed y.describe() # Choose your evaluation metric(s). # R^2 and perhaps MAE #Choose which observations you will use to train, validate, and test your model. train_orig, test = train_test_split(df2, test_size = 0.1, train_size = 0.9) test train, val = train_test_split(train_orig, train_size = 0.9, test_size = 0.1) train # Begin to clean and explore your data. # - [x] Begin to choose which features, if any, to exclude. Would some features "leak" future information? # Question about what to do with price distribution: should I keep closing in the quartiles until the lower hump is left out? # NaNs? Get rid of them?Here, you'll learn about the principles of tidy data and more importantly, why you should care about them and how they make subsequent data analysis more efficient. You'll gain first hand experience with reshaping and tidying your data using techniques such as pivoting and melting. 
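Before working through the exercises, here is a minimal sketch of what melting does on a toy frame (the toy values below are made up purely for illustration):

import pandas as pd

toy = pd.DataFrame({'Day': [1, 2], 'Ozone': [41, 36], 'Wind': [7.4, 8.0]})
# wide -> long: each non-id column is stacked into (variable, value) pairs
print(pd.melt(toy, id_vars=['Day']))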
Recognizing tidy data Reshaping your data using meltMelting data is the process of turning columns of your data into rows of data. Consider the DataFrames from the previous exercise. In the tidy DataFrame, the variables Ozone, Solar.R, Wind, and Temp each had their own column. If, however, you wanted these variables to be in rows instead, you could melt the DataFrame. In doing so, however, you would make the data untidy! This is important to keep in mind: Depending on how your data is represented, you will have to reshape it differently (e.g., this could make it easier to plot values).In this exercise, you will practice melting a DataFrame using pd.melt(). There are two parameters you should be aware of: id_vars and value_vars. The id_vars represent the columns of the data you do not want to melt (i.e., keep it in its current shape), while the value_vars represent the columns you do wish to melt into rows. By default, if no value_vars are provided, all columns not set in the id_vars will be melted. This could save a bit of typing, depending on the number of columns that need to be melted.Load the (tidy) DataFrame airquality and then your job is to melt its Ozone, Solar.R, Wind, and Temp columns into rows. Later in this chapter, you'll learn how to bring this melted DataFrame back into a tidy formfrom IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import numpy as np import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('airquality.csv') df.head() # Use pd.melt() to melt the Ozone, Solar.R, Wind, and Temp columns of airquality into rows. Do this by using id_vars to specify the columns you do not wish to melt: 'Month' and 'Day' df_melt = pd.melt(frame = df, id_vars = ['Month', 'Day']) df_melt.head() df.head() df_melt.head()Customizing melted dataWhen melting DataFrames, it would be better to have column names more meaningful than variable and value (the default names used by pd.melt()).The default names may work in certain situations, but it's best to always have data that is self explanatory.You can rename the variable column by specifying an argument to the var_name parameter, and the value column by specifying an argument to the value_name parameter. You will now practice doing exactly this. __Instruction__- Melt the columns of airquality with the defaultvariablecolumn renamed to'measurement'and the defaultvaluecolumn renamed to'reading'. You can do this by specifying, respectively, the `varname` and `valuename` parameters.df_melt = pd.melt(frame = df, id_vars = ['Month', 'Day'], var_name = 'measurement', value_name = 'reading') df_melt.head()Pivot dataPivoting data is the opposite of melting it. Remember the tidy form that the airquality DataFrame was in before you melted it? You'll now begin pivoting it back into that form using the .pivot_table() method!While melting takes a set of columns and turns it into a single column, pivoting will create a new column for each unique value in a specified column..pivot_table() has an index parameter which you can use to specify the columns that you don't want pivoted: It is similar to the id_vars parameter of pd.melt(). Two other parameters that you have to specify are columns (the name of the column you want to pivot), and values (the values to be used when the column is pivoted). 
- Pivot df_melt by using .pivot_table() with the rows indexed by 'Month' and 'Day', the columns indexed by 'measurement', and the values populated with 'reading'df_melt.head() df_pivot = df_melt.pivot_table(index = ['Month', 'Day'], columns = 'measurement', values = 'reading') df_pivot.head()Resetting the index of a DataFrameAfter pivoting airquality_melt in the previous exercise, you didn't quite get back the original DataFrame.What you got back instead was a pandas DataFrame with a hierarchical index (also known as a MultiIndex).There's a very simple method you can use to get back the original DataFrame from the pivoted DataFrame: .reset_index()# print index of df_pivot df_pivot.index # reset the index of df_pivot using it's reset_index() method df_pivot_reset = df_pivot.reset_index() df_pivot_reset.index df_pivot_reset.head()Pivoting duplicate valuesSo far, you've used the .pivot_table() method when there are multiple index values you want to hold constant during a pivot.Let's say your data collection method accidentally duplicated your dataset.You'll see that by using .pivot_table() and the aggfunc parameter, you can not only reshape your data, but also remove duplicates. Finally, you can then flatten the columns of the pivoted DataFrame using .reset_index()df_dup = pd.concat([df, df]) df_dup.columns df_dup df_dup = pd.concat([df, df], ignore_index=True) df_dup df_dup_melt = pd.melt(frame = df_dup, id_vars = ['Month', 'Day'], var_name = 'measurement', value_name = 'reading') df_melt.head() df_dup_pivot = df_dup_melt.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading', aggfunc=np.mean) df_dup_pivot df_dup_pivot_reset = df_dup_pivot.reset_index() df_dup_pivot_reset.head()Beyond melt and pivot Splitting a column with .strload the dataset tb.csv, consisting of case counts of tuberculosis by country, year, gender, and age group into a dataframe named tbtb = pd.read_csv('tb.csv') tb.head()__In this exercise, you're going to tidy the `m014` column, which represents males aged 0-14 years of age. In order to parse this value, you need to extract the first letter into a new column for gender, and the rest into a column for age_group. Here, since you can parse values by position, you can take advantage of pandas' vectorized string slicing by using the str attribute of columns of type object.__ __Instructions:__- Melt tb keeping 'country' and 'year' fixed.- Create a 'gender' column by slicing the first letter of the variable column of tb_melt.- Create an 'age_group' column by slicing the rest of the variable column of tb_melt.# Melt tb tb_melt = pd.melt(frame = tb, id_vars = ['country', 'year']) tb_melt.head() # create the gender column tb_melt['gender'] = tb_melt.variable.str[0] # create an age-group column tb_melt['age_group'] = tb_melt.variable.str[1:] tb_melt.head()Splitting a column with .split() and .get()Another common way multiple variables are stored in columns is with a delimiter. You'll learn how to deal with such cases in this exercise, using a dataset consisting of Ebola cases and death counts by state and country.Print the columns of ebola in the IPython Shell using ebola.columns. Notice that the data has column names such as Cases_Guinea and Deaths_Guinea. Here, the underscore _ serves as a delimiter between the first part (cases or deaths), and the second part (country).This time, you cannot directly slice the variable by position as in the previous exercise. You now need to use Python's built-in string method called .split(). 
By default, this method will split a string into parts separated by a space. However, in this case you want it to split by an underscore. You can do this on Cases_Guinea, for example, using Cases_Guinea.split('_'), which returns the list ['Cases', 'Guinea'].The next challenge is to extract the first element of this list and assign it to a type variable, and the second element of the list to a country variable. You can accomplish this by accessing the str attribute of the column and using the .get() method to retrieve the 0 or 1 index, depending on the part you want. __Instructions__- Melt ebola using `Date` and `Day` as the id_vars, `type_country` as the var_name, and `counts` as the value_name.- Create a column called `str_split` by splitting the `type_country` column of ebola_melt on `'_'`. Note that you will first have to access the str attribute of type_country before you can use .split().- Create a column called `type` by using the `.get()` method to retrieve index 0 of the `str_split` column of ebola_melt.- Create a column called `country` by using the `.get()` method to retrieve index 1 of the `str_split` column of ebola_melt.ebola = pd.read_csv('ebola.csv') ebola.head() # Melt ebola ebola_melt = pd.melt(frame = ebola, id_vars = ['Date', 'Day'], var_name = 'type_country', value_name = 'counts') ebola_melt.head() # create the 'str_split' column ebola_melt['str_split'] = ebola_melt.type_country.str.split('_') ebola_melt.head(2) # create the 'type' column ebola_melt['type'] = ebola_melt.str_split.str.get(0) ebola_melt.head(2) # create the 'country' column ebola_melt['country'] = ebola_melt.str_split.str.get(1) ebola_melt.head(2)Data Dive Week 9: Decision TreesThis week we take a look at *decision trees*, our second type of classification model that brings deeper into the machine learning territory. We'll be using `scikit-learn` in today's exercise. ***![alt text](https://upload.wikimedia.org/wikipedia/commons/thumb/d/db/Titanic-Cobh-Harbour-1912.JPG/330px-Titanic-Cobh-Harbour-1912.JPG) This week we'll be illustrating how decision trees work using the Titanic survivor dataset available on [Kaggle](https://www.kaggle.com/c/titanic/data). We'll look at a create variety of variables to help us learn predict whether a given passenger on the Titanic was able to survive. There is a ton out on the web (including [here](https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/)) about this dataset, as it's a popular among those just coming up to speed on machine learning classification models. Play around and use what you learn in class to join [the Kaggle competition](https://www.kaggle.com/c/titanic)!. 
*** Data Dictionary|Variable|Definition|Key|| --- | --- |:---|| survival | Survival | 0 = No, 1 = Yes || pclass | Ticket class | 1 = 1st, 2 = 2nd, 3 = 3rd || sex | Sex | | | Age | Age in years | | | sibsp | of siblings / spouses aboard the Titanic | | | parch | of parents / children aboard the Titanic | | | ticket | Ticket number | | | fare | Passenger fare | | | cabin | Cabin number| | | embarked | Port of Embarkation | C = Cherbourg (France), Q = Queenstown (Ireland), S = Southampton (England) |import numpy as np import pandas as pd import warnings import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, recall_score, precision_score, confusion_matrix from sklearn.model_selection import KFold, cross_val_score # Used for visualizing trees, but not strictly necessary from sklearn.externals.six import StringIO from IPython.display import Image from sklearn.tree import export_graphviz import pydotplus %matplotlib inline warnings.filterwarnings("ignore")Load and summarize datadf = pd.read_csv('https://grantmlong.com/data/titanic.csv') df.head() df.Survived.describe()Summarize survival by age.df.loc[(df.Survived==0), 'Age'].hist(bins=20, alpha=.6, color='red', figsize=[15, 5]) df.loc[(df.Survived==1), 'Age'].hist(bins=20, alpha=.6, color='blue')Summarize survival by sex.df[['Sex', 'Survived']].groupby('Sex').agg(['mean', 'count'])Find and Count Nullsdf.isna().sum()TODO: Summarize by Pclass, point of embarkment Data Cleaning and Feature EngineeringSadly `sci-kit learn` will only let use numeric or boolean variables to train our decision tree, so let's transform some of our variables to address that. * Create booleans for each of the Embarkment points.* Create a boolean for is_male. * Create a boolean for whether someone has a cabin. * **TODO, time permitting:** create identifiers for passengers in A, B, C, and D cabinsMoreover, some of our ages are missing, so let's enter the missing values as 100 for now.# Embarkment booleans for k in df.Embarked.unique(): if type(k)==str: df['emb_' + k] = (df.Embarked==k)*1 # Sex boolean df['is_male'] = (df.Sex=='male')*1 # Has cabin boolean df.loc[:, 'has_cabin'] = 0 df.loc[df.Cabin.isna(), 'has_cabin'] = 1 # Age fill df.loc[df.Age.isna(), 'Age'] = 100 print(list(df)) df.head()Let's assign a list of our clean and model ready features to a list so we can call them easily while training our model.features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'emb_S', 'emb_C', 'emb_Q', 'is_male', 'has_cabin'] valid = df[features].notna().all(axis=1) print(len(df), sum(valid))Building a Decision TreeNow that we have variables in good shape, we can start modeling. Let's train a simple tree and see how it performs. Note: for the documentation on `DecisionTreeClassifier`, see [here](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html).dtree=DecisionTreeClassifier( criterion='entropy', random_state=20181105, max_depth=5, #min_samples_split=2, #min_samples_leaf=1, #max_features=None, #max_leaf_nodes=None, ) dtree.fit(df[features], df['Survived'])Visualize the tree. *Note: there's a strong chance this will not work if you do not have `graphviz` installed.* For more on visualizing decision trees see [here](https://chrisalbon.com/machine_learning/trees_and_forests/visualize_a_decision_tree/), and for more on installing graphviz see [here](https://graphviz.gitlab.io). 
To install `graphviz` on my Macbook Air, I used `brew install graphviz`.dot_data = StringIO() export_graphviz(dtree, out_file=dot_data, filled=True, rounded=True, feature_names=features, special_characters=True ) graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) Image(graph.create_png())Calculate metrics from in-sample performancepred_survival = dtree.predict(df[features]) print(confusion_matrix(df.Survived, pred_survival), '\n') print('Accuracy: %0.3f' % accuracy_score(df.Survived, pred_survival)) print('Precision: %0.3f' % precision_score(df.Survived, pred_survival)) print('Recall: %0.3f' % recall_score(df.Survived, pred_survival))Wait, are nonlinear models actually doing better here? * Let's run a logistic regression to comparelogreg = LogisticRegression(random_state=20181105, solver='lbfgs') logreg.fit(df[features], df['Survived']) pred_survival = logreg.predict(df[features]) print(confusion_matrix(df.Survived, pred_survival), '\n') print('Accuracy: %0.3f' % accuracy_score(df.Survived, pred_survival)) print('Precision: %0.3f' % precision_score(df.Survived, pred_survival)) print('Recall: %0.3f' % recall_score(df.Survived, pred_survival))Selecting Hyperparameters with Cross Validation* First, we use the `KFold` function from `sci-kit learn` to generate five folds for cross validation. We can show the balance of the survivor rate among the different folds to get a better idea of what's going on.* Next, we train a different decision tree model against each of the folds and track our performance.* Finally, we track average cv metrics for different values of our hyperparameters.k_fold = KFold(n_splits=5, random_state=20181105) # Print the number of observations and survivor rate for for train_indices, test_indices in k_fold.split(df[features]): print('Train: n=%i, s_rate=%0.2f | test: n=%i, s_rate=%0.2f ' % (df.loc[train_indices, 'Survived'].count(), df.loc[train_indices, 'Survived'].mean(), df.loc[test_indices, 'Survived'].count(), df.loc[test_indices, 'Survived'].mean(), ) )Creating a function to fit our model and return relevant metrics makes it easy to track cross validation performance over different values of our parameters.def get_cv_results(classifier): results = [] for train, test in k_fold.split(df[features]): classifier.fit(df.loc[train, features], df.loc[train, 'Survived']) y_predicted = classifier.predict(df.loc[test, features]) accuracy = accuracy_score(df.loc[test, 'Survived'], y_predicted) results.append(accuracy) return np.mean(results), np.std(results)Let's track mean and variance of accuracy for different values of the minimum samples per split.hp_values = [2, 5, 7, 10, 15, 20, 50, 60, 70, 80, 90, 100, 120, 150] all_mu = [] all_sigma = [] for m in hp_values: dtree=DecisionTreeClassifier( criterion='entropy', random_state=20181105, min_samples_split=m, #max_depth=m, #min_samples_leaf=m, #max_features=m, #max_leaf_nodes=m, ) mu, sigma = get_cv_results(dtree) all_mu.append(mu) all_sigma.append(sigma) print(m, mu, sigma) plt.figure(figsize=(14, 5)) plt.plot(hp_values, all_mu) plt.ylabel('Cross Validation Accuracy') plt.xlabel('Minimum Samples Per Leaf') plt.figure(figsize=(14, 5)) plt.plot(hp_values, all_sigma) plt.ylabel('Cross Validation Std Dev.') plt.xlabel('Minimum Samples Per Leaf')Pretty cool, right? We can take a quick look again at how these results compare to logistic regression.* What do you make of these results?* Is this a better model? 
Why or why not?logreg = LogisticRegression(random_state=20181105, solver='lbfgs') get_cv_results(logreg)Selecting Our Model and Applying It to Our Test Set From this, it seems like `min_samples_split=70` might provide our best fit. We can train our best model using that value. We can then read in our holdout test set from the Kaggle competition to enter our predictions. We'll first double check and see if our model makes sense by taking a closer look at our predictions.dtree=DecisionTreeClassifier( criterion='entropy', random_state=20181105, min_samples_split=90, ) # Here we train our final model against all of our validation data. dtree.fit(df.loc[:, features], df.loc[:, 'Survived'])Read in our test data and apply the same transformations as our training set.test_df = pd.read_csv('https://grantmlong.com/data/titanic_test.csv') # Embarkment booleans for k in test_df.Embarked.unique(): if type(k)==str: test_df['emb_' + k] = (test_df.Embarked==k)*1 # Sex boolean test_df['is_male'] = (test_df.Sex=='male')*1 # Has cabin boolean test_df.loc[:, 'has_cabin'] = 0 test_df.loc[test_df.Cabin.isna(), 'has_cabin'] = 1 # Age fill test_df.loc[test_df.Age.isna(), 'Age'] = 100 # Fare fill test_df.loc[test_df.Fare.isna(), 'Fare'] = test_df.loc[test_df.Fare.notna(), 'Fare'].median() print(list(test_df)) test_df.head()Rank the most likely to survive according to our model.# Calculate the probability of test_probabilities = dtree.predict_proba(test_df[features])[:,1] test_df['survival_likelihood'] = test_probabilities readable_features = ['Name', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked', 'survival_likelihood'] # Find the rankings based on the probabilities probability_rankings = np.argsort(test_probabilities)Most Likely to Survive:test_df.loc[probability_rankings[-20:], readable_features]Most Likely to Die:test_df.loc[probability_rankings[:20], readable_features] Data Science and Business Analytics Intern @ TSF Task 5 : Exploratory Data Analysis : Sports (Indian Premier League)import numpy as np # numerical computing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt #visualization import seaborn as sns #modern visualization #load csv file df=pd.read_csv(r"E:\New folder (4)\matches.csv") df.head() df.shape #loading 2nd dataset df1 = pd.read_csv(r"E:\New folder (4)\deliveries.csv") df1.head() df1.shape df.describe() df1.describe() #merging two dataset merge = pd.merge(df1,df, left_on='match_id', right_on ='id') merge.head(2) merge.info() merge.describe() df.id.is_unique df.set_index('id', inplace=True) df.describe(include = 'all') df.head() df[df.city.isnull()][['city','venue']] #filling missing value df.city = df.city.fillna('Dubai') df[(df.umpire1.isnull()) | (df.umpire2.isnull())] df = df.drop('umpire3', axis = 1) #city has 33 distinct values while we have 35 venues. #Let's find out venues grouped by cities to see which cities have multiple venues city_venue = df.groupby(['city','venue']).count()['season'] city_venue_df = pd.DataFrame(city_venue) city_venue_df #Bengaluru and Bangalore both are in the data when they are same. So we need to keep one of them #Chandigarh and Mohali are same and there is just one stadium Punjab Cricket Association IS Bindra Stadium, Mohali whose value has not been entered correctly. 
We need to have either Chandigarh or Mohali as well as correct name of the stadium there #Mumbai has 3 stadiums/venues used for IPL #Pune has 2 venues for IPL #Number of matches played in each season plt.figure(figsize=(15,5)) sns.countplot('season', data = df) plt.title("Number of matches played each season",fontsize=18,fontweight="bold") plt.ylabel("Count", size = 25) plt.xlabel("Season", size = 25) plt.xticks(size = 20) plt.yticks(size = 20) #Venue which has hosted most number of IPL matches df.venue.value_counts().sort_values(ascending = True).tail(10).plot(kind = 'barh',figsize=(12,8), fontsize=15, color='red') plt.title("Venue which has hosted most number of IPL matches",fontsize=18,fontweight="bold") plt.ylabel("Venue", size = 25) plt.xlabel("Frequency", size = 25) plt.figure(figsize = (18,10)) sns.countplot(x='winner',data=df, palette='cool') plt.title("Numbers of matches won by team ",fontsize=20) plt.xticks(rotation=50) plt.xlabel("Teams",fontsize=15) plt.ylabel("No of wins",fontsize=15) plt.show() #creating a dataframe with season and winner columns winning_teams = df[['season','winner']] #dictionaries to get winners to each season winners_team = {} for i in sorted(winning_teams.season.unique()): winners_team[i] = winning_teams[winning_teams.season == i]['winner'].tail(1).values[0] winners_of_IPL = pd.Series(winners_team) winners_of_IPL = pd.DataFrame(winners_of_IPL, columns=['team']) winners_of_IPL['team'].value_counts().plot(kind = 'barh', figsize = (15,5), color = 'darkblue') plt.title("Winners of IPL across 11 seasons",fontsize=18,fontweight="bold") plt.ylabel("Teams", size = 25) plt.xlabel("Frequency", size = 25) plt.xticks(size = 15) plt.yticks(size = 15) # we will print winner season wise final_matches=df.drop_duplicates(subset=['season'], keep='last') final_matches[['season','winner']].reset_index(drop=True).sort_values('season') #Does teams choosed to bat or field first, after winning toss? df['toss_decision'].value_counts().plot(kind='pie', fontsize=14, autopct='%3.1f%%', figsize=(10,7), shadow=True, startangle=135, legend=True, cmap='Oranges') plt.ylabel('Toss Decision') plt.title('Decision taken by captains after winning tosses') #How toss decision affects match results? 
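# 'Yes' below marks matches in which the toss winner also went on to win the game.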
df['toss_win_game_win'] = np.where((df.toss_winner == df.winner),'Yes','No') plt.figure(figsize = (15,5)) sns.countplot('toss_win_game_win', data=df, hue = 'toss_decision') plt.title("How Toss Decision affects match result", fontsize=18,fontweight="bold") plt.xticks(size = 15) plt.yticks(size = 15) plt.xlabel("Winning Toss and winning match", fontsize = 25) plt.ylabel("Frequency", fontsize = 25) # we will plot graph on Numbers of matches won by Toss result plt.figure(figsize = (18,10)) sns.countplot('season',hue='toss_decision',data=df,palette='afmhot') plt.title("Numbers of matches won by Toss result ",fontsize=20) plt.xlabel("Season",fontsize=15) plt.ylabel("Count",fontsize=15) plt.show() #let's plot the top 10 run getter so far in IPL merge.groupby('batsman')['batsman_runs'].sum().sort_values(ascending = False).head(10).plot(kind = 'bar', color = 'red', figsize = (15,5)) plt.title("Top Run Getters of IPL", fontsize = 20, fontweight = 'bold') plt.xlabel("Batsmen", size = 25) plt.ylabel("Total Runs Scored", size = 25) plt.xticks(size = 12) plt.yticks(size = 12) MoM= df['player_of_match'].value_counts() MoM.head(10).plot(kind = 'bar',figsize=(12,8), fontsize=15, color='blue') plt.title("Top 10 players with most MoM awards",fontsize=18,fontweight="bold") plt.ylabel("Frequency", size = 25) plt.xlabel("Players", size = 25) #Which batsman has been most consistent among top 10 run getters? consistent_batsman = merge[merge.batsman.isin(['', '','','', '', '','', '', '', ''])][['batsman','season','total_runs']] consistent_batsman.groupby(['season','batsman'])['total_runs'].sum().unstack().plot(kind = 'box', figsize = (15,8)) plt.title("Most Consistent batsmen of IPL", fontsize = 20, fontweight = 'bold') plt.xlabel("Batsmen", size = 25) plt.ylabel("Total Runs Scored each season", size = 25) plt.xticks(size = 15) plt.yticks(size = 15) #Which bowlers have performed the best? 
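# Wickets per bowler are counted here as deliveries with a non-null 'player_dismissed' entry.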
merge.groupby('bowler')['player_dismissed'].count().sort_values(ascending = False).head(10).plot(kind = 'bar', color = 'purple', figsize = (15,5)) plt.title("Top Wicket Takers of IPL", fontsize = 20, fontweight = 'bold') plt.xlabel("Bowler", size = 25) plt.ylabel("Total Wickets Taken", size = 25) plt.xticks(size = 12) plt.yticks(size = 12) #We will consider players who have played 10 or more seasons no_of_balls = pd.DataFrame(merge.groupby('batsman')['ball'].count()) #total number of matches played by each batsman runs = pd.DataFrame(merge.groupby('batsman')['batsman_runs'].sum()) #total runs of each batsman seasons = pd.DataFrame(merge.groupby('batsman')['season'].nunique()) #season = 1 implies played only 1 season batsman_strike_rate = pd.DataFrame({'balls':no_of_balls['ball'],'run':runs['batsman_runs'],'season':seasons['season']}) batsman_strike_rate.reset_index(inplace = True) batsman_strike_rate['strike_rate'] = batsman_strike_rate['run']/batsman_strike_rate['balls']*100 highest_strike_rate = batsman_strike_rate[batsman_strike_rate.season.isin([10,11])][['season','batsman','strike_rate']].sort_values(by = 'strike_rate', ascending = False) highest_strike_rate.head(10) plt.figure(figsize = (15,6)) sns.barplot(x='batsman', y='strike_rate', data = highest_strike_rate.head(10), hue = 'season') plt.title("Highest strike rates in IPL",fontsize= 30, fontweight = 'bold') plt.xlabel("Player", size = 25) plt.ylabel("Strike Rate", size = 25) plt.xticks(size = 14) plt.yticks(size = 14) #strike_rate = balls bowled by wickets taken balls_bowled = pd.DataFrame(merge.groupby('bowler')['ball'].count()) wickets_taken = pd.DataFrame(merge[merge['dismissal_kind'] != 'no dismissal'].groupby('bowler')['dismissal_kind'].count()) seasons_played = pd.DataFrame(merge.groupby('bowler')['season'].nunique()) bowler_strike_rate = pd.DataFrame({'balls':balls_bowled['ball'],'wickets':wickets_taken['dismissal_kind'], 'season':seasons_played['season']}) bowler_strike_rate.reset_index(inplace = True) bowler_strike_rate['strike_rate'] = bowler_strike_rate['balls']/bowler_strike_rate['wickets'] def highlight_cols(s): color = 'skyblue' return 'background-color: %s' % color #Strike rate for bowlers who have taken more than 50 wickets best_bowling_strike_rate = bowler_strike_rate[bowler_strike_rate['wickets'] > 50].sort_values(by = 'strike_rate', ascending = True) best_bowling_strike_rate.head().style.applymap(highlight_cols, subset=pd.IndexSlice[:, ['bowler', 'wickets','strike_rate']])Connected components This notebook illustrates the search for [connected components](https://en.wikipedia.org/wiki/Component_(graph_theory)) in graphs.from IPython.display import SVG import numpy as np from sknetwork.data import karate_club, painters, movie_actor from sknetwork.topology import get_connected_components from sknetwork.visualization import svg_graph, svg_digraph, svg_bigraph from sknetwork.utils.format import bipartite2undirectedGraphsgraph = karate_club(metadata=True) adjacency = graph.adjacency position = graph.position # subgraph k = 15 adjacency = adjacency[:k][:,:k] position = position[:k] # connected components labels = get_connected_components(adjacency) image = svg_graph(adjacency, position, labels=labels) SVG(image)Directed graphsgraph = painters(metadata=True) adjacency = graph.adjacency names = graph.names position = graph.position # weak connected components labels = get_connected_components(adjacency) image = svg_digraph(adjacency, position=position, names=names, labels=labels) SVG(image) # strong connected 
components labels = get_connected_components(adjacency, connection='strong') image = svg_digraph(adjacency, position, names, labels) SVG(image)/Users/thomas/Documents/github/scikit-network/sknetwork/visualization/graphs.py:93: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison if name_position == 'left': /Users/thomas/Documents/github/scikit-network/sknetwork/visualization/graphs.py:98: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison elif name_position == 'right': /Users/thomas/Documents/github/scikit-network/sknetwork/visualization/graphs.py:109: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison if name_position == 'above': /Users/thomas/Documents/github/scikit-network/sknetwork/visualization/graphs.py:340: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison if position == 'left': /Users/thomas/Documents/github/scikit-n[...]Bipartite graphsgraph = movie_actor(metadata=True) biadjacency = graph.biadjacency names_row = graph.names_row names_col = graph.names_col # subgraph k = 5 biadjacency = biadjacency[k:] names_row = names_row[k:] adjacency = bipartite2undirected(biadjacency) labels = get_connected_components(adjacency) n_row, _ = biadjacency.shape labels_row = labels[:n_row] labels_col = labels[n_row:] image = svg_bigraph(biadjacency, names_row, names_col, labels_row, labels_col) SVG(image)Introduction to Machine Learning - CSE 474/574 A practical introduction to IPython Notebook Some notes about installation: Don't do this:```sudo apt-get install ipython-notebook``` Instead, do this:```pip install ipython tornado pyzmq```or install Anaconda from [http://store.continuum.io](http://store.continuum.io)You can start IPython notebook by running```ipython notebook --pylab inline``` Lets get started with some simple Bayesian analysisAdapted from [Probabilistic Programming and Bayesian Methods for Hackers](http://nbviewer.ipython.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Prologue/Prologue.ipynb) Task - Estimate the probability of getting a heads for a coin (should be 0.5 for an unbiased coin)- Frequentist Approach: Draw several samples and estimate the parameters that best explain the observation (maximum likelihood)- Bayesian Approach: Start with an initial estimate of the parameter (Prior) and combine it with the likelihood to get an estimate that combines prior and observations (Posterior)%matplotlib inline from IPython.core.pylabtools import figsize import numpy as np from matplotlib import pyplot as plt figsize(11, 9) import scipy.stats as stats dist = stats.beta # this is the prior distribution n_trials = [0, 1, 2, 3, 4, 5, 8, 15, 50, 500] # here we draw samples from a bernoulli distribution which models a coin tossing # note that in next line we are using 0.5 as the parameter for the Bernoulli distribution # which means that we are using a fair coin. However during estimation we do not assume knowledge # of the true parameter data = stats.bernoulli.rvs(0.5, size=n_trials[-1]) x = np.linspace(0, 1, 100) # For the already prepared, I'm using Binomial's conj. prior. 
# (more about this in a few weeks when we get to Probability based methods) for k, N in enumerate(n_trials): sx = plt.subplot(len(n_trials) / 2, 2, k + 1) # plt.setp(sx.get_yticklabels(), visible=False) heads = data[:N].sum() # choose only the first N samples in each iteration y = dist.pdf(x, 1 + heads, 1 + N - heads) # find the posterior distribution # note that in the above line we are computing the posterior distribution for the parameter # assuming that the prior distribution is uniform over [0,1] plt.plot(x, y, label="observe %d tosses,\n %d heads" % (N, heads)) plt.fill_between(x, 0, y, color="#348ABD", alpha=0.4) plt.vlines(0.5, 0, 4, color="k", linestyles="--", lw=1) leg = plt.legend() leg.get_frame().set_alpha(0.4) plt.autoscale(tight=True) plt.suptitle("Bayesian updating of posterior probabilities", y=1.02, fontsize=14) plt.tight_layout()Some basic linear algebra Our goal is to understand and, if possible, solve the system of $n$ linear equations$$\begin{align}a_{00}\,x_0 + a_{01}\,x_1 + \ldots + a_{0,n-1}\,x_{n-1} &= b_0 \\\a_{10}\,x_0 + a_{11}\,x_1 + \ldots + a_{1,n-1}\,x_{n-1} &= b_1 \\\\vdots & \\\a_{n-1,0}\,x_0 + a_{n-1,1}\,x_1 + \ldots + a_{n-1,n-1}\,x_{n-1} &= b_{n-1}\ .\end{align}$$In the system, the $a_{ij}$s and $b_i$s are known, while the $x_i$s are the unkown variables we wish to solve for. In other words, solving the system means finding the values for the $x_i$s using the $a_{ij}$s and $b_i$s. Using matrix notation, we can write the system as$$\begin{pmatrix}a_{00} & a_{01} & \ldots & a_{0,n-1} \\\a_{10} & a_{11} & \ldots & a_{1,n-1} \\\\vdots & & \ddots & \vdots \\\a_{n-1,0} & a_{n-1,1} & \ldots & a_{n-1,n-1}\end{pmatrix} \,\begin{pmatrix} x_0 \\\ x_1 \\\ \vdots \\\ x_{n-1}\end{pmatrix}=\begin{pmatrix} b_0 \\\ b_1 \\\ \vdots \\\ b_{n-1}\end{pmatrix}\ $$or $Ax = b$. 
In this form, a solution to the system is the vector $x$ that satisfies the equation.# a simple example import numpy as np import numpy.linalg as la A = np.matrix([[ 2, 3, 1], [0.5, 2, 0], [ -1, 5, -7]]) b = np.matrix([[10],[-3],[2]]) x = la.inv(A)*b print(x) # what happens with the following matrix A = np.array([[ 2, 3, 1], [0.5, 2, -1], [ -1, 5, -7]]) b = np.matrix([[10],[-3],[2]]) x = la.inv(A)*b print(x)Read datatraining_df = cudf.read_csv('../data/features/UNSW_NB15_training-set.csv') testing_df = cudf.read_csv('../data/features/UNSW_NB15_testing-set.csv') ### ADD IDENTIFIER training_df['test'] = 0 testing_df['test'] = 1 df = cudf.concat([training_df, testing_df]) df = df.drop(['id']).reset_index().rename({'index': 'id'}) df.head() to_keep = [ 'dur', 'proto', 'service', 'state', 'spkts', 'dpkts', 'sbytes', 'dbytes' , 'sttl', 'dttl', 'sload', 'dload', 'sloss', 'dloss' , 'sjit', 'djit', 'swin', 'stcpb', 'dtcpb', 'dwin', 'tcprtt', 'synack', 'ackdat' , 'ct_srv_src', 'ct_state_ttl', 'ct_src_dport_ltm', 'ct_dst_sport_ltm', 'ct_dst_src_ltm' , 'ct_srv_dst', 'attack_cat', 'label', 'test'] df = df[to_keep].reset_index(drop=True).reset_index() df = df.rename({'index': 'id'}) df['id'] = df['id'].astype('int32') df['attack_cat'] = df['attack_cat'].fillna('Normal') df['attack_cat'] = df['attack_cat'].str.replace(' ', '') df.describe()Encode variables Continuous to binsbin_num = 10 quantiles = cudf.DataFrame() for i in range(1, bin_num): quant = i / 10 print(f'Quantile: {quant}') quantiles['q' + str(i)] = df.quantile(q=quant) quantiles = quantiles.dropna().reset_index() quantiles_list = quantiles.to_pandas().to_dict('records') quantiles = [] for q in quantiles_list: if q['index'] not in ['id', 'label', 'test']: elements = list(q.items()) quants = sorted(list(set([0.0] + [e[1] for e in elements[1:]]))) quantiles.append((elements[0][1], quants)) def encode_quantiles(df, quantiles): temp_df = cudf.DataFrame() for q in quantiles: col_name = q[0] bins = q[1] if df[col_name].dtype == 'int64': bins = [int(e) for e in bins] temp_df[col_name + '_bin'] = df[col_name].digitize(np.array(bins)) return temp_df carry_over_cols = ['proto', 'service', 'state', 'attack_cat', 'label', 'test'] df_binned = encode_quantiles(df, quantiles) for col in carry_over_cols: df_binned[col] = df[col] df_binned = df_binned.reset_index().rename({'index': 'id'}) # del df df_binned.head()Categorical to indeximport cuml encoders = {} cols_to_encode = ['proto', 'service', 'state'] for col in cols_to_encode: le = cuml.preprocessing.LabelEncoder() df_binned[col] = df_binned[col].astype('category') df_binned[col + '_enc'] = le.fit_transform(df_binned[col]) encoders[col] = le cols_reordered = [c for c in df_binned.columns if c not in cols_to_encode + ['label']] + ['label'] df_binned = df_binned[cols_reordered] df_binned.columnsExplode to COO format### Attack_cat encoding attack_cat = [ (0, 'Normal') , (1, 'Reconnaissance') , (2, 'Shellcode') , (3, 'Analysis') , (4, 'Backdoor') , (5, 'DoS') , (6, 'Exploits') , (7, 'Generic') , (8, 'Fuzzers') , (9, 'Worms') ] attack_cat = { 'attack_id': [e[0] for e in attack_cat] , 'attack_cat': [e[1] for e in attack_cat] } attack_categories = cudf.DataFrame(attack_cat) df_binned = df_binned.merge(attack_categories, on='attack_cat') df_binned = df_binned.drop('attack_cat') df_binned.head() training_df = df_binned.query('test == 0') training_df = training_df.drop(['test', 'id']).reset_index().rename({'index': 'id'}) # training_df.head() df_exploded = cudf.melt(training_df, id_vars=['id', 'label', 
'attack_id']).sort_values('id').reset_index(drop=True) df_exploded['variable'] = df_exploded['variable'].astype('str') df_exploded['value'] = df_exploded['value'].astype('str') df_exploded['feature'] = df_exploded['variable'] + '=' + df_exploded['value'] df_exploded.drop(['variable', 'value']) feature_encoding = df_exploded['feature'].unique().reset_index() len(df_binned), len(training_df) feature_encoding['index'] = feature_encoding['index'].astype('int16') feature_encoding = feature_encoding.rename({'index': 'feature_enc'}) feature_encoding.head() df_exploded = df_exploded.merge(feature_encoding, on='feature')[['id', 'feature_enc', 'label', 'attack_id']] df_exploded.head()Finding frequent patternsdef mine_patterns(df_coo, df_binned, min_attack_rate=0.75, min_feature_count=200, max_iter=-1): def return_rank(frate, counts, rank): for i, (f, c) in enumerate(zip(frate, counts)): rank[i] = math.log(float(c)) * f features = df_coo['feature_enc'].unique().to_frame() df_coo = df_coo.merge(features, on='feature_enc') #### FIND FREQUENT ITEMS freq_items = df_coo.groupby(['feature_enc']).agg({'id': 'count', 'label': 'sum'}).reset_index() freq_items['attack_rate'] = freq_items['label'] / freq_items['id'] freq_items = freq_items.apply_rows( return_rank , incols = {'label': 'counts', 'attack_rate': 'frate'} , outcols = {'rank': np.float64} , kwargs = {} ) freq_items = freq_items.sort_values('rank', ascending=False) freq_items.head(5) freq_items = freq_items.query(f'attack_rate >= {min_attack_rate} and label > {min_feature_count}') if max_iter == -1: max_iter = len(freq_items) devices_checked = {} patterns = [] stats = [] features_ordered = list(freq_items['feature_enc'].to_array()) for i in range(max_iter): feature = features_ordered[i] ### get all the ids ids = df_coo.query('feature_enc == @feature')['id'].unique().to_frame() h = ids.hash_columns(['id']).sum() if h not in devices_checked: count_ids = len(ids) devices_checked[h] = 1 ### OUTPUT PATTERN all_features = df_coo.merge(ids, on='id').groupby('feature_enc').agg({'label': 'count'}).query('label == @count_ids').reset_index() all_features['pattern_id'] = i patterns.append(all_features[['pattern_id', 'feature_enc']]) ### OUTPUT STATS ids = ids.merge(df_binned, on='id') ids['pattern_id'] = i ids = ids.groupby('pattern_id').agg({'id': 'count', 'label': 'sum'}) ids = ids.rename({'id': 'packet_count', 'label': 'attack_count'}) ids['attack_rate'] = ids['attack_count'] / ids['packet_count'] ids['feature_cnt'] = all_features['feature_enc'].count() ids = ids.reset_index() stats.append(ids) patterns = cudf.concat(patterns).merge(features, on='feature_enc').sort_values(by='pattern_id') stats = cudf.concat(stats) return patterns, statstime: 10.6 msTestingtesting_binned = df_binned.query('test == 1')time: 233 msPattern encodingdef encode_patterns(binned_df, patterns_to_encode): subset = binned_df[['id', 'label']] subset['pred'] = 0 for i, pattern in enumerate(patterns_to_encode): q = ' and '.join([' == '.join(e) for e in pattern]) temp = binned_df.query(q)['id'].to_frame() temp['enc'] = 1 subset = subset.merge(temp, on=['id'], how='left') subset = subset.rename({'enc': 'col_' + str(i)}) subset['col_' + str(i)] = subset['col_' + str(i)].fillna(0) subset['col_' + str(i)] = subset['col_' + str(i)].astype('float32') subset['pred'] = subset['pred'] + subset['col_'+str(i)] subset['pred'] = subset['pred'] > 0 subset['pred'] = subset['pred'].astype('int8') return subsettime: 1.86 msEnd-2-end pattern miningdef calculate_metrics(df): ttl = df['id'].sum() accuracy = 
df.query('(label == 0 and pred == 0) or (label == 1 and pred == 1)')['id'].sum() / ttl fp = df.query('(label == 0 and pred == 1)')['id'].sum() tn = df.query('(label == 0 and pred == 0)')['id'].sum() fn = df.query('(label == 1 and pred == 0)')['id'].sum() tp = df.query('(label == 1 and pred == 1)')['id'].sum() fpr = fp / (fp + tn) fnr = fn / (fn + tp) return fp, tn, fn, tp, accuracy, fpr, fnr, (fpr + fnr) / 2 def encode_patterns(binned_df, patterns_to_encode): subset = binned_df[['id', 'label']] subset['pred'] = 0 for i, pattern in enumerate(patterns_to_encode): q = ' and '.join([' == '.join(e) for e in pattern]) temp = binned_df.query(q)['id'].to_frame() temp['enc'] = 1 subset = subset.merge(temp, on=['id'], how='left') subset = subset.rename({'enc': 'col_' + str(i)}) subset['col_' + str(i)] = subset['col_' + str(i)].fillna(0) subset['col_' + str(i)] = subset['col_' + str(i)].astype('float32') subset['pred'] = subset['pred'] + subset['col_'+str(i)] subset['pred'] = subset['pred'] > 0 subset['pred'] = subset['pred'].astype('int8') return subset def e2e_pattern_mining(label, df, training_df, testing_df, feature_encoding, results, min_attack_rate=.5, min_feature_count=200): print('[{0}] Mining features...'.format(label)) patterns, stats = mine_patterns(df, training_df, min_attack_rate=min_attack_rate, min_feature_count=min_feature_count) patterns['label'] = label stats['label'] = label print('[{0}] Encoding patterns...'.format(label)) patterns_rec = patterns.merge(feature_encoding, on='feature_enc') patterns_rec['col_name'] = patterns_rec['feature'].str.split('=')[0] patterns_rec['col_val'] = patterns_rec['feature'].str.split('=')[1] patterns_rec = patterns_rec.sort_values(by='pattern_id').reset_index(drop=True) patterns_expl = patterns_rec[['pattern_id', 'col_name', 'col_val']].to_pandas().to_records() patterns_to_encode = [] curr = 0 temp = [] for i in patterns_expl: if curr == i[1]: temp.append((i[2], i[3])) else: patterns_to_encode.append(temp) temp = [(i[2], i[3])] curr = i[1] encoded_testing = encode_patterns(testing_df, patterns_to_encode) encoded_testing['id'] = encoded_testing['id'].astype('float32') #### Encoding training dataset encoded_df = encode_patterns(training_df, patterns_to_encode) X = encoded_df[['col_' + str(i) for i in range(len(patterns_to_encode))]] y = encoded_df['label'].astype('int32') #### Binned data X_binned = training_df[[col for col in training_df.columns if col not in ['id', 'label', 'attack_id']]] for col in X_binned: X_binned[col] = X_binned[col].astype('float32') y_binned = training_df['label'].astype('float32') X_testing_binned = testing_df[[col for col in testing_df.columns if col not in ['id', 'test', 'label', 'attack_id']]] for col in X_testing_binned: X_testing_binned[col] = X_testing_binned[col].astype('float32') y_testing_binned = testing_df[['id', 'label']] print('[{0}] Building models'.format(label)) ##################################### #### SIMPLE ENCODING ##################################### pred = encoded_testing.groupby(['label', 'pred']).agg({'id': 'count'}).reset_index() simple_results = calculate_metrics(pred) row_to_insert = { 'label': label , 'model': 'simple' , 'fp': simple_results[0] , 'tn': simple_results[1] , 'fn': simple_results[2] , 'tp': simple_results[3] , 'accuracy': simple_results[4] , 'fpr': simple_results[5] , 'fnr': simple_results[6] , 'far': simple_results[7] } simple_results_df = cudf.DataFrame(row_to_insert) ##################################### #### RANDOM FOREST ##################################### print('[{0}]\tRandom 
Forest'.format(label)) rf = cuRFC(max_features=1.0, n_estimators=100, n_bins=10) rf.fit(X, y) encoded_testing = encoded_testing.drop('pred') encoded_testing['pred'] = rf.predict(encoded_testing[['col_' + str(i) for i in range(len(patterns_to_encode))]]) rf_conf = encoded_testing.groupby(['label', 'pred']).agg({'id': 'count'}).reset_index() rf_results = calculate_metrics(rf_conf) row_to_insert = { 'label': label , 'model': 'random forest' , 'fp': rf_results[0] , 'tn': rf_results[1] , 'fn': rf_results[2] , 'tp': rf_results[3] , 'accuracy': rf_results[4] , 'fpr': rf_results[5] , 'fnr': rf_results[6] , 'far': rf_results[7] } rf_results_df = cudf.DataFrame(row_to_insert) ##################################### #### SUPPORT VECTOR MACHINES ##################################### print('[{0}]\tSupport Vector Machines'.format(label)) svc = SVC() svc.fit(X, y) encoded_testing = encoded_testing#.drop('pred') encoded_testing['pred'] = svc.predict(encoded_testing[['col_' + str(i) for i in range(len(patterns_to_encode))]]) svc_conf = encoded_testing.groupby(['label', 'pred']).agg({'id': 'count'}).reset_index() svc_results = calculate_metrics(svc_conf) row_to_insert = { 'label': label , 'model': 'SVC' , 'fp': svc_results[0] , 'tn': svc_results[1] , 'fn': svc_results[2] , 'tp': svc_results[3] , 'accuracy': svc_results[4] , 'fpr': svc_results[5] , 'fnr': svc_results[6] , 'far': svc_results[7] } svc_results_df = cudf.DataFrame(row_to_insert) results = cudf.concat([simple_results_df, rf_results_df, svc_results_df]) return patterns, stats, results results = cudf.DataFrame() patterns = cudf.DataFrame() stats = cudf.DataFrame() #### overall label p, s, r = e2e_pattern_mining('0/1 Overall Label', df_exploded, training_df, testing_binned, feature_encoding, results, min_attack_rate=.85) results = cudf.concat([results, r]) patterns = cudf.concat([patterns, p]) stats = cudf.concat([stats, s]) results def redefine_label(attack_id, new_label, attack_select): for i, ai in enumerate(attack_id): new_label[i] = 1 if ai == attack_select else 0 for i, ac in list(zip(attack_cat['attack_id'], attack_cat['attack_cat']))[1:]: training_df_new = training_df.apply_rows( redefine_label , incols = ['attack_id'] , outcols = {'new_label': np.int32} , kwargs = {'attack_select': i} ) testing_binned_new = testing_binned.apply_rows( redefine_label , incols = ['attack_id'] , outcols = {'new_label': np.int32} , kwargs = {'attack_select': i} ) training_df_new = training_df_new.drop('label').rename({'new_label': 'label'}) testing_binned_new = testing_binned_new.drop('label').rename({'new_label': 'label'}) p, s, r = e2e_pattern_mining(ac + ' (full)', df_exploded, training_df_new, testing_binned_new, feature_encoding, results, min_attack_rate=.85) results = cudf.concat([results, r]) patterns = cudf.concat([patterns, p]) stats = cudf.concat([stats, s]) # results for i, ac in list(zip(attack_cat['attack_id'], attack_cat['attack_cat']))[1:]: # print(i, ac) training_df_new = training_df.query('attack_id == 0 or attack_id == @i') testing_binned_new = testing_binned.query('attack_id == 0 or attack_id == @i') p, s, r = e2e_pattern_mining(ac + ' (limited)', df_exploded, training_df_new, testing_binned_new, feature_encoding, results, min_attack_rate=.85) results = cudf.concat([results, r]) patterns = cudf.concat([patterns, p]) stats = cudf.concat([stats, s]) results[Reconnaissance (limited)] Mining features... [Reconnaissance (limited)] Encoding patterns... 
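The metric bookkeeping above is easy to sanity-check on a toy confusion table. A minimal standalone sketch, using pandas in place of cudf (the cudf DataFrame API mirrors it) and a made-up grouped count table with the same label/pred/id columns:

import pandas as pd

# Toy output of groupby(['label', 'pred']).agg({'id': 'count'}):
# 'id' holds how many rows fall into each (label, pred) cell.
conf = pd.DataFrame({
    'label': [0, 0, 1, 1],
    'pred':  [0, 1, 0, 1],
    'id':    [900, 50, 30, 20],
})

def confusion_metrics(df):
    """Accuracy, FPR, FNR and FAR = (FPR + FNR) / 2 from a (label, pred, count) table."""
    ttl = df['id'].sum()
    tp = df.query('label == 1 and pred == 1')['id'].sum()
    tn = df.query('label == 0 and pred == 0')['id'].sum()
    fp = df.query('label == 0 and pred == 1')['id'].sum()
    fn = df.query('label == 1 and pred == 0')['id'].sum()
    accuracy = (tp + tn) / ttl
    fpr = fp / (fp + tn)
    fnr = fn / (fn + tp)
    return fp, tn, fn, tp, accuracy, fpr, fnr, (fpr + fnr) / 2

print(confusion_metrics(conf))  # (50, 900, 30, 20, 0.92, ~0.053, 0.6, ~0.326)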
[Reconnaissance (limited)] Building models [Reconnaissance (limited)] Random Forest [Reconnaissance (limited)] Support Vector Machines [Shellcode (limited)] Mining features... [Shellcode (limited)] Encoding patterns... [Shellcode (limited)] Building models [Shellcode (limited)] Random Forest [Shellcode (limited)] Support Vector Machines [Analysis (limited)] Mining features... [Analysis (limited)] Encoding patterns... [Analysis (limited)] Building models [Analysis (limited)] Random Forest [Analysis (limited)] Support Vector Machines [Backdoor (limited)] Mining features... [Backdoor (limited)] Encoding patterns... [Backdoor (limited)] Building models [Backdoor (limited)] Random Forest [Backdoor (limited)] Support Vector Machines [DoS (limited)] Mining features... [DoS (limited)] Encoding patterns... [DoS (limited)] Building models [DoS (limited)] Random Forest [DoS (limited)] Support Vector Mach[...]Results -- simple encodingdef to_list(x): return list(x) def ranges(bin_no, bins): if bin_no == len(bins): return '>{0:f}'.format(bins[-1]) else: return '<{0:,f}, {1:,f})'.format(bins[bin_no-1], bins[bin_no]) quantile_bins = pd.DataFrame(quantiles, columns=['feat', 'bins']) patterns_rec = patterns.merge(feature_encoding, on='feature_enc') patterns_host = patterns_rec[['pattern_id', 'feature', 'label']].to_pandas() patterns_host['feat'] = patterns_host.apply(lambda row: row['feature'].split('='), axis = 1) patterns_host['bin'] = patterns_host.apply(lambda row: row['feat'][1], axis = 1) patterns_host['feat'] = patterns_host.apply(lambda row: row['feat'][0][:-4], axis = 1) patterns_host = patterns_host.merge(quantile_bins, on=['feat']) patterns_host['ranges'] = patterns_host.apply(lambda row: ranges(int(row['bin']), row['bins']), axis=1) patterns_host['feature'] = patterns_host['feat'] + '=' + patterns_host['ranges'] patterns_host = patterns_host[['label', 'pattern_id', 'feature']].sort_values(by='pattern_id') patterns_host_agg = patterns_host.groupby(['label', 'feature']).agg({'pattern_id': to_list}).reset_index() patterns_host_agg['pattern_id'] = patterns_host_agg.apply(lambda row: ','.join([str(e) for e in row['pattern_id']]), axis = 1)# patterns_host_agg = patterns_host_agg.groupby(['label', 'pattern_id']).agg({'feature': to_list}).reset_index() # patterns_host_agg.to_dict('records') # quantile_bins statsLogistic Regression modelreg = LogisticRegression(fit_intercept=False, C=0.1) reg.fit(X, y) encoded_testing = encoded_testing.drop('pred') encoded_testing['pred'] = reg.predict(encoded_testing[['col_' + str(i) for i in range(len(patterns_to_encode))]]) results = encoded_testing.groupby(['label', 'pred']).agg({'id': 'count'}).reset_index() results calculate_metrics(results)Support Vector Machinesreg = SVC() reg.fit(X, y) encoded_testing = encoded_testing.drop('pred') encoded_testing['pred'] = reg.predict(encoded_testing[['col_' + str(i) for i in range(len(patterns_to_encode))]]) results = encoded_testing.groupby(['label', 'pred']).agg({'id': 'count'}).reset_index() results calculate_metrics(results)Binned features# training_df.head() X = training_df[[col for col in training_df.columns if col not in ['id', 'label', 'attack_id']]] for col in X: X[col] = X[col].astype('float32') y = training_df['label'].astype('float32') X_testing = testing_binned[[col for col in testing_binned.columns if col not in ['id', 'test', 'label', 'attack_id']]] y_testing = testing_binned[['id', 'label']] reg = SVC() reg.fit(X, y) X.columns for col in X_testing: X_testing[col] = X_testing[col].astype('float32') # 
X_testing.columns y_testing['pred'] = reg.predict(X_testing) results = y_testing.groupby(['label', 'pred']).agg({'id': 'count'}).reset_index() results calculate_metrics(results) encoded_df.groupby('col_0').agg({'id': 'count', 'label': 'sum'}) sub = df_binned.query('ct_dst_sport_ltm_bin == 4')[['id', 'label']] sub['pred'] = 1 sub['label'].sum(),sub['id'].count() # sub fff = df_binned.merge(sub, on='id', how='left')#.head() fff['pred'] = fff['pred'].fillna(0) fff.groupby('pred').agg({'id': 'count', 'label_x': 'sum'})Recursiondef factorial(x): # Exit condition if x == 1: return 1 return x * factorial(x - 1) print(f'Factorial of 5 is {factorial(5)}')Factorial of 5 is 120Implementationclass Node: ''' Helper class which implements a single tree node. ''' def __init__(self, feature=None, threshold=None, data_left=None, data_right=None, gain=None, value=None): self.feature = feature self.threshold = threshold self.data_left = data_left self.data_right = data_right self.gain = gain self.value = value class DecisionTree: ''' Class which implements a decision tree classifier algorithm. ''' def __init__(self, min_samples_split=2, max_depth=5): self.min_samples_split = min_samples_split self.max_depth = max_depth self.root = None @staticmethod def _entropy(s): ''' Helper function, calculates entropy from an array of integer values. :param s: list :return: float, entropy value ''' # Convert to integers to avoid runtime errors counts = np.bincount(np.array(s, dtype=np.int64)) # Probabilities of each class label percentages = counts / len(s) # Caclulate entropy entropy = 0 for pct in percentages: if pct > 0: entropy += pct * np.log2(pct) return -entropy def _information_gain(self, parent, left_child, right_child): ''' Helper function, calculates information gain from a parent and two child nodes. 
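# A quick standalone check of the bincount-based entropy above (not part of the class):
import numpy as np

def entropy(labels):
    # class counts -> probabilities -> Shannon entropy in bits
    counts = np.bincount(np.array(labels, dtype=np.int64))
    probs = counts / len(labels)
    return sum(-p * np.log2(p) for p in probs if p > 0)

print(entropy([0, 0, 0, 1, 1, 1]))  # 1.0   -- evenly mixed, maximal for two classes
print(entropy([0, 0, 0, 0, 1]))     # ~0.72 -- mostly one class
print(entropy([1, 1, 1, 1]))        # 0.0   -- a pure node has no uncertainty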
:param parent: list, the parent node:param left_child: list, left child of a parent :param right_child: list, right child of a parent :return: float, information gain ''' num_left = len(left_child) / len(parent) num_right = len(right_child) / len(parent) # One-liner which implements the previously discussed formula return self._entropy(parent) - (num_left * self._entropy(left_child) + num_right * self._entropy(right_child)) def _best_split(self, X, y): ''' Helper function, calculates the best split for given features and target :param X: np.array, features :param y: np.array or list, target :return: dict ''' best_split = {} best_info_gain = -1 n_rows, n_cols = X.shape # For every dataset feature for f_idx in range(n_cols): X_curr = X[:, f_idx] # For every unique value of that feature for threshold in np.unique(X_curr): # Construct a dataset and split it to the left and right parts # Left part includes records lower or equal to the threshold # Right part includes records higher than the threshold df = np.concatenate((X, y.reshape(1, -1).T), axis=1) df_left = np.array([row for row in df if row[f_idx] <= threshold]) df_right = np.array([row for row in df if row[f_idx] > threshold]) # Do the calculation only if there's data in both subsets if len(df_left) > 0 and len(df_right) > 0: # Obtain the value of the target variable for subsets y = df[:, -1] y_left = df_left[:, -1] y_right = df_right[:, -1] # Caclulate the information gain and save the split parameters # if the current split if better then the previous best gain = self._information_gain(y, y_left, y_right) if gain > best_info_gain: best_split = { 'feature_index': f_idx, 'threshold': threshold, 'df_left': df_left, 'df_right': df_right, 'gain': gain } best_info_gain = gain return best_split def _build(self, X, y, depth=0): ''' Helper recursive function, used to build a decision tree from the input data. :param X: np.array, features :param y: np.array or list, target :param depth: current depth of a tree, used as a stopping criteria :return: Node ''' n_rows, n_cols = X.shape # Check to see if a node should be leaf node if n_rows >= self.min_samples_split and depth <= self.max_depth: # Get the best split best = self._best_split(X, y) # If the split isn't pure if best['gain'] > 0: # Build a tree on the left left = self._build( X=best['df_left'][:, :-1], y=best['df_left'][:, -1], depth=depth + 1 ) right = self._build( X=best['df_right'][:, :-1], y=best['df_right'][:, -1], depth=depth + 1 ) return Node( feature=best['feature_index'], threshold=best['threshold'], data_left=left, data_right=right, gain=best['gain'] ) # Leaf node - value is the most common target value return Node( value=Counter(y).most_common(1)[0][0] ) def fit(self, X, y): ''' Function used to train a decision tree classifier model. :param X: np.array, features :param y: np.array or list, target :return: None ''' # Call a recursive function to build the tree self.root = self._build(X, y) def _predict(self, x, tree): ''' Helper recursive function, used to predict a single instance (tree traversal). :param x: single observation :param tree: built tree :return: float, predicted class ''' # Leaf node if tree.value != None: return tree.value feature_value = x[tree.feature] # Go to the left if feature_value <= tree.threshold: return self._predict(x=x, tree=tree.data_left) # Go to the right if feature_value > tree.threshold: return self._predict(x=x, tree=tree.data_right) def predict(self, X): ''' Function used to classify new instances. 
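# Standalone sketch of the information-gain formula used in _best_split: entropy of the
# parent minus the size-weighted entropy of the two children (the helper mirrors _entropy).
import numpy as np

def _ent(s):
    counts = np.bincount(np.array(s, dtype=np.int64))
    probs = counts / len(s)
    return sum(-p * np.log2(p) for p in probs if p > 0)

parent = [0, 0, 0, 0, 1, 1, 1, 1]            # entropy 1.0
left, right = [0, 0, 0, 1], [0, 1, 1, 1]     # a mediocre split
gain = _ent(parent) - (len(left) / len(parent) * _ent(left)
                       + len(right) / len(parent) * _ent(right))
print(round(gain, 3))  # 0.189 -- a perfect split ([0,0,0,0] vs [1,1,1,1]) would score 1.0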
:param X: np.array, features :return: np.array, predicted classes ''' # Call the _predict() function for every observation return [self._predict(x, self.root) for x in X]Testingfrom sklearn.datasets import load_iris iris = load_iris() X = iris['data'] y = iris['target'] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) model = DecisionTree() model.fit(X_train, y_train) preds = model.predict(X_test) np.array(preds, dtype=np.int64) y_test from sklearn.metrics import accuracy_score accuracy_score(y_test, preds)Comparison with Scikit-Learnfrom sklearn.tree import DecisionTreeClassifier sk_model = DecisionTreeClassifier() sk_model.fit(X_train, y_train) sk_preds = sk_model.predict(X_test) accuracy_score(y_test, sk_preds)Metalearner Utils> Metalearner Utils#hide from nbdev.showdoc import * #export # REFERENCE: https://github.com/uber/causalml # Copyright 2019 Uber Technology, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pandas as pd import numpy as np from packaging import version from xgboost import __version__ as xgboost_version def convert_pd_to_np(*args): output = [obj.to_numpy() if hasattr(obj, "to_numpy") else obj for obj in args] return output if len(output) > 1 else output[0] def check_treatment_vector(treatment, control_name=None): n_unique_treatments = np.unique(treatment).shape[0] assert n_unique_treatments > 1, \ 'Treatment vector must have at least two levels.' if control_name is not None: assert control_name in treatment, \ 'Control group level {} not found in treatment vector.'.format(control_name) def check_p_conditions(p, t_groups): eps = np.finfo(float).eps assert isinstance(p, (np.ndarray, pd.Series, dict)), \ 'p must be an np.ndarray, pd.Series, or dict type' if isinstance(p, (np.ndarray, pd.Series)): assert t_groups.shape[0] == 1, \ 'If p is passed as an np.ndarray, there must be only 1 unique non-control group in the treatment vector.' assert (0 + eps < p).all() and (p < 1 - eps).all(), \ 'The values of p should lie within the (0, 1) interval.' if isinstance(p, dict): for t_name in t_groups: assert (0 + eps < p[t_name]).all() and (p[t_name] < 1 - eps).all(), \ 'The values of p should lie within the (0, 1) interval.' def check_explain_conditions(method, models, X=None, treatment=None, y=None): valid_methods = ['gini', 'permutation', 'shapley'] assert method in valid_methods, 'Current supported methods: {}'.format(', '.join(valid_methods)) if method in ('gini', 'shapley'): conds = [hasattr(mod, "feature_importances_") for mod in models] assert all(conds), "Both models must have .feature_importances_ attribute if method = {}".format(method) if method in ('permutation', 'shapley'): assert all(arr is not None for arr in (X, treatment, y)), \ "X, treatment, and y must be provided if method = {}".format(method) def clean_xgboost_objective(objective): """ Translate objective to be compatible with loaded xgboost version Args ---- objective : string The objective to translate. 
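# Standalone sketch of the version gate used by clean_xgboost_objective below.
# packaging.version parses the version string so releases compare by numeric
# components rather than as plain text. (Hypothetical helper, for illustration only.)
from packaging import version

def translate_objective(objective, xgb_version):
    before_v83 = {'reg:squarederror': 'reg:linear'}     # alias expected by xgboost < 0.83
    v83_or_later = {'reg:linear': 'reg:squarederror'}
    if version.parse(xgb_version) < version.parse('0.83'):
        return before_v83.get(objective, objective)
    return v83_or_later.get(objective, objective)

print(translate_objective('reg:squarederror', '0.82'))  # 'reg:linear'
print(translate_objective('reg:linear', '1.6.1'))       # 'reg:squarederror'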
Returns ------- The translated objective, or original if no translation was required. """ compat_before_v83 = {'reg:squarederror': 'reg:linear'} compat_v83_or_later = {'reg:linear': 'reg:squarederror'} if version.parse(xgboost_version) < version.parse('0.83'): if objective in compat_before_v83: objective = compat_before_v83[objective] else: if objective in compat_v83_or_later: objective = compat_v83_or_later[objective] return objective def get_xgboost_objective_metric(objective): """ Get the xgboost version-compatible objective and evaluation metric from a potentially version-incompatible input. Args ---- objective : string An xgboost objective that may be incompatible with the installed version. Returns ------- A tuple with the translated objective and evaluation metric. """ def clean_dict_keys(orig): return {clean_xgboost_objective(k): v for (k, v) in orig.items()} metric_mapping = clean_dict_keys({ 'rank:pairwise': 'auc', 'reg:squarederror': 'rmse', }) objective = clean_xgboost_objective(objective) assert (objective in metric_mapping), \ 'Effect learner objective must be one of: ' + ", ".join(metric_mapping) return objective, metric_mapping[objective] #export EPS = 1e-15 #export import logging import numpy as np from sklearn.metrics import mean_squared_error as mse from sklearn.metrics import mean_absolute_error as mae # noqa from sklearn.metrics import r2_score # noqa logger = logging.getLogger('causalnlp') def ape(y, p): """Absolute Percentage Error (APE). Args: y (float): target p (float): prediction Returns: e (float): APE """ assert np.abs(y) > EPS return np.abs(1 - p / y) def mape(y, p): """Mean Absolute Percentage Error (MAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): MAPE """ filt = np.abs(y) > EPS return np.mean(np.abs(1 - p[filt] / y[filt])) def smape(y, p): """Symmetric Mean Absolute Percentage Error (sMAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): sMAPE """ return 2. * np.mean(np.abs(y - p) / (np.abs(y) + np.abs(p))) def rmse(y, p): """Root Mean Squared Error (RMSE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): RMSE """ # check and get number of samples assert y.shape == p.shape return np.sqrt(mse(y, p)) def gini(y, p): """Normalized Gini Coefficient. Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): normalized Gini coefficient """ # check and get number of samples assert y.shape == p.shape n_samples = y.shape[0] # sort rows on prediction column # (from largest to smallest) arr = np.array([y, p]).transpose() true_order = arr[arr[:, 0].argsort()][::-1, 0] pred_order = arr[arr[:, 1].argsort()][::-1, 0] # get Lorenz curves l_true = np.cumsum(true_order) / np.sum(true_order) l_pred = np.cumsum(pred_order) / np.sum(pred_order) l_ones = np.linspace(1/n_samples, 1, n_samples) # get Gini coefficients (area between curves) g_true = np.sum(l_ones - l_true) g_pred = np.sum(l_ones - l_pred) # normalize to true Gini coefficient return g_pred / g_true def regression_metrics(y, p, w=None, metrics={'RMSE': rmse, 'sMAPE': smape, 'Gini': gini}): """Log metrics for regressors. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). 
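# Quick numeric check of the error metrics defined above on a toy target/prediction
# pair (standalone sketch; the formulas are restated rather than imported).
import numpy as np
from sklearn.metrics import mean_squared_error

y = np.array([100.0, 200.0, 400.0])
p = np.array([110.0, 190.0, 360.0])

rmse_val = np.sqrt(mean_squared_error(y, p))                        # ~24.5
mape_val = np.mean(np.abs(1 - p / y))                               # ~0.083 (all |y| > 0)
smape_val = 2.0 * np.mean(np.abs(y - p) / (np.abs(y) + np.abs(p)))  # ~0.084
print(rmse_val, mape_val, smape_val)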
If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions """ assert metrics assert y.shape[0] == p.shape[0] for name, func in metrics.items(): if w is not None: assert y.shape[0] == w.shape[0] if w.dtype != bool: w = w == 1 logger.info('{:>8s} (Control): {:10.4f}'.format(name, func(y[~w], p[~w]))) logger.info('{:>8s} (Treatment): {:10.4f}'.format(name, func(y[w], p[w]))) else: logger.info('{:>8s}: {:10.4f}'.format(name, func(y, p))) #export import logging from sklearn.metrics import log_loss, roc_auc_score logger = logging.getLogger('causalml') def logloss(y, p): """Bounded log loss error. Args: y (numpy.array): target p (numpy.array): prediction Returns: bounded log loss error """ p[p < EPS] = EPS p[p > 1 - EPS] = 1 - EPS return log_loss(y, p) def classification_metrics(y, p, w=None, metrics={'AUC': roc_auc_score, 'Log Loss': logloss}): """Log metrics for classifiers. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions """ regression_metrics(y=y, p=p, w=w, metrics=metrics) #export import argparse import logging import sys import numpy as np import pandas as pd from sklearn.neighbors import NearestNeighbors from sklearn.preprocessing import StandardScaler from sklearn.utils import check_random_state logger = logging.getLogger('causalnlp') def smd(feature, treatment): """Calculate the standard mean difference (SMD) of a feature between the treatment and control groups. The definition is available at https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3144483/#s11title Args: feature (pandas.Series): a column of a feature to calculate SMD for treatment (pandas.Series): a column that indicate whether a row is in the treatment group or not Returns: (float): The SMD of the feature """ t = feature[treatment == 1] c = feature[treatment == 0] return (t.mean() - c.mean()) / np.sqrt(.5 * (t.var() + c.var())) def create_table_one(data, treatment_col, features): """Report balance in input features between the treatment and control groups. References: R's tableone at CRAN: https://github.com/kaz-yos/tableone Python's tableone at PyPi: https://github.com/tompollard/tableone Args: data (pandas.DataFrame): total or matched sample data treatment_col (str): the column name for the treatment features (list of str): the column names of features Returns: (pandas.DataFrame): A table with the means and standard deviations in the treatment and control groups, and the SMD between two groups for the features. """ t1 = pd.pivot_table(data[features + [treatment_col]], columns=treatment_col, aggfunc=[lambda x: '{:.2f} ({:.2f})'.format(x.mean(), x.std())]) t1.columns = t1.columns.droplevel(level=0) t1['SMD'] = data[features].apply( lambda x: smd(x, data[treatment_col]) ).round(4) n_row = pd.pivot_table(data[[features[0], treatment_col]], columns=treatment_col, aggfunc=['count']) n_row.columns = n_row.columns.droplevel(level=0) n_row['SMD'] = '' n_row.index = ['n'] t1 = pd.concat([n_row, t1], axis=0) t1.columns.name = '' t1.columns = ['Control', 'Treatment', 'SMD'] t1.index.name = 'Variable' return t1 class NearestNeighborMatch(object): """ Propensity score matching based on the nearest neighbor algorithm. Attributes: caliper (float): threshold to be considered as a match. 
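# Standalone sketch of the SMD defined above: the difference in group means scaled by
# the pooled standard deviation, so it is comparable across features; |SMD| < 0.1 is a
# common rule of thumb for acceptable balance.
import numpy as np
import pandas as pd

feature = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
treatment = pd.Series([0, 0, 0, 1, 1, 1])

t = feature[treatment == 1]
c = feature[treatment == 0]
smd_value = (t.mean() - c.mean()) / np.sqrt(0.5 * (t.var() + c.var()))
print(smd_value)  # 3.0 -- the toy groups are badly imbalanced on purpose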
replace (bool): whether to match with replacement or not ratio (int): ratio of control / treatment to be matched. used only if replace=True. shuffle (bool): whether to shuffle the treatment group data before matching random_state (numpy.random.RandomState or int): RandomState or an int seed """ def __init__(self, caliper=.2, replace=False, ratio=1, shuffle=True, random_state=None): """Initialize a propensity score matching model. Args: caliper (float): threshold to be considered as a match. replace (bool): whether to match with replacement or not shuffle (bool): whether to shuffle the treatment group data before matching or not random_state (numpy.random.RandomState or int): RandomState or an int seed """ self.caliper = caliper self.replace = replace self.ratio = ratio self.shuffle = shuffle self.random_state = check_random_state(random_state) def match(self, data, treatment_col, score_cols): """Find matches from the control group by matching on specified columns (propensity preferred). Args: data (pandas.DataFrame): total input data treatment_col (str): the column name for the treatment score_cols (list): list of column names for matching (propensity column should be included) Returns: (pandas.DataFrame): The subset of data consisting of matched treatment and control group data. """ assert type(score_cols) is list, 'score_cols must be a list' treatment = data.loc[data[treatment_col] == 1, score_cols] control = data.loc[data[treatment_col] == 0, score_cols] sdcal = self.caliper * np.std(data[score_cols].values) if self.replace: scaler = StandardScaler() scaler.fit(data[score_cols]) treatment_scaled = pd.DataFrame(scaler.transform(treatment), index=treatment.index) control_scaled = pd.DataFrame(scaler.transform(control), index=control.index) # SD is the same as caliper because we use a StandardScaler above sdcal = self.caliper matching_model = NearestNeighbors(n_neighbors=self.ratio) matching_model.fit(control_scaled) distances, indices = matching_model.kneighbors(treatment_scaled) # distances and indices are (n_obs, self.ratio) matrices. # To index easily, reshape distances, indices and treatment into # the (n_obs * self.ratio, 1) matrices and data frame. distances = distances.T.flatten() indices = indices.T.flatten() treatment_scaled = pd.concat([treatment_scaled] * self.ratio, axis=0) cond = (distances / np.sqrt(len(score_cols)) ) < sdcal # Deduplicate the indices of the treatment group t_idx_matched = np.unique(treatment_scaled.loc[cond].index) # XXX: Should we deduplicate the indices of the control group too? c_idx_matched = np.array(control_scaled.iloc[indices[cond]].index) else: assert len(score_cols) == 1, ( 'Matching on multiple columns is only supported using the ' 'replacement method (if matching on multiple columns, set ' 'replace=True).' 
) # unpack score_cols for the single-variable matching case score_col = score_cols[0] if self.shuffle: t_indices = self.random_state.permutation(treatment.index) else: t_indices = treatment.index t_idx_matched = [] c_idx_matched = [] control['unmatched'] = True for t_idx in t_indices: dist = np.abs(control.loc[control.unmatched, score_col] - treatment.loc[t_idx, score_col]) c_idx_min = dist.idxmin() if dist[c_idx_min] <= sdcal: t_idx_matched.append(t_idx) c_idx_matched.append(c_idx_min) control.loc[c_idx_min, 'unmatched'] = False return data.loc[np.concatenate([np.array(t_idx_matched), np.array(c_idx_matched)])] def match_by_group(self, data, treatment_col, score_cols, groupby_col): """Find matches from the control group stratified by groupby_col, by matching on specified columns (propensity preferred). Args: data (pandas.DataFrame): total sample data treatment_col (str): the column name for the treatment score_cols (list): list of column names for matching (propensity column should be included) groupby_col (str): the column name to be used for stratification Returns: (pandas.DataFrame): The subset of data consisting of matched treatment and control group data. """ matched = data.groupby(groupby_col).apply( lambda x: self.match(data=x, treatment_col=treatment_col, score_cols=score_cols) ) return matched.reset_index(level=0, drop=True) class MatchOptimizer(object): def __init__(self, treatment_col='is_treatment', ps_col='pihat', user_col=None, matching_covariates=['pihat'], max_smd=0.1, max_deviation=0.1, caliper_range=(0.01, 0.5), max_pihat_range=(0.95, 0.999), max_iter_per_param=5, min_users_per_group=1000, smd_cols=['pihat'], dev_cols_transformations={'pihat': np.mean}, dev_factor=1., verbose=True): """Finds the set of parameters that gives the best matching result. Score = (number of features with SMD > max_smd) + (sum of deviations for important variables * deviation factor) The logic behind the scoring is that we are most concerned with minimizing the number of features where SMD is lower than a certain threshold (max_smd). However, we would also like the matched dataset not deviate too much from the original dataset, in terms of key variable(s), so that we still retain a similar userbase. Args: - treatment_col (str): name of the treatment column - ps_col (str): name of the propensity score column - max_smd (float): maximum acceptable SMD - max_deviation (float): maximum acceptable deviation for important variables - caliper_range (tuple): low and high bounds for caliper search range - max_pihat_range (tuple): low and high bounds for max pihat search range - max_iter_per_param (int): maximum number of search values per parameters - min_users_per_group (int): minimum number of users per group in matched set - smd_cols (list): score is more sensitive to these features exceeding max_smd - dev_factor (float): importance weight factor for dev_cols (e.g. 
dev_factor=1 means a 10% deviation leads to penalty of 1 in score) - dev_cols_transformations (dict): dict of transformations to be made on dev_cols - verbose (bool): boolean flag for printing statements Returns: The best matched dataset (pd.DataFrame) """ self.treatment_col = treatment_col self.ps_col = ps_col self.user_col = user_col self.matching_covariates = matching_covariates self.max_smd = max_smd self.max_deviation = max_deviation self.caliper_range = np.linspace(*caliper_range, num=max_iter_per_param) self.max_pihat_range = np.linspace(*max_pihat_range, num=max_iter_per_param) self.max_iter_per_param = max_iter_per_param self.min_users_per_group = min_users_per_group self.smd_cols = smd_cols self.dev_factor = dev_factor self.dev_cols_transformations = dev_cols_transformations self.best_params = {} self.best_score = 1e7 # ideal score is 0 self.verbose = verbose self.pass_all = False def single_match(self, score_cols, pihat_threshold, caliper): matcher = NearestNeighborMatch(caliper=caliper, replace=True) df_matched = matcher.match( data=self.df[self.df[self.ps_col] < pihat_threshold], treatment_col=self.treatment_col, score_cols=score_cols ) return df_matched def check_table_one(self, tableone, matched, score_cols, pihat_threshold, caliper): # check if better than past runs smd_values = np.abs(tableone[tableone.index != 'n']['SMD'].astype(float)) num_cols_over_smd = (smd_values >= self.max_smd).sum() self.cols_to_fix = smd_values[smd_values >= self.max_smd].sort_values(ascending=False).index.values if self.user_col is None: num_users_per_group = matched.reset_index().groupby(self.treatment_col)['index'].count().min() else: num_users_per_group = matched.groupby(self.treatment_col)[self.user_col].count().min() deviations = [np.abs(self.original_stats[col] / matched[matched[self.treatment_col] == 1][col].mean() - 1) for col in self.dev_cols_transformations.keys()] score = num_cols_over_smd score += len([col for col in self.smd_cols if smd_values.loc[col] >= self.max_smd]) score += np.sum([dev*10*self.dev_factor for dev in deviations]) # check if can be considered as best score if score < self.best_score and num_users_per_group > self.min_users_per_group: self.best_score = score self.best_params = {'score_cols': score_cols.copy(), 'pihat': pihat_threshold, 'caliper': caliper} self.best_matched = matched.copy() if self.verbose: logger.info('\tScore: {:.03f} (Best Score: {:.03f})\n'.format(score, self.best_score)) # check if passes all criteria self.pass_all = ((num_users_per_group > self.min_users_per_group) and (num_cols_over_smd == 0) and all(dev < self.max_deviation for dev in deviations)) def match_and_check(self, score_cols, pihat_threshold, caliper): if self.verbose: logger.info('Preparing match for: caliper={:.03f}, ' 'pihat_threshold={:.03f}, ' 'score_cols={}'.format(caliper, pihat_threshold, score_cols)) df_matched = self.single_match(score_cols=score_cols, pihat_threshold=pihat_threshold, caliper=caliper) tableone = create_table_one(df_matched, self.treatment_col, self.matching_covariates) self.check_table_one(tableone, df_matched, score_cols, pihat_threshold, caliper) def search_best_match(self, df): self.df = df self.original_stats = {} for col, trans in self.dev_cols_transformations.items(): self.original_stats[col] = trans(self.df[self.df[self.treatment_col] == 1][col]) # search best max pihat if self.verbose: logger.info('SEARCHING FOR BEST PIHAT') score_cols = [self.ps_col] caliper = self.caliper_range[-1] for pihat_threshold in self.max_pihat_range: 
self.match_and_check(score_cols, pihat_threshold, caliper) # search best score_cols if self.verbose: logger.info('SEARCHING FOR BEST SCORE_COLS') pihat_threshold = self.best_params['pihat'] caliper = self.caliper_range[int(self.caliper_range.shape[0]/2)] score_cols = [self.ps_col] while not self.pass_all: if len(self.cols_to_fix) == 0: break elif np.intersect1d(self.cols_to_fix, score_cols).shape[0] > 0: break else: score_cols.append(self.cols_to_fix[0]) self.match_and_check(score_cols, pihat_threshold, caliper) # search best caliper if self.verbose: logger.info('SEARCHING FOR BEST CALIPER') score_cols = self.best_params['score_cols'] pihat_threshold = self.best_params['pihat'] for caliper in self.caliper_range: self.match_and_check(score_cols, pihat_threshold, caliper) # summarize if self.verbose: logger.info('\n-----\nBest params are:\n{}'.format(self.best_params)) return self.best_matched #hide from nbdev.export import notebook2script; notebook2script()Converted 00_causalinference.ipynb. Converted 01_autocoder.ipynb. Converted 02_analyzers.ipynb. Converted 03_key_driver_analysis.ipynb. Converted 04_preprocessing.ipynb. Converted 05a_meta.base.ipynb. Converted 05b_meta.explainer.ipynb. Converted 05c_meta.utils.ipynb. Converted 05d_meta.propensity.ipynb. Converted 05e_meta.tlearner.ipynb. Converted 05f_meta.slearner.ipynb. Converted 05g_meta.xlearner.ipynb. Converted 05h_meta.rlearner.ipynb. Converted 99_examples.ipynb. Converted index.ipynb.Entropy and Persistent Homology © []() Industrial and Enterprise Systems Engineering, The Grainger College of Engineering, UIUC [Reference](https://towardsdatascience.com/how-to-pull-data-from-an-api-using-python-requests-edcc8d6441b1) Part 1: Download Data from INaturalist website using APIFrom API documentation,we get the following information:`Please note that we throttle API usage to a max of 100 requests per minute, though we ask that you try to keep it to 60 requests per minute or lower, and to keep under 10,000 requests per day. If we notice usage that has serious impact on our performance we may institute blocks without notification.``per_pageAllowed values: 1 to 200`Locations data collected from:* place_id: 1563* place_id: 49906# importing required libraries import requests import math import time import pandas as pd from datetime import datetime import os # Data for storing all the pulled data data = { 'time_observed_at': list(), 'species_guess': list(), 'genus_name': list(), 'rank': list(), 'wikipedia_url': list(), 'iconic_taxon_name': list(), 'preferred_common_name': list(), 'uri': list(), 'longitude': list(), 'latitude': list(), 'place_guess': list() }Part 1.1. Function for getting the dictionary of pages and page_numbersFor each page, there is a different per_page limit. This was not mentioned in the api documentation of the inaturalist website.# pages = {} # place_id = 1563 # page_number = 1 # per_page = 200 #max_limit # i = 1 # sleep_time = 60 # while True: # while per_page != 0: # #go through each page and per_page, get the request's response, if it is 200, append page_number:per_page to the dictionary. # #if for any per_page, we get status_code != 200, we decrement the per_page by 1, and check response. 
# response = requests.get("https://api.inaturalist.org/v1/observations?place_id={}&page={}&per_page={}".format(place_id, page_number, per_page)) # if i%50 == 0: #We have a limit of 60 pages per minute, for pulling data from API # time.sleep(sleep_time) # if response.status_code == 200: # pages[page_number] = per_page # page_number +=1 # elif response.status_code != 200: # per_page -= 1 # i += 1 # break def findPerPage(place_id, page_number, per_page, how_many_pages): pages = {} i = 1 sleep_time = 60 while True: while per_page != 0: #go through each page and per_page, get the request's response, # if it is 200, append page_number:per_page to the dictionary. # if for any per_page, we get status_code != 200, we decrement the per_page by 1, and check response. response = requests.get("https://api.inaturalist.org/v1/observations?place_id={}&page={}&per_page={}".format(place_id, page_number, per_page)) if i%50 == 0: #We have a limit of 60 pages per minute, for pulling data from API time.sleep(sleep_time) if response.status_code == 200: pages[page_number] = per_page page_number +=1 elif response.status_code != 200: per_page -= 1 i += 1 # if how_many_pages >= 200: # return pages break return pages def download_csv_pages(pages): pages_Series = pd.Series(pages) #pages_Series.to_csv('pages.csv') pages_df = pd.DataFrame(pages_Series, columns=['page_number', 'per_page']) pages_df.to_csv('pages.csv') def download_pickle_pages(pages): pages_Series = pd.Series(pages) #pages_Series.to_csv('pages.csv') pages_df = pd.DataFrame(pages_Series, columns=['page_number', 'per_page']) pages_df.to_pickle('pages.pkl') # pages_Series = pd.Series(pages) # pages_Series.to_csv('pages.csv') # pages_df = pd.read_csv('pages.csv', names=['page_number', 'per_page']) # pages_df.info() # pages_df.to_csv('pages.csv') # pages_df.to_pickle('pages.pkl')Part 1.2. Pull data# Function for pulling the data def pull_data(data, place_id, sleep_time, page_number, per_page): ''' place_id: each location has a place_id, which we can get from the website sleep_time: time in seconds we want to sleep page_number: depends on the number of observations. 
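# A standalone sketch of the polite-paging idea (hypothetical helper, not part of the
# notebook): one request per page, sleep once the per-minute budget is used up, and let
# the caller retry with a smaller per_page on a non-200 response.
import time
import requests

def fetch_page(place_id, page_number, per_page, pause_every=50, sleep_time=60):
    url = ("https://api.inaturalist.org/v1/observations"
           "?place_id={}&page={}&per_page={}").format(place_id, page_number, per_page)
    response = requests.get(url)
    if page_number % pause_every == 0:
        time.sleep(sleep_time)          # stay under the ~60 requests/minute guidance
    if response.status_code == 200:
        return response.json()          # parsed payload with a 'results' list
    return None                         # signal the caller to shrink per_page and retry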
total_observations/per_page per_page: #observations per page ''' if page_number%40 == 0: time.sleep(sleep_time) else: response = requests.get("https://api.inaturalist.org/v1/observations?place_id={}&page={}&per_page={}".format(place_id, page_number, per_page)) file_dict = response.json() if ('results' in file_dict): for j in range(per_page): if 'time_observed_at' in file_dict['results'][j]: data['time_observed_at'].append(file_dict['results'][j]['time_observed_at']) else: data['time_observed_at'].append(None) if 'species_guess' in file_dict['results'][j]: data['species_guess'].append(file_dict['results'][j]['species_guess']) else: data['species_guess'].append(None) if ('taxon' in file_dict['results'][j]) & (file_dict['results'][j]['taxon'] is not None) : #print(j) if 'name' in file_dict['results'][j]['taxon']: data['genus_name'].append(file_dict['results'][j]['taxon']['name']) else: data['genus_name'].append(None) if 'rank' in file_dict['results'][j]['taxon']: data['rank'].append(file_dict['results'][j]['taxon']['rank']) else: data['rank'].append(None) if 'wikipedia_url' in file_dict['results'][j]['taxon']: data['wikipedia_url'].append(file_dict['results'][j]['taxon']['wikipedia_url']) else: data['wikipedia_url'].append(None) if 'iconic_taxon_name' in file_dict['results'][j]['taxon']: data['iconic_taxon_name'].append(file_dict['results'][j]['taxon']['iconic_taxon_name']) else: data['iconic_taxon_name'].append(None) #print(j) if 'preferred_common_name' in file_dict['results'][j]['taxon']: data['preferred_common_name'].append(file_dict['results'][j]['taxon']['preferred_common_name']) else: data['preferred_common_name'].append(None) else: data['genus_name'].append(None) data['rank'].append(None) data['wikipedia_url'].append(None) data['iconic_taxon_name'].append(None) data['preferred_common_name'].append(None) if 'uri' in file_dict['results'][j]: data['uri'].append(file_dict['results'][j]['uri']) else: data['uri'].append(None) if 'geojson' in file_dict['results'][j]: data['longitude'].append(file_dict['results'][j]['geojson']['coordinates'][0]) data['latitude'].append(file_dict['results'][j]['geojson']['coordinates'][1]) else: data['longitude'].append(None) data['latitude'].append(None) if 'place_guess' in file_dict['results'][j]: data['place_guess'].append(file_dict['results'][j]['place_guess']) else: data['place_guess'].append(None) else: print(page_number)`pages.csv' contains the ```python{'page_number': 'per_page'}```dictionary. For each page, we have a maximum limit on the number of observations that one can pull.pages = pd.read_csv('pages.csv') # Uncomment for pulling data. 
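# The long if/else ladder above can be written more compactly with chained dict.get
# calls; a standalone sketch on one hypothetical observation record:
obs = {'species_guess': 'wild bergamot',
       'taxon': {'name': 'Monarda fistulosa', 'rank': 'species'},
       'geojson': {'coordinates': [-88.2, 40.1]}}

taxon = obs.get('taxon') or {}
coords = (obs.get('geojson') or {}).get('coordinates', [None, None])
row = {
    'species_guess': obs.get('species_guess'),
    'genus_name': taxon.get('name'),
    'rank': taxon.get('rank'),
    'wikipedia_url': taxon.get('wikipedia_url'),   # missing key -> None instead of KeyError
    'longitude': coords[0],
    'latitude': coords[1],
}
print(row)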
Change place_id # for i in range(1, len(pages)): # pull_data(data, 1563, 60, int(pages['page_number'][i]), int(pages['per_page'][i])) # data_df = pd.DataFrame(data) # data_df.info() # place_id = 1563 # csv_file = 'data_' + str(place_id) + '_' + str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + '.csv' # pickle_file = 'data_' + str(place_id) + '_' + str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + '.pkl' # data_df.to_csv(csv_file) # data_df.to_pickle(pickle_file) # data_df.info() def getData(pages_df, data, place_id): for i in range(1, len(pages)): pull_data(data, place_id, 60, int(pages_df['page_number'][i]), int(pages_df['per_page'][i])) data_df = pd.DataFrame(data) return data_df def download_csv_data(data, place_id, ): csv_file = 'data_' + str(place_id) + '_' + str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + '.csv' pickle_file = 'data_' + str(place_id) + '_' + str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + '.pkl' data_df.to_csv(csv_file) data_df.to_pickle(pickle_file)***Use pickle instead of csv***data_df = pd.read_csv('data_1563_2021_06_24_12_57_28.csv', index_col= 0) # data_df.to_pickle('data_1563_2021_06_06_20_47_20.pickle') # data_df = pd.read_pickle('data_1563_2021_06_06_20_47_20.pickle') data_df.info() data_df.dropna(subset = ['wikipedia_url'], inplace = True) data_df.info() data_df.reset_index(inplace=True, drop = True) data_df.head(15)Part 2. Get taxonomy data from the rows Search for a term in wikipedia search and get its pageUncomment for looking at results. I didn't find it useful.# import wikipedia # result = wikipedia.search("Monarda_fistulosa") # print(result) # # get the page: Neural network # page = wikipedia.page(result[0]) # print(page) # # get the title of the page # title = page.title # print(title) # # get the categories of the page # categories = page.categories # print(categories) # # get the whole wikipedia page text (content) # content = page.content # print(content) # # get all the links in the page # links = page.links # print(links) # # get the page references # references = page.references # print(references) # # summary # summary = page.summary # print(summary)Part 2.1. Scraping data using beautiful soup# For each row, if wikipedia_uri exists, we will use beautiful soup to extract the data # related to taxonomy # import required modules import requests from bs4 import BeautifulSoup # get URL page = requests.get("http://en.wikipedia.org/wiki/Monarda_fistulosa") # display status code # print(page.status_code) # display scrapped data # print(page.content) # scrape webpage soup = BeautifulSoup(page.content, 'html.parser') #.get_text(strip=True) #This removes \xa0 # display scrapped data # print(soup.prettify()) # list(soup.children) # find all occurance of p in HTML # includes HTML tags # print(soup.find_all('p')) data_df[data_df['rank'] == 'species' ]['wikipedia_url'][5]Part 3.1.1. 
Get table from the id# create object # object = soup.find(id="mw-content-text") # # find tags # items = object.find_all(class_="infobox biota") # result = items[0] # # display tags # print(result.prettify()) table = soup.find_all('table') # table[0] # for child in soup.find_all('table')[0].children: # for td in child: # print(td) # list(soup.find_all('table')[0].tr.next_siblings) # for sibling in soup.find_all('table')[0].tr.next_siblings: # for td in sibling: # print(td) table = soup.find('table', attrs={'class':'infobox biota'}) #class="infobox biota" table_rows = table.find_all('tr') # table_rows # len(table_rows) data_taxonomy = { 'Kingdom': list(), 'Phylum': list(), 'Class': list(), 'Order': list(), 'Suborder': list(), 'Family': list(), 'Genus': list(), 'Species': list() } l = [] data_taxonomy['Kingdom'].insert(0, None) data_taxonomy['Phylum'].insert(0, None) data_taxonomy['Class'].insert(0, None) data_taxonomy['Order'].insert(0, None) data_taxonomy['Suborder'].insert(0, None) data_taxonomy['Family'].insert(0, None) data_taxonomy['Genus'].insert(0, None) data_taxonomy['Species'].insert(0, None) for tr in table_rows: td = tr.find_all('td') row = [tr.text.replace('\n', '').replace(':', '').replace(u'\xa0', ' ') for tr in td] if 'Kingdom' in row: data_taxonomy['Kingdom'][0] = row[1] l.append(row) if 'Phylum' in row: data_taxonomy['Phylum'][0] = row[1] l.append(row) if 'Class' in row: data_taxonomy['Class'][0] = row[1] l.append(row) if 'Order' in row: data_taxonomy['Order'][0] = row[1] l.append(row) if 'Suborder' in row: data_taxonomy['Suborder'][0] = row[1] l.append(row) if 'Family' in row: data_taxonomy['Family'][0] = row[1] l.append(row) if 'Genus' in row: data_taxonomy['Genus'][0] = row[1] l.append(row) if 'Species' in row: data_taxonomy['Species'][0] = row[1] l.append(row) # elif 'Clade' in row: # l.append(row) l # 'Kingdom' in l[4] # table_rows = table_rows[4:13] # l = [] # for tr in table_rows: # td = tr.find_all('td') # row = [tr.text for tr in td] # l.append(row) # l data_taxonomy len(data_df) data_taxonomy = { 'Kingdom': list(), 'Phylum': list(), 'Class': list(), 'Order': list(), 'Suborder': list(), 'Family': list(), 'Genus': list(), 'Species': list() } for i in range(len(data_df)): if 'http' in data_df['wikipedia_url'][i]: #print(i) # get URL data_taxonomy['Kingdom'].insert(i, None) data_taxonomy['Phylum'].insert(i, None) data_taxonomy['Class'].insert(i, None) data_taxonomy['Order'].insert(i, None) data_taxonomy['Suborder'].insert(i, None) data_taxonomy['Family'].insert(i, None) data_taxonomy['Genus'].insert(i, None) data_taxonomy['Species'].insert(i, None) page = requests.get(data_df['wikipedia_url'][i]) # scrape webpage soup = BeautifulSoup(page.content, 'html.parser') #table = soup.find_all('table') table = soup.find('table', attrs={'class':'infobox biota'}) #class="infobox biota" if table is not None: table_rows = table.find_all('tr') for tr in table_rows: td = tr.find_all('td') row = [tr.text.replace('\n', '').replace(':', '').replace(u'\xa0', ' ') for tr in td] if 'Kingdom' in row: data_taxonomy['Kingdom'][i] = row[1] #l.append(row) if 'Phylum' in row: data_taxonomy['Phylum'][i] = row[1] #l.append(row) if 'Class' in row: data_taxonomy['Class'][i] = row[1] #l.append(row) if 'Order' in row: data_taxonomy['Order'][i] = row[1] #l.append(row) if 'Suborder' in row: data_taxonomy['Suborder'][i] = row[1] #l.append(row) if 'Family' in row: data_taxonomy['Family'][i] = row[1] #l.append(row) if 'Genus' in row: data_taxonomy['Genus'][i] = row[1] #l.append(row) if 'Species' in row: 
data_taxonomy['Species'][i] = row[1] #l.append(row) len(data_taxonomy['Species']) data_taxonomy_df = pd.DataFrame(data_taxonomy) data_taxonomy_df.to_csv('data_taxonomy_df.csv') page = requests.get(data_df['wikipedia_url'][0]) data_taxonomy_df.head()Fig 1df = pd.read_csv( "https://raw.githubusercontent.com/RDeconomist/observatory/main/Average%20real%20GDP%201910s%20%26%201920s.csv" ) f = "fig1_twenties" f1 = eco_git_path + f + ".csv" df.to_csv("data/" + f + ".csv") f += local_suffix open("visualisation/" + f + ".html", "w").write( vega_embed.replace( "JSON_PATH", f1.replace("/data/", "/visualisation/").replace(".csv", ".json") ) ) if LOCAL: f1 = df df.head() bars = alt.Chart(f1).encode( x=alt.X( "Average real GDP:Q", stack=False, title="", axis=alt.Axis( grid=False, title="%", titleAnchor="end", titleX=415, titleY=7, labelColor=colors["eco-gray"], titleColor=colors["eco-gray"], tickColor=colors["eco-gray"], domainColor=colors["eco-gray"], ), ), y=alt.Y("Country:N", title="", axis=None), ) bars1 = ( bars.mark_bar(size=11, yOffset=-10, color=colors["eco-turquiose"], opacity=0.8) .transform_filter("datum.Decade=='1910s average'") .transform_filter("datum['Average real GDP']>0") ) labels1 = ( bars1.mark_text(align="left", xOffset=5, yOffset=-10, color=colors["eco-turquiose"]) .encode(text="label:N") .transform_calculate(label="'+'+round(datum['Average real GDP']*10)/10") ) bars2 = ( bars.mark_bar(size=11, yOffset=-10, color=colors["eco-pink"], opacity=0.8) .transform_filter("datum.Decade=='1910s average'") .transform_filter("datum['Average real GDP']<=0") ) labels2 = ( bars2.mark_text(align="right", xOffset=-5, yOffset=-10, color=colors["eco-pink"]) .encode(text="label:N") .transform_calculate(label="round(datum['Average real GDP']*10)/10") ) labels2b = ( bars2.mark_text(align="right", xOffset=-3, yOffset=-10, color="#ffffff") .encode(text="label:N", x="x:Q") .transform_calculate(x="0") .transform_calculate(label="'1910s'") ) bars3 = ( bars.mark_bar(size=11, yOffset=2, color=colors["eco-light-blue"], opacity=0.8) .transform_filter("datum.Decade=='1920s average'") .transform_filter("datum['Average real GDP']>0") ) labels3 = ( bars3.mark_text(align="left", xOffset=5, yOffset=4, color=colors["eco-light-blue"]) .encode(text="label:N") .transform_calculate(label="'+'+round(datum['Average real GDP']*10)/10") ) labels1b = ( bars1.mark_text(align="left", xOffset=3, yOffset=-10, size=10, color="#ffffff") .encode(text="label:N", x="x:Q") .transform_calculate(x="0") .transform_calculate(label="'1910s'") .transform_filter("datum['Average real GDP']>1") ) labels3b = ( bars3.mark_text(align="left", xOffset=3, yOffset=3, size=10, color="#ffffff") .encode(text="label:N", x="x:Q") .transform_calculate(x="0") .transform_calculate(label="'1920s'") .transform_filter("datum['Average real GDP']>1") .transform_filter("datum['Average real GDP']<5") ) labels3b2 = ( bars3.mark_text(align="left", xOffset=3, yOffset=3, size=10, color="#ffffff") .encode(text="label:N", x="x:Q") .transform_calculate(x="0") .transform_calculate(label="'1920s average'") .transform_filter("datum['Average real GDP']>5") ) labels = ( bars.mark_text(align="right", xOffset=-5, yOffset=4, color=colors["eco-gray"]) .encode(text="Country:N", x="x:Q") .transform_calculate(x="0") ) rule = ( alt.Chart(pd.DataFrame([{"x": 0}])) .mark_rule(color=colors["eco-gray"]) .encode(x="x:Q") ) layers = ( ( bars1 + labels1 + bars2 + labels2 + bars3 + labels3 + rule + labels + labels2b + labels1b + labels3b + labels3b2 ) .configure_view(stroke=None) 
.properties(title="") .properties(height=300, width=400) ) layers.save("visualisation/" + f + ".json") layersFull GUIfrom lib.geometadp import md_manager obj = md_manager.geo_metadata() obj.manage() import ipywidgets as widgets up = widgets.FileUpload() def onclick(change): #print(change.new) #print(change.new) uploaded_file = up.value[0] uploaded_file["size"] uploaded_file.size up.observe(onclick,'value') upCopyright 2016 Google Inc. All Rights Reserved.Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License. Table of Contents%%javascript // From https://github.com/kmahelona/ipython_notebook_goodies $.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')Basics There's lots of guides out there on decorators ([this](http://thecodeship.com/patterns/guide-to-python-function-decorators/) one is good), but I was never really sure when I would need to use decorators. Hopefully this will help motivate them a little more. Here I hope to show you:* When decorators might come in handy* How to write one* How to generalize using `*args` and `**kwargs` sorcery. You should read this if:* You've heard of decorators and want to know more about them, and/or* You want to know what `*args` and `**kwargs` mean.If you're here just for `*args` and `**kwargs`, start reading [here](args). MotivationLet's say you're defining methods on numbers:def add(n1, n2): return n1 + n2 def multiply(n1, n2): return n1 * n2 def exponentiate(n1, n2): """Raise n1 to the power of n2""" import math return math.pow(n1, n2)Well, we only want these functions to work if both inputs are numbers. So we could do:def is_number(n): """Return True iff n is a number.""" # A number can always be converted to a float try: float(n) return True except ValueError: return False def add(n1, n2): if not (is_number(n1) and is_number(n2)): print("Arguments must be numbers!") return return n1 + n2 def multiply(n1, n2): if not (is_number(n1) and is_number(n2)): print("Arguments must be numbers!") return return n1 * n2 def exponentiate(n1, n2): """Raise n1 to the power of n2""" if not (is_number(n1) and is_number(n2)): print("Arguments must be numbers!") return import math return math.pow(n1, n2)But this is yucky: we had to copy and paste code. This should always make you sad! For example, what if you wanted to change the message slightly? Or to return an error instead? You'd have to change it everywhere it appears... We want the copy & pasted code to live in just one place, so any changes just go there (DRY code: Don't Repeat Yourself). So let's **refactor**.def validate_two_arguments(n1, n2): """ Returns True if n1 and n2 are both numbers. """ if not (is_number(n1) and is_number(n2)): return False return True def add(n1, n2): if validate_two_arguments(n1, n2): return n1 + n2 def multiply(n1, n2): if validate_two_arguments(n1, n2): return n1 * n2 def exponentiate(n1, n2): """Raise n1 to the power of n2""" if validate_two_arguments(n1, n2): import math return math.pow(n1, n2)This is definitely better. But there's still some repeated logic. 
Like, what if we want to return an error if we don't get numbers, or print something before running the code? We'd still have to make the changes in multiple places. The code isn't DRY. Basic decoratorsWe can refactor further with the **decorator pattern**.We want to write something that looks like @decorator def add(n1, n2): return n1 + n2so that all the logic about validating `n1` and `n2` lives in one place, and the functions just do what we want them to do. Since the @ syntax just means `add = decorator(add)`, we know the decorator needs to take a function as an argument, and it needs to return a function. (This should be confusing at first. Functions returning functions are scary, but think about it until that doesn't seem outlandish to you.)This returned function should act the same way as `add`, so it should take two arguments. And within this returned function, we want to first check that the arguments are numbers. If they are, we want to call the original function that we decorated (in this case, `add`). If not, we don't want to do anything. Here's what that looks like (there's a lot here, so use the comments to understand what's happening):# The decorator: takes a function. def validate_arguments(func): # The decorator will be returning wrapped_func, a function that has the # same signature as add, multiply, etc. def wrapped_func(n1, n2): # If we don't have two numbers, we don't want to run the function. # Best practice ("be explicit") is to raise an error here # instead of just returning None. if not validate_two_arguments(n1, n2): raise Exception("Arguments must be numbers!") # We've passed our checks, so we can call the function with the passed in arguments. # If you like, think of this as # result = func(n1, n2) # return result # to distinguish it from the outer return where we're returning a function. return func(n1, n2) # This is where we return the function that has the same signature. return wrapped_func @validate_arguments def add(n1, n2): return n1 + n2 # Don't forget, the @ syntax just means # add = validate_decorator(add) print(add(1, 3)) try: add(2, 'hi') except Exception as e: print("Caught Exception: {}".format(e))4 Caught Exception: Arguments must be numbers!This pattern is nice because we've even refactored out all the validation logic (even the "if blah then blah" part) into the decorator. Generalizing with \*args and \**kwargsWhat if we want to validate a function that has a different number of arguments?@validate_arguments # Won't work! def add3(n1, n2, n3): return n1 + n2 + n3 add3(1, 2, 3)We can't decorate this because the wrapped function expects 2 arguments. Here's where we use the `*` symbol. I'll write out the code so you can see how it looks, and we'll look at what `*args` is doing below.# The decorator: takes a function. def validate_arguments(func): # Note the *args! Think of this as representing "as many arguments as you want". # So this function will take an arbitrary number of arguments. def wrapped_func(*args): # We just want to apply the check to each argument. for arg in args: if not is_number(arg): raise Exception("Arguments must be numbers!") # We also want to make sure there's at least two arguments. if len(args) < 2: raise Exception("Must specify at least 2 arguments!") # We've passed our checks, so we can call the function with the # passed-in arguments. # Right now, args is a tuple of all the different arguments passed in # (more explanation below), so we want to expand them back out when # calling the function. 
return func(*args) return wrapped_func @validate_arguments # This works def add3(n1, n2, n3): return n1 + n2 + n3 add3(1, 2, 3) @validate_arguments # And so does this def addn(*args): """Add an arbitrary number of numbers together""" cumu = 0 for arg in args: cumu += arg return cumu print(addn(1, 2, 3, 4, 5)) # range(n) gives a list, so we expand the list into positional arguments... print(addn(*range(10)))15 45`*args`What is this `*` nonsense?You've probably seen `*args` and `**kwargs` in documentation before. Here's what they mean:* When calling a function, `*` **expands an iterable** into **positional arguments**. * Terminology note: in a call like `bing(1, 'hi', name='fig')`, `1` is the first positional argument, `'hi'` is the second positional argument, and there's a keyword argument `'name'` with the value `'fig'`.* When defining a signature, `*args` represents an **arbitrary number of positional arguments**.def foo(*args): print("foo args: {}".format(args)) print("foo args type: {}".format(type(args))) # So foo can take an arbitrary number of arguments print("First call:") foo(1, 2, 'a', 3, True) # Which can be written using the * syntax to expand an iterable print("\nSecond call:") l = [1, 2, 'a', 3, True] foo(*l)First call: foo args: (1, 2, 'a', 3, True) foo args type: Second call: foo args: (1, 2, 'a', 3, True) foo args type: Back to the decorator(If you're just here for \*args and \*\*kwargs, skip down to [here](kwargs))So let's look at the decorator code again, minus the comments: def validate_decorator(func): def wrapped_func(*args): for arg in args: if not is_number(arg): print("arguments must be numbers!") return return func(*args) return wrapped_func * `def wrapped_func(*args)` says that `wrapped_func` can take an arbitrary number of arguments.* Within `wrapped_func`, we interact with `args` as a tuple containing all the (positional) arguments passed in. * If all the arguments are numbers, we call `func`, the function we decorated, by **expanding** the `args` tuple back out into positional arguments: `func(*args)`.* Finally the decorator needs to return a function (remember that the `@` syntax is just sugar for `add = decorator(add)`.Congrats, you now understand decorators! You can do tons of other stuff with them, but hopefully now you're equipped to read the other guides online. --- As for `**kwargs`: * When calling a function, `**` **expands a dict** into **keyword arguments**.* When defining a signature, `**kwargs` represents an **arbitrary number of keyword arguments**.def bar(**kwargs): print("bar kwargs: {}".format(kwargs)) # bar takes an arbitrary number of keyword arguments print("First call:") bar(location='US-PAO', ldap='awan', age=None) # Which can also be written using the ** syntax to expand a dict print("\nSecond call:") d = {'location': 'US-PAO', 'ldap': 'awan', 'age': None} bar(**d)First call: bar kwargs: {'age': None, 'location': 'US-PAO', 'ldap': 'awan'} Second call: bar kwargs: {'age': None, 'location': 'US-PAO', 'ldap': 'awan'}And in case your head doesn't hurt yet, we can do both together:def baz(*args, **kwargs): print("baz args: {}. kwargs: {}".format(args, kwargs)) # Calling baz with a mixture of positional and keyword arguments print("First call:") baz(1, 3, 'hi', name='Joe', age=37, occupation='Engineer') # Which is the same as print("\nSecond call:") l = [1, 3, 'hi'] d = {'name': 'Joe', 'age': 37, 'occupation': 'Engineer'} baz(*l, **d)First call: baz args: (1, 3, 'hi'). 
kwargs: {'age': 37, 'name': 'Joe', 'occupation': 'Engineer'} Second call: baz args: (1, 3, 'hi'). kwargs: {'age': 37, 'name': 'Joe', 'occupation': 'Engineer'}--- Advanced decoratorsThis section will introduce some of the many other useful ways you can use decorators. We'll talk about* Passing arguments into decorators* `functools.wraps`* Returning a different function* Decorators and objects.Use the [table of contents](toc) at the top to make it easier to look around. Decorators with argumentsA common thing to want to do is to do some kind of configuration in a decorator. For example, let's say we want to define a `divide_n` method, and to make it easy to use we want to hide the existence of integer division. Let's define a decorator that converts arguments into floats.def convert_arguments(func): """ Convert func arguments to floats. """ # Introducing the leading underscore: (weakly) marks a private # method/property that should not be accessed outside the defining # scope. Look up PEP 8 for more. def _wrapped_func(*args): new_args = [float(arg) for arg in args] return func(*new_args) return _wrapped_func @convert_arguments @validate_arguments def divide_n(*args): cumu = args[0] for arg in args[1:]: cumu = cumu / arg return cumu # The user doesn't need to think about integer division! divide_n(103, 2, 8)But now let's say we want to define a `divide_n_as_integers` function. We could write a new decorator, or we could alter our decorator so that we can specify what we want to convert the arguments to. Let's try the latter. (For you smart alecks out there: yes you could use the `//` operator, but you'd still have to replicate the logic in `divide_n`. Nice try.)def convert_arguments_to(to_type=float): """ Convert arguments to the given to_type by casting them. """ def _wrapper(func): def _wrapped_func(*args): new_args = [to_type(arg) for arg in args] return func(*new_args) return _wrapped_func return _wrapper @validate_arguments def divide_n(*args): cumu = args[0] for arg in args[1:]: cumu = cumu / arg return cumu @convert_arguments_to(to_type=int) def divide_n_as_integers(*args): return divide_n(*args) @convert_arguments_to(to_type=float) def divide_n_as_float(*args): return divide_n(*args) print(divide_n_as_float(7, 3)) print(divide_n_as_integers(7, 3))2.33333333333 2Did you notice the tricky thing about creating a decorator that takes arguments? **We had to create a function to "return a decorator".** The outermost function, `convert_arguments_to`, returns a function that takes a function, which is what we've been calling a "decorator". To think about why this is necessary, let's start from the form that we wanted to write, and unpack from there. We wanted to be able to do: @decorator(decorator_arg) def myfunc(*func_args): pass Unpacking the syntactic sugar gives us def myfunc(*func_args): pass myfunc = decorator(decorator_arg)(myfunc) Written this way, it should immediately be clear that `decorator(decorator_arg)` **returns a function that takes a function**. So that's how you write a decorator that takes an argument: it actually has to be a function that takes your decorator arguments, and returns a function that takes a function. functools.wrapsIf you've played around with the examples above, you might've seen that the name of the wrapped function changes after you apply a decorator... And perhaps more importantly, the docstring of the wrapped function changes too (this is important for when generating documentation, e.g. 
with Sphinx).@validate_arguments def foo(*args): """foo frobs bar""" pass print(foo.__name__) print(foo.__doc__)wrapped_func None[`functools.wraps`](https://docs.python.org/2/library/functools.html#functools.wraps) solves this problem. Use it as follows:from functools import wraps def better_validate_arguments(func): @wraps(func) def wrapped_func(*args): for arg in args: if not is_number(arg): raise Exception("Arguments must be numbers!") if len(args) < 2: raise Exception("Must specify at least 2 arguments!") return func(*args) return wrapped_func @better_validate_arguments def bar(*args): """bar frobs foo""" pass print(bar.__name__) print(bar.__doc__)bar bar frobs fooThink of the `@wraps` decorator as making it so that `wrapped_func` knows what function it originally wrapped. Returning a different functionDecorators don't even have to return the function that's passed in. You can have some fun with this...def jedi_mind_trick(func): def _jedi_func(): return "Not the droid you're looking for" return _jedi_func @jedi_mind_trick def get_droid(): return "Found the droid!" get_droid()Handling image coordinatesThe `astropy.wcs` package implements the FITS World Coordinate System (WCS) standard, and some commonly-used distortion conventions used in imaging data.This tutorial will show how to extract the WCS from FITS files, and how to use it to transform coordinates.import os import numpy as np %matplotlib inline from matplotlib import pyplot as plt from astropy.utils.data import get_pkg_data_filename from astropy.io import fits from astropy import wcsCreating a WCS object from the header of a FITS file Open a FITS image with `astropy.io.fits` and examine the contents.image_file = fits.open('data/w5.fits') image_file.info()To create a WCS object, pass the header with the WCS keywords to astropy.wcs.WCS. In this case it is the primary header.w = wcs.WCS(image_file[0].header) print(w)For this case, `fits.getheader` may be used to save a step.w = wcs.WCS(fits.getheader('data/w5.fits'))Examine the physical types of the world coordinate axes, to show they are RA and Dec.w.world_axis_physical_typesPrint the number of pixel and world axes, and the shape of the data.print(w.pixel_n_dim, w.world_n_dim, w.array_shape)Transforming between pixel coordinates and sky coordinates To determine the sky coordinate associated with a position on the detector, pass x-pixel and y-pixel values to the `pixel_to_world` method.The inputs can be numbers, numpy arrays or array-like objects. See the [Pixel Conventions and Definitions](https://docs.astropy.org/en/stable/wcs/index.html#pixel-conventions) for an explanation of the pixel numbering convention.The output is a SkyCoord object.coord = w.pixel_to_world([10, 100], [24, 500.5]) print(coord)Perform the inverse transformation - from sky to detector coordinates.w.world_to_pixel(coord)Creating a WCS programmatically A WCS object can be created programmatically. Here is a concise example with 1 arcsecond pixels that is aligned with "North up, East to the left".my_wcs = wcs.WCS(naxis=2) my_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] my_wcs.wcs.crpix = [512, 512] my_wcs.wcs.crval = [70., 20.]
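# A few notes on these keywords (standard FITS/WCS conventions): ctype selects RA/Dec
# axes with a gnomonic (TAN) projection, crpix is the reference pixel (1-based, per the
# FITS convention), and crval is the sky coordinate in degrees at that reference pixel.
# The cdelt values below set the pixel scale in degrees (1/3600 deg = 1 arcsec); the
# negative RA step makes East point to the left, i.e. "North up, East left".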
my_wcs.wcs.cdelt = [-1/3600, 1/3600] my_wcs.array_shape = [1024, 1024] # NAXIS2, NAXIS1 my_wcsExercise: compute the coordinates of image cornersFor the WCS `w` we created from the `'data/w5.fits'` image, compute and print the RA and Dec coordinates of the centers of the corner pixels of the image, which in pixel units are (0, 0), (0, 999), (999, 0), (999, 999).Print the output of the `w.calc_footprint()` method for comparison. If you prefer to load the solution, uncomment the line below and run the cell.# %load wcs_corners_solution.pyTo parmed.Structurefrom molsysmt.tools import file_pdb #file_pdb.to_parmed_Structure(item)Step 1: Import the requests libraryimport requestsStep 2: Send an HTTP request, get the response, and save in a variableresponse = requests.get("http://www.epicurious.com/search/Tofu+Chili")Step 3: Check the response status code to see if everything went as plannedstatus code 200: the request response cycle was successfulany other status code: it didn't work (e.g., 404 = page not found)print(response.status_code)Step 4: Get the content of the responseConvert to utf-8 if necessaryresponse.content.decode('utf-8')Problem: Get the contents of Wikipedia's main page and look for the string "Did you know" in iturl = "https://en.wikipedia.org/wiki/main_page" #The rest of your code should go below this lineJSONThe python library - json - deals with converting text to and from JSONimport json data_string = '[{"b": [2, 4], "c": 3.0, "a": "A"}]' python_data = json.loads(data_string) print(python_data)json.loads recursively decodes a string in JSON format into equivalent python objectsdata_string's outermost element is converted into a python listthe first element of that list is converted into a dictionarythe key of that dictionary is converted into a stringthe value of that dictionary is converted into a list of two integer elementsprint(type(data_string),type(python_data)) print(type(python_data[0]),python_data[0]) print(type(python_data[0]['b']),python_data[0]['b'])json.loads will throw an exception if the format is incorrect#Wrong # json.loads("Hello") #Correct json.loads('"Hello"') import json data_string = json.dumps(python_data) print(type(data_string)) print(data_string)requests library and JSONaddress="Columbia University, New York, NY" url="https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address) response = requests.get(url).json() print(type(response))Exception checking!address="Columbia University, New York, NY" url="https://maps.googleapis.com/maps/api/geocode/json?address=%s" % (address) try: response = requests.get(url) if not response.status_code == 200: print("HTTP error",response.status_code) else: try: response_data = response.json() except: print("Response not in valid JSON format") except: print("Something went wrong with requests.get") print(response_data) # def id_generator(dict_var, search): # for k, v in dict_var.items(): # if k == search: # yield v # elif isinstance(v, dict): # for id_val in id_generator(v): # yield id_val # # for _ in id_generator(response_data, 'long_name'): # print(_) # The JSON might contain a list of objects, which needs to be searched: def item_generator1(json_input, lookup_key): if isinstance(json_input, dict): for k, v in json_input.items(): if k == lookup_key: yield v else: for child_val in item_generator(v, lookup_key): yield child_val elif isinstance(json_input, list): for item in json_input: for item_val in item_generator(item, lookup_key): yield item_val # The JSON might contain a list of objects, which needs to 
be searched: def item_generator(json_input, lookup_key): if isinstance(json_input, dict): for k, v in json_input.items(): if k == lookup_key: yield {k:v} else: for child_val in item_generator(v, lookup_key): yield child_val elif isinstance(json_input, list): for item in json_input: for item_val in item_generator(item, lookup_key): yield item_val def get_json_dict(json_data, lookup_key): return list(item_generator(json_data, lookup_key)) print(get_json_dict(response_data, 'location')) for x in get_json_dict(response_data, 'location'): print(x) for v in item_generator(response_data, 'address_components'): print(v) item_generator(response_data, 'address_components')Problem 1: Write a function that takes an address as an argument and returns a (latitude, longitude) tupledef get_lat_lng(address_string): #python code goes hereProblem 2: Extend the function so that it takes a possibly incomplete address as an argument and returns a list of tuples of the form (complete address, latitude, longitude)def get_lat_lng(address_string): #python code goes hereXMLThe python library - lxml - deals with converting an xml string to python objects and vice versadata_string = """ New York Deco Richard Berenholtz Five Hundred Buildings of New York and over one million other books are available for Amazon Kindle. Five Hundred Buildings of New York Bill Harris Jorg Brockmann """ from lxml import etree root = etree.XML(data_string) print(root.tag,type(root.tag)) print(etree.tostring(root, pretty_print=True).decode("utf-8"))Iterating over an XML treeUse an iterator. The iterator will generate every tree element for a given subtreefor element in root.iter(): print(element)Or just use the child in subtree constructionfor child in root: print(child)Accessing the tagfor child in root: print(child.tag)Using the iterator to get specific tagsIn the below example, only the author tags are accessedFor each author tag, the .find function accesses the First_Name and Last_Name tagsThe .find function only looks at the children, not other descendants, so be careful!The .text attribute prints the text in a leaf nodefor element in root.iter("Author"): print(element.find("Last_Name").text, element.find("First_Name").text)Problem: Find the last names of all authors in the tree “root” using xpathfor element in root.findall('Book/Authors/Author/Last_Name'): print(element.text) for element in root.findall('Book/Title'): print(element.text)Using values of attributes as filtersExample: Find the first name of the author of a book that weighs 1.5 ozroot.find('Book[@Weight="1.5"]/Authors/Author/First_Name').textProblem: Print first and last names of all authors who live in New York Cityroot.find('Book/Authors/Author[@="New "]/Last_Name').text for element in root.findall('Book/Authors/Author[@="New "]'): print(element.find("Last_Name").text, element.find("First_Name").text)DictionariesIt store data/value with flag or key. Dictionary is initialize with my_dict = {} it can be empty. 
if we want add value with own.my_dict = {1:'shahin' , 2:'swarna' , 3:'tanvir', 4:'pial'}my_dict = {'key':'jhon', 1:[2,3,4]} key/flag can be string/character/number.my_dict = dict([(1,'apple'),(2,'ball')]) from sequence having each item as a pair() -> Tuples{} -> Dictionaries[] -> Lists Concatenationmy_dict = {2:'swarna', 1:'shahin', 3:'pial'} print(my_dict) my_dict.update({3:'pilu'}) print(my_dict){2: 'swarna', 1: 'shahin', 3: 'pial'} {2: 'swarna', 1: 'shahin', 3: 'pilu'}Repeatationprint(my_dict*2) # Repeatation is not possible in this waySlicingprint(my_dict[2]) print(my_dict[-1]) # it shows errorBuilt In Function of Dictionarymy_dict = {2:'swarna', 1:'shahin', 3:'pial'} print(len(my_dict)) my_dict2 = my_dict.copy() # copy total dictionary element in other dictiontary my_dict.clear() # delete all element from dictionary print(my_dict2) print(my_dict) print(my_dict2.values()) # values of Dictionary print(my_dict2.keys()) # keyes of Dictionary print(sorted(my_dict2)) print(my_dict2.items()) # show the total item print(my_dict2.get(1)) print(my_dict2.update({4:'jahid'})) print(my_dict2.pop(4)) # remove the key and value print(my_dict2) print(my_dict2) print(my_dict2)3 {2: 'swarna', 1: 'shahin', 3: 'pial'} {} dict_values(['swarna', 'shahin', 'pial']) dict_keys([2, 1, 3]) [1, 2, 3] dict_items([(2, 'swarna'), (1, 'shahin'), (3, 'pial')]) shahin None jahid {2: 'swarna', 1: 'shahin', 3: 'pial'} {2: 'swarna', 1: 'shahin', 3: 'pial'} {2: 'swarna', 1: 'shahin', 3: 'pial'}Travail Ecrit - Python* , site de l'Ours* OC informatique* Sujet : chapitres 1-10 du livre *Pensez en Python** * Date : jeudi 13 novembre 2018 **Exercice : expression arithmétique** Initialisez les variables `(a, b, c, x)` avec les valeurs `(2, 3, 4, 5)`. Calculez l'expression $$y = a x^2 + b x +c$$et imprimez le résultat.def fonction(a, b, c, x): print(a*x**2+b*x+c) fonction(2, 3, 4, 5)69**Exercice : fonction surface** Importez le module `math`, définissez une fonction `surface(r)` qui calcule $s = \pi r^2$, affichez avec un texte descriptif le résultat pour `r=5`import math def surface(r): print(math.pi*r**2) surface(5)78.53981633974483**Exercice : formule quadratique** La solution d'une formule quadratique de forme$$ a x^2 + b x +c = 0 $$dépend du terme $\Delta = b^2 - 4 a c$* Si $\Delta < 0$ il n'y a pas de solution* Si $\Delta = 0$ il y a une solution : $x = \frac{-b}{2 a}$* Si $\Delta > 0$ il y a deux solutions : $x_1 = \frac{-b +\sqrt\Delta}{2 a}$ and $x_2 = \frac{-b -\sqrt\Delta}{2 a}$Définissez une fonction `quadratique(a, b, c)` qui retourne la solution à l'équation quadratique dans les 3 cas: `None`, `x`, `[x1, x2]`.Montrez la solution pour `quadratique(1, 2, 3)`, `quadratique(1, 2, 1)` et `quadratique(1, 2, -1)`import math def quadratique(a, b, c): delta = b**2-4*a*c if delta < 0 : print('Pas de solution') x = None if delta == 0 : print('Il existe une solution:') x = -b/2*a return x if delta > 0 : print('IL existe deux solutions:') x1=(-b + math.sqrt(delta))/2*a x2=(-b - math.sqrt(delta))/2*a print(x1) print(x2) print(quadratique(1, 2, 3)) print(quadratique(1, 2, 1)) print(quadratique(1, 2, -1)) import math math.sqrt(25)**Exercice : capitalize** Créez une fonction `capitalize(c)` qui transforme une lettre en majuscule si c'est une minuscule, ou la laisse inchangée autrement.def capitalize(c): res = [] for s in c: if s.isupper(): res.append(s.capitalize()) return res else: return c capitalize('a'), capitalize('B'), capitalize('3')**Exercice : capitalize words** Créez une fonction `capitalize_words(s)` qui transforme la 
première lettre de tous les mots en majuscule.def capitalize_words(s): res = [] for c in s: res.append(c.capitalize_words()) return res capitalize_words('hello world, how are you?') mot = 'banane' len(mot)**Exercice : tranches** Expliquez ce que font les 6 opérateurs de **tranches** ci-dessous.# s[2] - Ecrit la 3ème lettre # s[:2] - Ecrit jusqu'à la 3ème lettre # s[::2] - # s[-1] - Ecrit la dernière lettre # s[:-1] - Ecrit jusqu'à la dernière lettre # s[::-1] -**Exercice : longueur spécifique** Le fichier `words.text` contient 58110 mots en anglais. Affichez les $n=5$ premiers mots qui ont une longueur de $m=10$ et affichez leur nombre total.fin = open('words.txt') n = 5 m = 10 for i in c : fin.readline()**Exercice : répétition**. Affchez les $m=5$ premiers mots qui sont composé de deux parties répétées (par exemple **bonbon**).fin = open('words.txt') m = 5**Exercice : minimum** Créez une fonction `min(L)` qui retourne le minimum d'une liste et l'index de sa position sous forme de list `[val, pos]`.def min(L): len(L) L = [1, 3, 34, -4, -2, 100] min(L)**Exercice : moyenne** Ecrivez une fonction `mean(L)` qui retourne la moyenne d'une liste..def mean(L): pass L = [1, 3, 34, -4, -2, 100] mean(L)Interact Exercise 3 Imports%matplotlib inline from matplotlib import pyplot as plt import numpy as np from IPython.html.widgets import interact, interactive, fixed from IPython.display import displayUsing interact for animation with data A [*soliton*](http://en.wikipedia.org/wiki/Soliton) is a constant velocity wave that maintains its shape as it propagates. They arise from non-linear wave equations, such has the [Korteweg–de Vries](http://en.wikipedia.org/wiki/Korteweg%E2%80%93de_Vries_equation) equation, which has the following analytical solution:$$\phi(x,t) = \frac{1}{2} c \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right]$$The constant `c` is the velocity and the constant `a` is the initial location of the soliton.Define `soliton(x, t, c, a)` function that computes the value of the soliton wave for the given arguments. Your function should work when the postion `x` *or* `t` are NumPy arrays, in which case it should return a NumPy array itself.def soliton(x, t, c, a): """Return phi(x, t) for a soliton wave with constants c and a.""" # YOUR CODE HERE # v = np.coshx-c*t-a # b = np.sqrt(c)/2 # x = np.array() # t = np.array() phi_x_t = 0.5*c*(1/np.cosh(np.sqrt(c)/2*(x-c*t-a)))**2 return phi_x_t assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays:tmin = 0.0 tmax = 10.0 tpoints = 100 t = np.linspace(tmin, tmax, tpoints) xmin = 0.0 xmax = 10.0 xpoints = 200 x = np.linspace(xmin, xmax, xpoints) c = 1.0 a = 0.0Compute a 2d NumPy array called `phi`:* It should have a dtype of `float`.* It should have a shape of `(xpoints, tpoints)`.* `phi[i,j]` should contain the value $\phi(x[i],t[j])$.# YOUR CODE HERE phi = np.empty([xpoints, tpoints]) for i in x: for j in t: phi[i, j] = soliton(x[i], t[j], c, a) assert phi.shape==(xpoints, tpoints) assert phi.ndim==2 assert phi.dtype==np.dtype(float) assert phi[0,0]==soliton(x[0],t[0],c,a)Write a `plot_soliton_data(i)` function that plots the soliton wave $\phi(x, t[i])$. 
Customize your plot to make it effective and beautiful.def plot_soliton_data(i=0): """Plot the soliton data at t[i] versus x.""" # YOUR CODE HERE plt.plot(soliton(x, t[i], c, a)) f = plt.figure(figsize=(10,6)) plot_soliton_data(0) plt.xlabel('Time') plt.ylabel('Position') plt.title('Soliton position vs. time') plt.ylim(0, 0.6) assert True # leave this for grading the plot_soliton_data functionUse `interact` to animate the `plot_soliton_data` function versus time.# YOUR CODE HERE interact(plot_soliton_data, i=5); assert True # leave this for grading the interact with plot_soliton_data cellLooking for Correlationscorr_matrix = data.corr() corr_matrix['Survived'].sort_values(ascending=False) attributes = ['Age', 'Fare', 'Parch', 'Pclass', 'SibSp', 'Survived'] pd.plotting.scatter_matrix(data[attributes], figsize=(12,8)) plt.savefig('temp__CorrMatrix', format='png')Experimenting with Attribute Combinationsdata['FamilyMembers'] = data['SibSp'] + data['Parch'] corr_matrix = data.corr() corr_matrix['Survived']Udacity Data Engineering Capstone Project ProjectLoad, explore and clean the fact tables with Pandas.**Download the data** * Use data_utils.py functions* Find all the csv files in ../data/source folder Librariesimport os import configparser import pandas as pd import numpy as np from datetime import datetime import glob```flights``` tableinput_data = "../data/source/flightlist_20201201_20201231.csv.gz" df = pd.read_csv( input_data, compression='gzip') df.info() df.isna().sum() df.head() # process opensky data df1 = df.copy() # Remove rows where origin AND destination are unknown df1 = df1.dropna( how='all', subset=['origin', 'destination']) col = 'destination' df1.loc[ ~df1[col].astype(str).str.isalnum()] df1.loc[ df1.duplicated( subset=['callsign', 'icao24', 'origin', 'destination'], keep=False)].sort_values( by=['callsign', 'icao24', 'day'])```covid_19``` tables3_input_data = 's3a://udacity-capstoneproject/source/covid-19-world-cases-deaths-testing/dataset/covid-19-world-cases-deaths-testing.csv' input_data = '../data/source/covid-19_world_cases_deaths_testing.csv' covid19 = pd.read_csv( input_data) covid19 print(covid19.iso_code.unique()) print("Total number of iso_code: ", len(covid19.iso_code.unique())) countries = pd.read_csv('../data/source/countries.csv') print(countries.code.unique()) print("Total number of code in countries.csv: ", len(countries.code.unique())) print(covid19.location.unique()) print("Total number of locations: ", len(covid19.location.unique())) covid19_countries = covid19[['iso_code', 'continent', 'location']].drop_duplicates() covid19_countries covid19_countries.loc[covid19_countries.iso_code.str.contains('OWID')] joined_countries = countries.join( covid19_countries.set_index('location'), on='name', how='left', lsuffix='_cts', rsuffix='_cvd') joined_countries joined_countries.isna().sum() joined_countries.loc[ joined_countries.code.isna()] cases = covid19[['iso_code', 'continent', 'location', 'date', 'total_cases', 'new_cases', 'new_cases_smoothed', 'total_deaths', 'new_deaths', 'new_deaths_smoothed', 'total_cases_per_million', 'new_cases_per_million', 'new_cases_smoothed_per_million', 'total_deaths_per_million', 'new_deaths_per_million', 'new_deaths_smoothed_per_million']] cases.isna().sum()Brazil and USA to China## USA TO CHN usa_chn = df_from_json_clean('https://atlas.media.mit.edu/sitc/export/2012.2017/usa/chn/show/') usa_chn = to_normalized_df(usa_chn) ## BRA TO CHN bra_chn = df_from_json_clean('https://atlas.media.mit.edu/sitc/export/2012.2017/bra/chn/show/') 
bra_chn = to_normalized_df(bra_chn) graph_usabra_chn = nx.DiGraph() # USA to CHN add_relations_to_graph(graph_usabra_chn, usa_chn, "USA", "CHN") # BRA to CHN add_relations_to_graph(graph_usabra_chn, bra_chn, "BRA", "CHN") graph_usabra_chn['USA'] nx.write_gexf(graph_usabra_chn, "usabra_chn.gexf")Brazil and China to USA## CHN TO USA chn_usa = df_from_json_clean('https://atlas.media.mit.edu/sitc/export/2012.2017/chn/usa/show/') chn_usa = to_normalized_df(chn_usa) ## BRA TO USA bra_usa = df_from_json_clean('https://atlas.media.mit.edu/sitc/export/2012.2017/bra/usa/show/') bra_usa = to_normalized_df(bra_usa) graph_chnbra_usa = nx.DiGraph() # CHN to USA add_relations_to_graph(graph_chnbra_usa, chn_usa, "CHN", "USA") # BRA to USA add_relations_to_graph(graph_chnbra_usa, bra_usa, "BRA", "USA") graph_chnbra_usa['CHN'] nx.write_gexf(graph_chnbra_usa, "chnbra_usa.gexf")Titanic * Fill in the survival column by hand with pandas, by looking at the data instead of using machine learning.import pandas as pd import numpy as npLoad the data.train = pd.read_csv('data/train.csv') test = pd.read_csv('data/test.csv') print(train.shape) print(test.shape) train.head() test.head()Summarize the data with describe().train.describe() test.describe()Summarize the data with info().train.info() RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): PassengerId 891 non-null int64 Survived 891 non-null int64 Pclass 891 non-null int64 Name 891 non-null object Sex 891 non-null object Age 714 non-null float64 SibSp 891 non-null int64 Parch 891 non-null int64 Ticket 891 non-null object Fare 891 non-null float64 Cabin 204 non-null object Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.6+ KBSurvival rate by sex Since survival is coded as 1 and 0, taking the mean gives the survival rate.train.groupby('Sex')[['Survived']].mean() train.pivot_table(values=['Survived'], index=['Sex'], aggfunc=np.mean) train.pivot_table(index=['Sex']) # Quartiles by sex are not meaningful in themselves; this just shows that describe() also supports this kind of analysis. train.groupby('Sex')[['Survived']].describe() # survival rate by sex and cabin class train.pivot_table('Survived', ['Sex', 'Pclass'], aggfunc=np.mean) train.groupby('Sex')[['Survived']].mean()Just as we did in Excel, assume a passenger survived if she is female and fill in the Survived column.test['Survived'] = (test.Sex == 'female') & (test.Age > 0) & (test.Embarked ) test.head() test.isnull().sum() test['Survived'].value_counts()Understand how pandas DataFrames are copied.* Reference: [Understanding SettingwithCopyWarning in pandas](https://www.dataquest.io/blog/settingwithcopywarning/)submission = test[['PassengerId', 'Survived']].copy() submission.head()Convert to the data type required by the submission format.submission['Survived'] = submission['Survived'].astype(int) submission.head()Write the file in csv format.# %mkdir submissions submission.to_csv('submissions/submission.csv', index=False)Get the working-directory path so you can locate the submission file in Explorer or Finder and submit it.#%pwdStable Neural ODEs (*Stable Neural Flows*)First introduced in [ et al, 2020](https://arxiv.org/abs/2003.08063), *Stable Neural Flows* are a stable variant of Neural ODEs. Their simplest realization has the general neural ODE form$$ \begin{aligned} &\bf{\dot z} = -\nabla_z\varepsilon(s, x, z, \theta)\\ &{\bf z}(0) = h_x(\bf x) \end{aligned}$$where $\varepsilon(s, x, z, \theta)$ is a neural network.They can be used both as general-purpose modules (e.g.
classification, continuous normalizing flows) or, thanks to their unique structure, they can be employed to learn dynamical systems in a similar fashion to Lagrangian/Hamiltonian-inspired modelsimport sys sys.path.append('../') from torchdyn.models import * from torchdyn import * from torchdyn.datasets import * # Vanilla Version of stable neural flows class Stable(nn.Module): """Stable Neural Flow""" def __init__(self, net, depthvar=False, controlled=False): super().__init__() self.net, self.depthvar, self.controlled = net, depthvar, controlled def forward(self, x): with torch.set_grad_enabled(True): bs, n = x.shape[0], x.shape[1] // 2 x = x.requires_grad_(True) eps = self.net(x).sum() out = -torch.autograd.grad(eps, x, allow_unused=False, create_graph=True)[0] out = out[:,:-1] if self.depthvar else out out = out[:,:-2] if self.controlled else out return outLearninig Dynamical SystemsStable neural flows variants in a (autonomous) [port--Hamiltonian](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.366.3380&rep=rep1&type=pdf) form $$ \bf{\dot z} = F({\bf z})\nabla_z\varepsilon(z)$$generalizes the Hamiltonian paradigm to modeling multi-physics systems. They obey to the *power balance equation*$$ \frac{d \varepsilon}{d t} = (\nabla\varepsilon)^\top {\bf F(z)}\nabla\varepsilon$$Therefore, if one wants to learn e.g. some conservative process (of any nature), it is sufficient to introduce the inductive bias on $\bf F$ to be a skew-symmetric matrix such that $\dot \varepsilon = 0$. Here, we showcase the capibilities of stable neural flows (in port-Hamiltonian form) in such tasks.# Conservative variant of stable neural flow class ConservativeStable(nn.Module): """Conservative Stable Neural Flow""" def __init__(self, net, depthvar=False, controlled=False): super().__init__() self.net, self.depthvar, self.controlled = net, depthvar, controlled self.M = torch.nn.Parameter(torch.randn(2,2)).to(device) # impose the system matrix to be skew symmetric def Skew(self): return .5*(self.M - self.M.T) def forward(self, x): with torch.set_grad_enabled(True): bs, n = x.shape[0], x.shape[1] // 2 x = x.requires_grad_(True) eps = self.net(x).sum() out = -torch.autograd.grad(eps, x, allow_unused=False, create_graph=True)[0] #self.out = out out = out[:,:-1] if self.depthvar else out out = out[:,:-2] if self.controlled else out return out @ self.Skew()We aim at using a stable neural ODE learning the following conservative nonlinear dynamical system$$ \begin{bmatrix} \dot x\\ \dot v \end{bmatrix} = \begin{bmatrix} v(t)\\ \pi\left[\cos\left(\pi x(t) - \frac{\pi}{2}\right) - x(t)\right] \end{bmatrix} $$# We use this class to simulate through torchdyn the above nonlinear system class odefunc(nn.Module): def __init__(self, sys): super().__init__() self.sys = sys def forward(self, x): return self.sys(x) ## nonlinear conservative vector field def sys(x): dxdt = x[:,1] dydt = 1*np.pi*torch.cos(np.pi*x[:,0]-np.pi/2) - np.pi*x[:,0]# - .5*np.pi*x[:,1] return torch.cat([dxdt[:,None], dydt[:,None]], 1) # define the system model just like a neural ODE system = NeuralDE(odefunc(sys)) x0, t_span = torch.randn(1000,2), torch.linspace(0, 2, 100) # simulate the system traj = system.trajectory(x0, t_span) # plot the trajectories for i in range(len(x0)): plt.plot(traj[:,i,0], traj[:,i,1], color='blue', alpha=.1)Train the conservative stable neural flowimport torch.utils.data as data device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Data vf = odefunc(sys) X = 4*torch.rand(2048,2).to(device) y = vf(X) train 
= data.TensorDataset(X, y) trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False) import pytorch_lightning as pl import copy class Learner(pl.LightningModule): def __init__(self, model:nn.Module): super().__init__() self.model = model def forward(self, x): return self.model.defunc(0,x) def loss(self, y, y_hat): return ((y-y_hat)**2).sum(1).mean() def training_step(self, batch, batch_idx): x = torch.randn(2048,2).to(device) y = vf(x) y_hat = self.model.defunc(0,x) loss = self.loss(y_hat, y) logs = {'train_loss': loss} return {'loss': loss, 'log': logs} def configure_optimizers(self): return torch.optim.Adam(self.model.parameters(), lr=0.001) def train_dataloader(self): return trainloader # vector field parametrized by a NN h_dim = 128 f = ConservativeStable(nn.Sequential( nn.Linear(2,h_dim), nn.Tanh(), nn.Linear(h_dim,h_dim), nn.Tanh(), nn.Linear(h_dim,h_dim), nn.Tanh(), nn.Linear(h_dim, 1))) # neural ODE model = NeuralDE(f, order=1, solver='dopri5', sensitivity='adjoint').to(device) seq = nn.Sequential(model).to(device) learn = Learner(model) trainer = pl.Trainer(min_epochs=500, max_epochs=1000) trainer.fit(learn) # Sample random initial conditions X_t = torch.randn(1000, 2).to(device) # Evaluate the model's trajectories s_span = torch.linspace(0, 5, 100) traj = model.trajectory(X_t, s_span).detach().cpu() sys_traj = system.trajectory(X_t, s_span).detach().cpu() # Plot the trajectories with random ICs fig = plt.figure(figsize=(10,3)) ax = fig.add_subplot(121) ax2 = fig.add_subplot(122) for i in range(len(X_t)): ax.plot(traj[:,i,0], traj[:,i,1], color='blue', alpha=0.1); ax.set_xlim([-3,3]) ax.set_ylim([-3,3]) ax.set_xlabel(r"$q$") ax.set_ylabel(r"$p$") ax.set_title("Reconstructed") for i in range(len(X_t)): ax2.plot(sys_traj[:,i,0], sys_traj[:,i,1], color='blue', alpha=0.1); ax2.set_xlim([-3,3]) ax2.set_ylim([-3,3]) ax2.set_xlabel(r"$q$") ax2.set_ylabel(r"$p$") ax2.set_title("Nominal") # Compare the learned vector field to the nominal one import time fig = plt.figure(figsize=(10,3)) ax0 = fig.add_subplot(121) ax1 = fig.add_subplot(122) n_grid = 25 q = torch.linspace(-3,3,n_grid) Q, P = torch.meshgrid(q,q) H, U, V = torch.zeros(Q.shape), torch.zeros(Q.shape), torch.zeros(Q.shape) Ur, Vr = torch.zeros(Q.shape), torch.zeros(Q.shape) for i in range(n_grid): for j in range(n_grid): x = torch.cat([Q[i,j].reshape(1,1),P[i,j].reshape(1,1)],1).to(device) H[i,j] = model.defunc.m.net(x).detach().cpu() O = model.defunc(0,x).detach().cpu() U[i,j], V[i,j] = O[0,0], O[0,1] Ur[i,j], Vr[i,j] = vf(x)[0,0].detach().cpu(), vf(x)[0,1].detach().cpu() ax0.contourf(Q,P,H,100,cmap='inferno') ax0.streamplot(Q.T.numpy(),P.T.numpy(),U.T.numpy(),V.T.numpy(), color='white') ax1.streamplot(Q.T.numpy(),P.T.numpy(),Ur.T.numpy(),Vr.T.numpy(), color='black') ax0.set_xlim([Q.min(),Q.max()]) ; ax1.set_xlim([Q.min(),Q.max()]) ax0.set_ylim([P.min(),P.max()]) ; ax1.set_ylim([P.min(),P.max()]) ax0.set_xticks([]) ; ax1.set_xticks([]) ax0.set_yticks([]) ; ax1.set_yticks([]) ax0.set_title(f"Learnerd Energy & Vector Field") ; ax1.set_title("Nominal Vector Field")Inference with Discrete Latent VariablesThis tutorial describes Pyro's enumeration strategy for discrete latent variable models.This tutorial assumes the reader is already familiar with the [Tensor Shapes Tutorial](http://pyro.ai/examples/tensor_shapes.html). 
Summary - Pyro implements automatic enumeration over discrete latent variables.- This strategy can be used alone or inside SVI (via [TraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.htmlpyro.infer.traceenum_elbo.TraceEnum_ELBO)), HMC, or NUTS.- The standalone [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.htmlpyro.infer.discrete.infer_discrete) can generate samples or MAP estimates.- Annotate a sample site `infer={"enumerate": "parallel"}` to trigger enumeration.- If a sample site determines downstream structure, instead use `{"enumerate": "sequential"}`.- Write your models to allow arbitrarily deep batching on the left, e.g. use broadcasting.- Inference cost is exponential in treewidth, so try to write models with narrow treewidth.- If you have trouble, ask for help on [forum.pyro.ai](https://forum.pyro.ai)! Table of contents- [Overview](Overview)- [Mechanics of enumeration](Mechanics-of-enumeration) - [Multiple latent variables](Multiple-latent-variables) - [Examining discrete latent states](Examining-discrete-latent-states) - [Indexing with enumerated variables](Indexing-with-enumerated-variables)- [Plates and enumeration](Plates-and-enumeration) - [Dependencies among plates](Dependencies-among-plates)- [Time series example](Time-series-example) - [How to enumerate more than 25 variables](How-to-enumerate-more-than-25-variables)import os import torch import pyro import pyro.distributions as dist from torch.distributions import constraints from pyro import poutine from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, config_enumerate, infer_discrete from pyro.infer.autoguide import AutoNormal from pyro.ops.indexing import Vindex smoke_test = ('CI' in os.environ) assert pyro.__version__.startswith('1.8.0') pyro.set_rng_seed(0)Overview Pyro's enumeration strategy ([Obermeyer et al. 2019](https://arxiv.org/abs/1902.03210)) encompasses popular algorithms including variable elimination, exact message passing, forward-filter-backward-sample, inside-out, Baum-Welch, and many other special-case algorithms. Aside from enumeration, Pyro implements a number of inference strategies including variational inference ([SVI](http://docs.pyro.ai/en/dev/inference_algos.html)) and monte carlo ([HMC](http://docs.pyro.ai/en/dev/mcmc.htmlpyro.infer.mcmc.HMC) and [NUTS](http://docs.pyro.ai/en/dev/mcmc.htmlpyro.infer.mcmc.NUTS)). Enumeration can be used either as a stand-alone strategy via [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.htmlpyro.infer.discrete.infer_discrete), or as a component of other strategies. Thus enumeration allows Pyro to marginalize out discrete latent variables in HMC and SVI models, and to use variational enumeration of discrete variables in SVI guides. Mechanics of enumeration The core idea of enumeration is to interpret discrete [pyro.sample](http://docs.pyro.ai/en/dev/primitives.htmlpyro.sample) statements as full enumeration rather than random sampling. Other inference algorithms can then sum out the enumerated values. 
For example a sample statement might return a tensor of scalar shape under the standard "sample" interpretation (we'll illustrate with trivial model and guide):def model(): z = pyro.sample("z", dist.Categorical(torch.ones(5))) print(f"model z = {z}") def guide(): z = pyro.sample("z", dist.Categorical(torch.ones(5))) print(f"guide z = {z}") elbo = Trace_ELBO() elbo.loss(model, guide);guide z = 4 model z = 4However under the enumeration interpretation, the same sample site will return a fully enumerated set of values, based on its distribution's [.enumerate_support()](https://pytorch.org/docs/stable/distributions.htmltorch.distributions.distribution.Distribution.enumerate_support) method.elbo = TraceEnum_ELBO(max_plate_nesting=0) elbo.loss(model, config_enumerate(guide, "parallel"));guide z = tensor([0, 1, 2, 3, 4]) model z = tensor([0, 1, 2, 3, 4])Note that we've used "parallel" enumeration to enumerate along a new tensor dimension. This is cheap and allows Pyro to parallelize computation, but requires downstream program structure to avoid branching on the value of `z`. To support dynamic program structure, you can instead use "sequential" enumeration, which runs the entire model,guide pair once per sample value, but requires running the model multiple times.elbo = TraceEnum_ELBO(max_plate_nesting=0) elbo.loss(model, config_enumerate(guide, "sequential"));guide z = 4 model z = 4 guide z = 3 model z = 3 guide z = 2 model z = 2 guide z = 1 model z = 1 guide z = 0 model z = 0Parallel enumeration is cheaper but more complex than sequential enumeration, so we'll focus the rest of this tutorial on the parallel variant. Note that both forms can be interleaved. Multiple latent variables We just saw that a single discrete sample site can be enumerated via nonstandard interpretation. A model with a single discrete latent variable is a mixture model. Models with multiple discrete latent variables can be more complex, including HMMs, CRFs, DBNs, and other structured models. In models with multiple discrete latent variables, Pyro enumerates each variable in a different tensor dimension (counting from the right; see [Tensor Shapes Tutorial](http://pyro.ai/examples/tensor_shapes.html)). This allows Pyro to determine the dependency graph among variables and then perform cheap exact inference using variable elimination algorithms.To understand enumeration dimension allocation, consider the following model, where here we collapse variables out of the model, rather than enumerate them in the guide.@config_enumerate def model(): p = pyro.param("p", torch.randn(3, 3).exp(), constraint=constraints.simplex) x = pyro.sample("x", dist.Categorical(p[0])) y = pyro.sample("y", dist.Categorical(p[x])) z = pyro.sample("z", dist.Categorical(p[y])) print(f" model x.shape = {x.shape}") print(f" model y.shape = {y.shape}") print(f" model z.shape = {z.shape}") return x, y, z def guide(): pass pyro.clear_param_store() print("Sampling:") model() print("Enumerated Inference:") elbo = TraceEnum_ELBO(max_plate_nesting=0) elbo.loss(model, guide);Sampling: model x.shape = torch.Size([]) model y.shape = torch.Size([]) model z.shape = torch.Size([]) Enumerated Inference: model x.shape = torch.Size([3]) model y.shape = torch.Size([3, 1]) model z.shape = torch.Size([3, 1, 1])Examining discrete latent states While enumeration in SVI allows fast learning of parameters like `p` above, it does not give access to predicted values of the discrete latent variables like `x,y,z` above. 
We can access these using a standalone [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.htmlpyro.infer.discrete.infer_discrete) handler. In this case the guide was trivial, so we can simply wrap the model in `infer_discrete`. We need to pass a `first_available_dim` argument to tell `infer_discrete` which dimensions are available for enumeration; this is related to the `max_plate_nesting` arg of `TraceEnum_ELBO` via```first_available_dim = -1 - max_plate_nesting```serving_model = infer_discrete(model, first_available_dim=-1) x, y, z = serving_model() # takes the same args as model(), here no args print(f"x = {x}") print(f"y = {y}") print(f"z = {z}")model x.shape = torch.Size([3]) model y.shape = torch.Size([3, 1]) model z.shape = torch.Size([3, 1, 1]) model x.shape = torch.Size([]) model y.shape = torch.Size([]) model z.shape = torch.Size([]) x = 2 y = 1 z = 0Notice that under the hood `infer_discrete` runs the model twice: first in forward-filter mode where sites are enumerated, then in replay-backward-sample model where sites are sampled. `infer_discrete` can also perform MAP inference by passing `temperature=0`. Note that while `infer_discrete` produces correct posterior samples, it does not currently produce correct logprobs, and should not be used in other gradient-based inference algorthms. Indexing with enumerated variablesIt can be tricky to use [advanced indexing](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html) to select an element of a tensor using one or more enumerated variables. This is especially true in Pyro models where your model's indexing operations need to work in multiple interpretations: both sampling from the model (to generate data) and during enumerated inference. For example, suppose a plated random variable `z` depends on two different random variables:```pyp = pyro.param("p", torch.randn(5, 4, 3, 2).exp(), constraint=constraints.simplex)x = pyro.sample("x", dist.Categorical(torch.ones(4)))y = pyro.sample("y", dist.Categorical(torch.ones(3)))with pyro.plate("z_plate", 5): p_xy = p[..., x, y, :] Not compatible with enumeration! z = pyro.sample("z", dist.Categorical(p_xy)```Due to advanced indexing semantics, the expression `p[..., x, y, :]` will work correctly without enumeration, but is incorrect when `x` or `y` is enumerated. Pyro provides a simple way to index correctly, but first let's see how to correctly index using PyTorch's advanced indexing without Pyro:```py Compatible with enumeration, but not recommended:p_xy = p[torch.arange(5, device=p.device).reshape(5, 1), x.unsqueeze(-1), y.unsqueeze(-1), torch.arange(2, device=p.device)]```Pyro provides a helper [Vindex()[]](http://docs.pyro.ai/en/dev/ops.htmlpyro.ops.indexing.Vindex) to use enumeration-compatible advanced indexing semantics rather than standard PyTorch/NumPy semantics. (Note the `Vindex` name and semantics follow the Numpy Enhancement Proposal [NEP 21](https://numpy.org/neps/nep-0021-advanced-indexing.html)). `Vindex()[]` makes the `.__getitem__()` operator broadcast like other familiar operators `+`, `*` etc. Using `Vindex()[]` we can write the same expression as if `x` and `y` were numbers (i.e. 
not enumerated):```py Recommended syntax compatible with enumeration:p_xy = Vindex(p)[..., x, y, :]```Here is a complete example:@config_enumerate def model(): p = pyro.param("p", torch.randn(5, 4, 3, 2).exp(), constraint=constraints.simplex) x = pyro.sample("x", dist.Categorical(torch.ones(4))) y = pyro.sample("y", dist.Categorical(torch.ones(3))) with pyro.plate("z_plate", 5): p_xy = Vindex(p)[..., x, y, :] z = pyro.sample("z", dist.Categorical(p_xy)) print(f" p.shape = {p.shape}") print(f" x.shape = {x.shape}") print(f" y.shape = {y.shape}") print(f" p_xy.shape = {p_xy.shape}") print(f" z.shape = {z.shape}") return x, y, z def guide(): pass pyro.clear_param_store() print("Sampling:") model() print("Enumerated Inference:") elbo = TraceEnum_ELBO(max_plate_nesting=1) elbo.loss(model, guide);Sampling: p.shape = torch.Size([5, 4, 3, 2]) x.shape = torch.Size([]) y.shape = torch.Size([]) p_xy.shape = torch.Size([5, 2]) z.shape = torch.Size([5]) Enumerated Inference: p.shape = torch.Size([5, 4, 3, 2]) x.shape = torch.Size([4, 1]) y.shape = torch.Size([3, 1, 1]) p_xy.shape = torch.Size([3, 4, 5, 2]) z.shape = torch.Size([2, 1, 1, 1])When enumering within a plate (as described in the next section) ``Vindex`` can also be used together with capturing the plate index via ``with pyro.plate(...) as i`` to index into batch dimensions. Here's an example with nontrivial event dimensions due to the ``Dirichlet`` distribution.@config_enumerate def model(): data_plate = pyro.plate("data_plate", 6, dim=-1) feature_plate = pyro.plate("feature_plate", 5, dim=-2) component_plate = pyro.plate("component_plate", 4, dim=-1) with feature_plate: with component_plate: p = pyro.sample("p", dist.Dirichlet(torch.ones(3))) with data_plate: c = pyro.sample("c", dist.Categorical(torch.ones(4))) with feature_plate as vdx: # Capture plate index. pc = Vindex(p)[vdx[..., None], c, :] # Reshape it and use in Vindex. x = pyro.sample("x", dist.Categorical(pc), obs=torch.zeros(5, 6, dtype=torch.long)) print(f" p.shape = {p.shape}") print(f" c.shape = {c.shape}") print(f" vdx.shape = {vdx.shape}") print(f" pc.shape = {pc.shape}") print(f" x.shape = {x.shape}") def guide(): feature_plate = pyro.plate("feature_plate", 5, dim=-2) component_plate = pyro.plate("component_plate", 4, dim=-1) with feature_plate, component_plate: pyro.sample("p", dist.Dirichlet(torch.ones(3))) pyro.clear_param_store() print("Sampling:") model() print("Enumerated Inference:") elbo = TraceEnum_ELBO(max_plate_nesting=2) elbo.loss(model, guide);Sampling: p.shape = torch.Size([5, 4, 3]) c.shape = torch.Size([6]) vdx.shape = torch.Size([5]) pc.shape = torch.Size([5, 6, 3]) x.shape = torch.Size([5, 6]) Enumerated Inference: p.shape = torch.Size([5, 4, 3]) c.shape = torch.Size([4, 1, 1]) vdx.shape = torch.Size([5]) pc.shape = torch.Size([4, 5, 1, 3]) x.shape = torch.Size([5, 6])Plates and enumeration Pyro [plates](http://docs.pyro.ai/en/dev/primitives.htmlpyro.plate) express conditional independence among random variables. Pyro's enumeration strategy can take advantage of plates to reduce the high cost (exponential in the size of the plate) of enumerating a cartesian product down to a low cost (linear in the size of the plate) of enumerating conditionally independent random variables in lock-step. This is especially important for e.g. 
minibatched data.To illustrate, consider a gaussian mixture model with shared variance and different mean.@config_enumerate def model(data, num_components=3): print(f" Running model with {len(data)} data points") p = pyro.sample("p", dist.Dirichlet(0.5 * torch.ones(num_components))) scale = pyro.sample("scale", dist.LogNormal(0, num_components)) with pyro.plate("components", num_components): loc = pyro.sample("loc", dist.Normal(0, 10)) with pyro.plate("data", len(data)): x = pyro.sample("x", dist.Categorical(p)) print(" x.shape = {}".format(x.shape)) pyro.sample("obs", dist.Normal(loc[x], scale), obs=data) print(" dist.Normal(loc[x], scale).batch_shape = {}".format( dist.Normal(loc[x], scale).batch_shape)) guide = AutoNormal(poutine.block(model, hide=["x", "data"])) data = torch.randn(10) pyro.clear_param_store() print("Sampling:") model(data) print("Enumerated Inference:") elbo = TraceEnum_ELBO(max_plate_nesting=1) elbo.loss(model, guide, data);Sampling: Running model with 10 data points x.shape = torch.Size([10]) dist.Normal(loc[x], scale).batch_shape = torch.Size([10]) Enumerated Inference: Running model with 10 data points x.shape = torch.Size([10]) dist.Normal(loc[x], scale).batch_shape = torch.Size([10]) Running model with 10 data points x.shape = torch.Size([3, 1]) dist.Normal(loc[x], scale).batch_shape = torch.Size([3, 1])Observe that during inference the model is run twice, first by the `AutoNormal` to trace sample sites, and second by `elbo` to compute loss. In the first run, `x` has the standard interpretation of one sample per datum, hence shape `(10,)`. In the second run enumeration can use the same three values `(3,1)` for all data points, and relies on broadcasting for any dependent sample or observe sites that depend on data. For example, in the `pyro.sample("obs",...)` statement, the distribution has shape `(3,1)`, the data has shape`(10,)`, and the broadcasted log probability tensor has shape `(3,10)`.For a more in-depth treatment of enumeration in mixture models, see the [Gaussian Mixture Model Tutorial](http://pyro.ai/examples/gmm.html) and the [HMM Example](http://pyro.ai/examples/hmm.html). Dependencies among plates The computational savings of enumerating in vectorized plates comes with restrictions on the dependency structure of models (as described in ([Obermeyer et al. 2019](https://arxiv.org/abs/1902.03210))). These restrictions are in addition to the usual restrictions of conditional independence. The enumeration restrictions are checked by `TraceEnum_ELBO` and will result in an error if violated (however the usual conditional independence restriction cannot be generally verified by Pyro). For completeness we list all three restrictions: Restriction 1: conditional independenceVariables within a plate may not depend on each other (along the plate dimension). This applies to any variable, whether or not it is enumerated. This applies to both sequential plates and vectorized plates. For example the following model is invalid:```pydef invalid_model(): x = 0 for i in pyro.plate("invalid", 10): x = pyro.sample(f"x_{i}", dist.Normal(x, 1.))``` Restriction 2: no downstream couplingNo variable outside of a vectorized plate can depend on an enumerated variable inside of that plate. This would violate Pyro's exponential speedup assumption. 
For example the following model is invalid:```py@config_enumeratedef invalid_model(data): with pyro.plate("plate", 10): <--- invalid vectorized plate x = pyro.sample("x", dist.Bernoulli(0.5)) assert x.shape == (10,) pyro.sample("obs", dist.Normal(x.sum(), 1.), data)``` To work around this restriction, you can convert the vectorized plate to a sequential plate:```py@config_enumeratedef valid_model(data): x = [] for i in pyro.plate("plate", 10): <--- valid sequential plate x.append(pyro.sample(f"x_{i}", dist.Bernoulli(0.5))) assert len(x) == 10 pyro.sample("obs", dist.Normal(sum(x), 1.), data)``` Restriction 3: single path leaving each plateThe final restriction is subtle, but is required to enable Pyro's exponential speedup> For any enumerated variable `x`, the set of all enumerated variables on which `x` depends must be linearly orderable in their vectorized plate nesting.This requirement only applies when there are at least two plates and at least three variables in different plate contexts. The simplest counterexample is a Boltzmann machine```py@config_enumeratedef invalid_model(data): plate_1 = pyro.plate("plate_1", 10, dim=-1) vectorized plate_2 = pyro.plate("plate_2", 10, dim=-2) vectorized with plate_1: x = pyro.sample("y", dist.Bernoulli(0.5)) with plate_2: y = pyro.sample("x", dist.Bernoulli(0.5)) with plate_1, plate2: z = pyro.sample("z", dist.Bernoulli((1. + x + y) / 4.)) ...```Here we see that the variable `z` depends on variable `x` (which is in `plate_1` but not `plate_2`) and depends on variable `y` (which is in `plate_2` but not `plate_1`). This model is invalid because there is no way to linearly order `x` and `y` such that one's plate nesting is less than the other.To work around this restriction, you can convert one of the plates to a sequential plate:```py@config_enumeratedef valid_model(data): plate_1 = pyro.plate("plate_1", 10, dim=-1) vectorized plate_2 = pyro.plate("plate_2", 10) sequential with plate_1: x = pyro.sample("y", dist.Bernoulli(0.5)) for i in plate_2: y = pyro.sample(f"x_{i}", dist.Bernoulli(0.5)) with plate_1: z = pyro.sample(f"z_{i}", dist.Bernoulli((1. + x + y) / 4.)) ...```but beware that this increases the computational complexity, which may be exponential in the size of the sequential plate. Time series example Consider a discrete HMM with latent states $x_t$ and observations $y_t$. Suppose we want to learn the transition and emission probabilities.data_dim = 4 num_steps = 10 data = dist.Categorical(torch.ones(num_steps, data_dim)).sample() def hmm_model(data, data_dim, hidden_dim=10): print(f"Running for {len(data)} time steps") # Sample global matrices wrt a Jeffreys prior. 
with pyro.plate("hidden_state", hidden_dim): transition = pyro.sample("transition", dist.Dirichlet(0.5 * torch.ones(hidden_dim))) emission = pyro.sample("emission", dist.Dirichlet(0.5 * torch.ones(data_dim))) x = 0 # initial state for t, y in enumerate(data): x = pyro.sample(f"x_{t}", dist.Categorical(transition[x]), infer={"enumerate": "parallel"}) pyro.sample(f" y_{t}", dist.Categorical(emission[x]), obs=y) print(f" x_{t}.shape = {x.shape}")We can learn the global parameters using SVI with an autoguide.hmm_guide = AutoNormal(poutine.block(hmm_model, expose=["transition", "emission"])) pyro.clear_param_store() elbo = TraceEnum_ELBO(max_plate_nesting=1) elbo.loss(hmm_model, hmm_guide, data, data_dim=data_dim);Running for 10 time steps x_0.shape = torch.Size([]) x_1.shape = torch.Size([]) x_2.shape = torch.Size([]) x_3.shape = torch.Size([]) x_4.shape = torch.Size([]) x_5.shape = torch.Size([]) x_6.shape = torch.Size([]) x_7.shape = torch.Size([]) x_8.shape = torch.Size([]) x_9.shape = torch.Size([]) Running for 10 time steps x_0.shape = torch.Size([10, 1]) x_1.shape = torch.Size([10, 1, 1]) x_2.shape = torch.Size([10, 1, 1, 1]) x_3.shape = torch.Size([10, 1, 1, 1, 1]) x_4.shape = torch.Size([10, 1, 1, 1, 1, 1]) x_5.shape = torch.Size([10, 1, 1, 1, 1, 1, 1]) x_6.shape = torch.Size([10, 1, 1, 1, 1, 1, 1, 1]) x_7.shape = torch.Size([10, 1, 1, 1, 1, 1, 1, 1, 1]) x_8.shape = torch.Size([10, 1, 1, 1, 1, 1, 1, 1, 1, 1]) x_9.shape = torch.Size([10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])Notice that the model was run twice here: first it was run without enumeration by `AutoNormal`, so that the autoguide can record all sample sites; then second it is run by `TraceEnum_ELBO` with enumeration enabled. We see in the first run that samples have the standard interpretation, whereas in the second run samples have the enumeration interpretation.For more complex examples, including minibatching and multiple plates, see the [HMM tutorial](https://github.com/pyro-ppl/pyro/blob/dev/examples/hmm.py). How to enumerate more than 25 variables PyTorch tensors have a dimension limit of 25 in CUDA and 64 in CPU. By default Pyro enumerates each sample site in a new dimension. If you need more sample sites, you can annotate your model with [pyro.markov](http://docs.pyro.ai/en/dev/poutine.htmlpyro.poutine.markov) to tell Pyro when it is safe to recycle tensor dimensions. Let's see how that works with the HMM model from above. The only change we need is to annotate the for loop with `pyro.markov`, informing Pyro that the variables in each step of the loop depend only on variables outside of the loop and variables at this step and the previous step of the loop:```diff- for t, y in enumerate(data):+ for t, y in pyro.markov(enumerate(data)):```def hmm_model(data, data_dim, hidden_dim=10): with pyro.plate("hidden_state", hidden_dim): transition = pyro.sample("transition", dist.Dirichlet(0.5 * torch.ones(hidden_dim))) emission = pyro.sample("emission", dist.Dirichlet(0.5 * torch.ones(data_dim))) x = 0 # initial state for t, y in pyro.markov(enumerate(data)): x = pyro.sample(f"x_{t}", dist.Categorical(transition[x]), infer={"enumerate": "parallel"}) pyro.sample(f"y_{t}", dist.Categorical(emission[x]), obs=y) print(f"x_{t}.shape = {x.shape}") # We'll reuse the same guide and elbo. 
elbo.loss(hmm_model, hmm_guide, data, data_dim=data_dim);x_0.shape = torch.Size([10, 1]) x_1.shape = torch.Size([10, 1, 1]) x_2.shape = torch.Size([10, 1]) x_3.shape = torch.Size([10, 1, 1]) x_4.shape = torch.Size([10, 1]) x_5.shape = torch.Size([10, 1, 1]) x_6.shape = torch.Size([10, 1]) x_7.shape = torch.Size([10, 1, 1]) x_8.shape = torch.Size([10, 1]) x_9.shape = torch.Size([10, 1, 1])Track comparison for global wave modelimport pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib_inline.backend_inline import set_matplotlib_formats set_matplotlib_formats('png') %matplotlib inline from mikeio.eum import EUMType, ItemInfo from fmskill import ModelResult, TrackObservation, ConnectorBig dataRun the download.ipynb firstfn = '../data/SW_gwm_3a_extracted_2018.dfs0' mr = ModelResult(fn, name='GWM') fn = '../data/altimetry_3a_2018_filter1.dfs0' o1 = TrackObservation(fn, item=2, name='3a') con = Connector(o1, mr[2]) cc = con.extract() cc['3a'].skill(end='2018-1-15') cc['3a'].skill()Spatial skillSpatial skill with 1 deg bins and default bin edges.ss = cc.spatial_skill(metrics=['bias'], bins=(np.arange(-180,180,1), np.arange(-90,90,1)), n_min=20)Add attrs and plotss.ds['bias'].attrs = dict(long_name="Bias of significant wave height, Hm0",units="m") ss.ds['n'].attrs = dict(long_name="N of significant wave height",units="-") fig, axes = plt.subplots(ncols=1, nrows=2, figsize = (8, 10)) ss.plot('n', ax=axes[0]) ss.plot('bias', ax=axes[1]);Multiple bins - spatial skill for wave height Use all_df to obtain and df argument to pass customized data back to comparer.all_df = cc.all_df.copy() mean_val = all_df[['mod_val','obs_val']].mean(axis=1) all_df['val_cat'] = pd.cut(mean_val,[0,2,5,np.inf],labels=["Hm0[m]=[0, 2)","Hm0[m]=[2, 5)","Hm0[m]=[5, inf)"]) all_df.head() ss = cc.spatial_skill(df=all_df, by=["val_cat"], metrics=["bias"], bins=(np.arange(-180,180,5), np.arange(-90,90,5)), n_min=20) ss.ds['bias'].attrs = dict(long_name="Bias of significant wave height, Hm0", units="m") ss.ds['n'].attrs = dict(long_name="N of significant wave height", units="-") ss.ds['val_cat'].attrs = dict(long_name="Range of sign. wave height, Hm0", units="m") ss.plot('n', figsize=(12,4)); ss.plot('bias', figsize=(12,4));import numpy as np import matplotlib.pyplot as plt import mathTaking our fake astronomy data to the next level---------------Similar to what we did yesterday, we're going to continue working with fake astronomy data!def square_dip(t,array,mass,depth,period,seed=88): np.random.seed(seed) transit_expected = array.copy() start = round(np.random.uniform(1,20)) stop = start + mass while stop < len(t): transit_expected[start:stop] = depth start += period stop = start + mass return transit_expected gConstant = 6.67408 * 10**-11 def find_orbits(gConstant, mass, period): numerator = gConstant*mass*t**2 denominator = 4*math.pi**2 divide = numerator / denominator radius = np.cbrt(divide) return radius def find_velocity(gConstant,mass,radius): numerator = gConstant * mass velocity = numerator / radius return math.sqrt(velocity) def add_scatter(t,array,limit): # This is just a lazy (or SMART) way to add the noise :) return array + np.random.normal(0, limit, len(t)) t = np.arange(0,200) # the array of time values # making some fake error bars errors = 0.01 + np.random.uniform(0,0.01,len(t)) # this is where you define your planets! star = np.ones(len(t)) # Array of 1's that mimics the light from our star. 
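# A reminder of square_dip's arguments before we use it:
# square_dip(t, array, mass, depth, period, seed) copies `array` and carves
# box-shaped transits into it -- `mass` is the transit duration in time
# samples, `depth` is the brightness level inside each dip, `period` is the
# number of samples between successive transit starts, and `seed` only shifts
# where the first transit begins.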
planet1 = square_dip(t, star, 5,0.8,22) # Adding the first planet #adding in noise to make it look like real data! measurements = add_scatter(t, planet1, 0.015) # use the array called "planet1" here because we only have one planet so far plt.figure(figsize=(11,5.5)) # we want to see the planets individually plt.plot(t, planet1, label='Planet 1') ## plot the line fit for Planet 1 plt.errorbar(t, measurements, errors, fmt='k.') ## plot the measurements and errors for Planet 1 plt.legend() plt.xlabel('time [days]', size=14) plt.ylabel('relative brightness [%]', size=14) plt.show()So far, so good. Now we want to add a second planet. We are going to take the array called planet1 and send it through our "square-dip" function again. Let's visualize what happens and then go through it.## build a data set of measurements, which includes multiple planet observations at once star = np.ones(len(t)) # array of 1's that mimics the light from our star. planet1 = square_dip(t, star, 5, 0.8, 22, seed=1) # add the first planet all_planets = square_dip(t, planet1 , 8, 0.7, 35, seed=2) # add the second planet to the array that already has the first planet in it. # make sure to use a different random seed than in planet1 # remember that when we make our lines that trace each individual planet, we use the "star" array because # we want to know what the transit for JUST that planet looks like. If we use the arrays with the planets included # we will end up getting a line that traces all the planets at the same time. ## find the individual predicted light curves for each planet planet1 = square_dip(t, star, 5, 0.8, 22, seed=1) # a line that traces just planet1 planet2 = square_dip(t, star, 8, 0.7, 35, seed=2) # a line that traces just planet2 plt.figure(figsize=(11,5.5)) # add noise/scatter to our measurements so they look like real data measurements = add_scatter(t, all_planets, 0.015) plt.errorbar(t, measurements, errors, fmt='k.') ## plot the light curves on top of the data plt.plot(t, planet1, label='Planet 1', color='g') plt.plot(t, planet2, label="Planet 2", color='r') plt.legend() plt.xlabel('time [days]', size=14) plt.ylabel('relative brightness [%]', size=14) plt.savefig('look_at_my_amazing_planets.png') plt.show()Notice how we now have two planets in the "planets" array but each line traces the individual planets in the solar system we have build. Now we are going to make one with 4 planets, and remember that we order the planets from largest 'depth' to smallest 'depth'.## build a data set of measurements, which includes multiple planet observations at once star = np.ones(len(t)) # array of 1's that mimics the light from our star. planet1 = square_dip(t, star, 5, 0.8, 22, seed=1) # add the first planet planet2 = square_dip(t, planet1 , 8, 0.7, 35, seed=2) # add the second planet to the array with planet 1 in it. planet3 = square_dip(t, planet2, 4, 0.6, 58, seed=3) # add the third planet to the array with planet1 and planet2 in it. all_planets = square_dip(t, planet3, 12, 0.44, 49, seed=4) # add the fourth planet tot he array with planet1, planet2 and planet3 in it. 
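# Because each square_dip call starts from the array returned by the previous
# one, all_planets now carries the dips of all four planets in a single light
# curve -- this is the array we treat as our "measured" data below.
# Optional sanity check: the deepest transits should pull the combined light
# curve well below the out-of-transit level of 1.0.
print("minimum relative brightness in all_planets:", all_planets.min())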
## find the individual predicted light curves for each planet planet1 = square_dip(t, star, 5, 0.8, 22, seed=1) planetO1 = find_orbits(gConstant, 5, 22) planetV1 = find_velocity(gConstant, 5, planetO1) # a line tracing only planet1 planet2 = square_dip(t, star, 8, 0.7, 35, seed=2) # a line tracing only planet2 planet3 = square_dip(t, star, 4, 0.6, 58, seed=3) # a line tracing only planet3 planet4 = square_dip(t, star, 12, 0.44, 49, seed=4) # a line tracing only planet4 plt.figure(figsize=(11,5.5)) plt.plot(t, planet1, label='Planet 1', color='g') plt.plot(t, planet2, label="Planet 2", color='r') plt.plot(t, planet3, label='Planet 3', color='b') plt.plot(t, planet4, label='Planet 4', color='darkorange') # adding in noise to make it look like real data! measurements = add_scatter(t, all_planets, 0.015) # use the array called all_planets to add some pretend noise/scatter # all_planets represents our measured data, which includes all four planets plt.errorbar(t, measurements, errors, fmt='k.') plt.legend() plt.xlabel('time [days]') plt.ylabel('relative brightness [%]') #plt.savefig('look_at_my_amazing_planets.png') plt.show()What do the orbits in your solar system look like? Another fun way you can describe your solar system with physics is by describing the orbital properties of the planets around your star. In general, if we know the orbital period of our planet, we can solve for the radius of orbit (assuming circular orbits): $T^2 = \frac{4 \pi^2}{GM}r^3$ where $T$ is the orbital period, $G$ is the gravitational constant, and $M$ is the mass of your star. This is part of Kepler's 3rd law of planetary motion! Write a python function called "find_orbits" that calculates the orbital radii $r$ for the planets in your solar system above. Assume that your star has the same mass as the sun (1.99$\times 10^{30}$ kg).import math def find_orbits(gravity, mass, period): # invert Kepler's 3rd law: r**3 = G * M * T**2 / (4 * pi**2) numerator = gravity * mass * period**2 denominator = 4 * math.pi**2 divide = numerator / denominator return np.cbrt(divide)Where are your planets located around your "sun" compared to the planets in our solar system?# planet 1: solar-mass star, 22-day period converted to seconds find_orbits(gConstant, 1.99*10**30, 22*24*3600)Once you know the orbital radii, you can also solve for the orbital velocities of your planets:$v^2 = \frac{GM}{r}$ Write another python function that finds the velocities of your planets.def find_velocity(gravity,mass,radius): numerator = gravity * mass velocity = numerator / radius return math.sqrt(velocity)Earth Engine Object DetectionIn this notebook, we'll develop a model to detect cars in 15cm aerial imagery. Part 1: Creating a ModelLet's start by importing TensorFlow and the Colab auth library for communication with Google Cloud Storage.import tensorflow as tf from google.colab import auth auth.authenticate_user()Now we'll need to generate training / evaluation data. We'll start by hand annotating the outlines of cars in a roughly 1km^2 region of Mountain View, CA. [We can do this using the geometry editor](https://code.earthengine.google.com/1b573c8d1b3b4bcb9e972eb8994abc4f) in the Earth Engine Code Editor.
We can use this annotated data to create a vector mask of cars/non-cars.With the car mask, [we'll generate training and evaluation FeatureCollections and export them to cloud.](https://code.earthengine.google.com/c84a1d9e610ec91044c82766e53fe48a) Lets create a dataset reader in TensorFlow for training/eval data.# Our input function will return 4 features, each a 'side' x 'side' tensor # representing the area centered on a pixel with the label 'class' def input_fn(fileNames, numEpochs=None, shuffle=True, batchSize=100, side=61): ds = tf.data.TFRecordDataset(fileNames, compression_type='GZIP') feature_columns = { 'R': tf.FixedLenFeature([side, side], dtype=tf.float32), 'G': tf.FixedLenFeature([side, side], dtype=tf.float32), 'B': tf.FixedLenFeature([side, side], dtype=tf.float32), 'L': tf.FixedLenFeature([side, side], dtype=tf.float32), 'class': tf.FixedLenFeature([1, 1], dtype=tf.float32) } def parse(example_proto): parsed_features = tf.parse_single_example(example_proto, feature_columns) # Separate the class labels from the training features labels = parsed_features.pop('class') # For faster training / stability, we'll bring our [0, 255] RGBL values into # the range [0, 1] parsed_features = { k:tf.divide(v, 255.0) for (k,v) in parsed_features.items()} return parsed_features, labels ds = ds.map(parse, num_parallel_calls=5) if shuffle: # We choose 30 since, with a batch size of 100, we'll keep 3000 (the size # of the training data) examples in memory for the shuffle ds = ds.shuffle(buffer_size=batchSize * 30) ds = ds.batch(batchSize).repeat(numEpochs) iterator = ds.make_one_shot_iterator() features, labels = iterator.get_next() return features, labelsIts time to create a model. We'll build a [Fully Convolutional NN](https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf) so that we can train our model on 61x61 patches, and later apply it to much larger areas for prediction. Note, using a FCNN allows us to make predictions on image data of any dimensions.# A helper function for defining a convolutional layer. We use batch # normalization to speed up training given our limited training data, therefore # we can't use vanilla conv2d(activation='relu', ...) def conv_layer(inputs, filters, kernel_size, training): # Note that the default padding scheme is VALID. conv = tf.layers.conv2d( inputs=inputs, filters=filters, kernel_size=kernel_size, data_format='channels_last') norm = tf.layers.batch_normalization(inputs=conv, training=training) return tf.nn.relu(norm) # Our model will combine convolutions of the full patch on the luminance # channel with convolutions of the RGB channels on a smaller region of the # patch. The model will finally scale the predicted 2D region to match the size # of the input features minus the kernel contributions to the edges. def fcnn(feat, mode): training = mode == tf.estimator.ModeKeys.TRAIN # interleave the red, green, and blue channels so that a batch is along axis=0 rgb = tf.stack([feat['R'], feat['G'], feat['B']], axis=1) # Strip a 15 pixel border from the rgb channels. We'll only use the larger # area to provide context to the foveated rgb region. rgb = rgb[:, :, 15:-15, 15:-15] # Convert from NCHW to NHWC rgb = tf.transpose(rgb, [0, 2, 3, 1]) # Add a dimension for 'channel' to make this tensor 4D l = tf.expand_dims(feat['L'], 3) # We'll get the size of the original source pixels from l minus the "kernel" # surrounding each pixel. We choose to build the meat of our CNN around this # reduced region to reduce the model size, training time, etc... 
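# Concretely: each exported patch carries a 30-pixel context border on every
# side (training patches are 61x61 around one labeled pixel; inference patches
# are (256+60)x(256+60)), so subtracting 60 from the height and width of the
# luminance tensor recovers the footprint of the pixels we actually want to
# label.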
original_dims=tf.add(tf.shape(l)[1:3], -60) # Perform 5 convolutions in a row, reducing the information in the luminance # channel to a 25x25 region per-pixel. for i in range(6): l = conv_layer(inputs=l, filters=3 + i, kernel_size=7, training=training) rgb = conv_layer(inputs=rgb, filters=8, kernel_size=7, training=training) # Combine rgb and l to form a 4D tensor with 16 filters rgbl = tf.concat([rgb, l], 3) comb1 = tf.layers.max_pooling2d( inputs=rgbl, pool_size=3, strides=2, data_format='channels_last') comb2 = conv_layer(inputs=comb1, filters=32, kernel_size=5, training=training) comb2 = tf.layers.max_pooling2d( inputs=comb2, pool_size=3, strides=2, data_format='channels_last') comb3 = conv_layer(inputs=comb2, filters=64, kernel_size=3, training=training) # We stay convolutional by substituting a conv op for a dense layer, and # keeping the kernel size 1x1. dense = conv_layer( inputs=comb3, filters=64, kernel_size=1, training=training) dropout = tf.layers.dropout( inputs=dense, rate=0.4, training=training) # The final layer is just linear activiation; we use the same trick we did # with the previous conv layer to produce a single classification. dense_final = tf.layers.conv2d( inputs=dropout, filters=1, kernel_size=1, data_format='channels_last') # Squash all predictions into the range (0, 1) probs = tf.multiply(tf.add(tf.tanh(dense_final), 1.0), 0.5) # We won't bother adding the resize op to the graph unless we're running # predictions. # # In a more mature model, we might use a "deconvolution" here by 4x followed # by a slight resize to recover a finer amount of detail. Training this way # would require larger (in area) training samples so we could give the # transposed convolution op something to learn from. if mode == tf.estimator.ModeKeys.PREDICT: probs = tf.image.resize_images( images=probs, size=original_dims) # Remove the un-needed channel dimension of 1 probs = tf.squeeze(probs) # When training/evaluating, 1D tensor of shape [N]. When predicting, 3D tensor # of shape [N, H, W] return probsTo facillitate easier training/evaluation/prediction, we'll use TensorFlow's estimator API. We're required todefine a function that the estimator can configure with a mode that will return [estimator specs](https://www.tensorflow.org/api_docs/python/tf/estimator/EstimatorSpec) describing how our modelshould behave depending on the mode.def model_fn(features, labels, mode): # Whatever mode we're in, we'll always want to generate predictions from the # incoming features. probs = fcnn(features, mode) predicted_class = tf.cast(tf.greater(probs, 0.5), tf.float32) if mode == tf.estimator.ModeKeys.PREDICT: # We reshape the predictions into 1D arrays to make writing prediction data # into TFRecord files easier # # We'll need these prediction labels later when we build TFRecord files return tf.estimator.EstimatorSpec(mode=mode, predictions = { 'class_id': tf.reshape(predicted_class, [-1]), 'probability': tf.reshape(probs, [-1]) }) labels = tf.squeeze(labels) # Since we're performing a binary classification, we can use a simple loss # function. loss = tf.losses.mean_squared_error(labels, probs) if mode == tf.estimator.ModeKeys.TRAIN: # Adaptive moment estimation has been shown to converge faster than plain # old gradient descent in CNNs. 
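# Adam keeps running estimates of the first and second moments of each
# parameter's gradient and scales its update accordingly, which also plays
# nicely with the batch-normalized conv layers defined above.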
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001) # We need the weight updates to perform the minimization step as batch # normalization depends on it with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): train_op = optimizer.minimize( loss=loss, global_step=tf.train.get_global_step()) logging_hook = tf.train.LoggingTensorHook( {"batch_predictions" : predicted_class, "batch_labels": labels}, every_n_iter=1000) return tf.estimator.EstimatorSpec( mode=mode, loss=loss, train_op=train_op, training_hooks=[logging_hook]) eval_metric_ops = {"accuracy": tf.metrics.accuracy( labels=labels, predictions=predicted_class) } return tf.estimator.EstimatorSpec( mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)Now lets create the model object. Don't forget to replace the paths below with the paths to your own GCS bucket / training / evaluation inputs!tf.logging.set_verbosity(tf.logging.INFO) auto_classifier = tf.estimator.Estimator( model_fn=model_fn, model_dir="gs://cfb-batch-export/eeus18/autoclassifier")**And train it!**# If we want to clear the checkpointed model, we can delete the mode directory # to start fresh # !rm -rf "/autoclassifier" train_file = 'gs://cfb-batch-export/cars_training.tfrecord.gz' auto_classifier.train( input_fn=lambda: input_fn(fileNames=[train_file]), steps=50000)And evaluate it! Estimator is awesome!eval_file = 'gs://cfb-batch-export/cars_training.tfrecord.gz' acc = auto_classifier.evaluate(input_fn=lambda: input_fn( fileNames=[eval_file], numEpochs=1, batchSize=100, shuffle=False))['accuracy']Part 2: Creating / Visualizing PredictionsWe'll now need to [export an area on which to perform inference](https://code.earthengine.google.com/3ece5d0b4b2e0f0d4371ba3f5eb5940d). Note we get a "-mixer.json" with our export which we'll leave alone for now. Be sure to export this image at 15cm/px. We'll define a similar dataset input function as our training / evaluation input function, except we don't carryany class labels in, we'll instead predict these.# The default value of side is now 316, as our intent is to create predictions # for 256x256 image patches with a 30 pixel wide border. def infer_input_fn(fileNames, side=316, batchSize=100): ds = tf.data.TFRecordDataset(fileNames, compression_type='GZIP') feature_columns = { 'R': tf.FixedLenFeature([side,side], dtype=tf.float32), 'G': tf.FixedLenFeature([side,side], dtype=tf.float32), 'B': tf.FixedLenFeature([side,side], dtype=tf.float32), 'L': tf.FixedLenFeature([side,side], dtype=tf.float32), } def parse(example_proto): parsed_features = tf.parse_single_example(example_proto, feature_columns) parsed_features = { k:tf.divide(v, 255.0) for (k,v) in parsed_features.items()} return parsed_features ds = ds.map(parse, num_parallel_calls=5).batch(batchSize) iterator = ds.make_one_shot_iterator() features = iterator.get_next() return featuresLets define a function to take a dictionary of a single patch's predictions and write them to an example. 
Bywriting examples this way, we'll wind up with an image with 2 bands: 'class_id' and 'probability'def make_example(pred_dict): class_id = pred_dict['class_id'] probability = pred_dict['probability'] return tf.train.Example( features=tf.train.Features( feature={ 'class_id': tf.train.Feature( float_list=tf.train.FloatList( value=class_id)), 'probability': tf.train.Feature( float_list=tf.train.FloatList( value=probability)) } ) )Don't forget to replace the paths below with the paths to your prediction inputs!predict_files = ['gs://cfb-batch-export/cars_inference2-00000.tfrecord.gz', 'gs://cfb-batch-export/cars_inference2-00001.tfrecord.gz', 'gs://cfb-batch-export/cars_inference2-00002.tfrecord.gz', 'gs://cfb-batch-export/cars_inference2-00003.tfrecord.gz', 'gs://cfb-batch-export/cars_inference2-00004.tfrecord.gz']We're ready to make our predictions. We'll move our predictions into TFRecord files while following a few constraintsso that we can re-ingest these files into Earth Engine. Firstly, we must provide as many predictions as therewere examples in each patch. As each incoming patch has (256+60) x (256+60) examples (pixels), we'llneed our model to produce 256 x 256 labels. Note we ignore the 30 pixel border for ingesting our predictions as this is only context for classifications of the pixels *(we specified 256, 256 as our patch dimensions in Earth Engine, and a kernel of 61, 61)*.To avoid too many large files, we'll keep each file to a minimum of 50 patches of inference labels.predictions = auto_classifier.predict(input_fn=lambda: infer_input_fn( fileNames=predict_files, batchSize=1, side=316), yield_single_examples=False) MAX_RECORDS_PER_FILE = 50 output_path = 'gs://cfb-batch-export/labels/cars_labels-{:05}.tfrecord' # Create the records we'll ingest into EE file_number = 0 still_writing = True total_patches = 0 while still_writing: file_path = output_path.format(file_number) writer = tf.python_io.TFRecordWriter(file_path) print "Writing file: {}".format(file_path) try: written_records = 0 while True: pred_dict = predictions.next() writer.write(make_example(pred_dict).SerializeToString()) written_records += 1 total_patches += 1 if written_records % 5 == 0: print " Writing patch: {}".format(written_records) if written_records == MAX_RECORDS_PER_FILE: break except: # Stop writing for any exception. Note that reaching the end of the prediction # dataset throws an exception. still_writing=False finally: file_number += 1 writer.close() print('Wrote: {} patches.').format(total_patches)With our TFRecords in hand, we're ready to ingest them into Earth Engine. Lets get authorized!!pip install earthengine-api !earthengine authenticate --quietBe sure to replace *YOUR AUTH HERE* with your auth code!!earthengine authenticate --frontend=http://localhost:8080 --authorization-code=4/UADoP7aQAIrx8pShKZIlhIQXlHpUsBPpTPRJDbX-YZyf9lpJ18ky8yAWe'll now start the ingestion. If you intend on running this yourself, you'll have to replace `cfb-batch-export` with your cloud bucket and provide your own asset id. We'll also need to pass the mixer file we ignored earlier so Earth Engine knows where our labeled patches came from.!earthengine upload image --asset_id=users/cfb/badge gs://cfb-batch-export/test_help/tile3_23-00000.tfrecord gs://cfb-batch-export/test_help/tile3_23-00001.tfrecord gs://cfb-batch-export/test_help/tile3_23-00002.tfrecord gs://cfb-batch-export/test_help/tile3_23-00003.tfrecord gs://cfb-batch-export/test_help/tile3_23-mixer.jsonCopyright (c) Microsoft Corporation. 
All rights reserved.Licensed under the MIT License. GenSen with PytorchIn this tutorial, you will train a GenSen model for the sentence similarity task. We use the [SNLI](https://nlp.stanford.edu/projects/snli/) dataset in this example. For a more detailed walkthrough about data processing jump to [SNLI Data Prep](../01-prep-data/snli.ipynb). A quickstart version of this notebook can be found [here](../00-quick-start/) Notes:The model training part of this notebook can only run on a GPU machine. The running time shown in the notebook is on a Standard_NC6 Azure VM with 1 NVIDIA Tesla K80 GPU and 12 GB GPU memory. See the [README](README.md) for more details of the running time. Overview What is GenSen?GenSen is a technique to learn general purpose, fixed-length representations of sentences via multi-task training. GenSen model combines the benefits of diverse sentence-representation learning objectives into a single multi-task framework. "This is the first large-scale reusable sentence representation model obtained by combining a set of training objectives with the level of diversity explored here, i.e. multi-lingual NMT, natural language inference, constituency parsing and skip-thought vectors." [\[1\]](References) These representations are useful for transfer and low-resource learning. GenSen is trained on several data sources with multiple training objectives on over 100 milion sentences.The GenSen model is most similar to that of Luong et al. (2015) [\[4\]](References), who train a many-to-many **sequence-to-sequence** model on a diverse set of weakly related tasks that includes machine translation, constituency parsing, image captioning, sequence autoencoding, and intra-sentence skip-thoughts. However, there are two key differences. "First, like McCann et al. (2017) [\[5\]](References), their use of an attention mechanism prevents learning a fixed-length vector representation for a sentence. Second, their work aims for improvements on the same tasks on which the model is trained, as opposed to learning re-usable sentence representations that transfer elsewhere." [\[1\]](References) Why GenSen?GenSen model performs the state-of-the-art results on multiple datasets, such as MRPC, SICK-R, SICK-E and STS, for sentence similarity. The reported results are as follows compared with other models [\[3\]](References):| Model | MRPC | SICK-R | SICK-E | STS || --- | --- | --- | --- | --- || GenSen (Subramanian et al., 2018) | 78.6/84.4 | 0.888 | 87.8 | 78.9/78.6 || [InferSent](https://arxiv.org/abs/1705.02364) (Conneau et al., 2017) | 76.2/83.1 | 0.884 | 86.3 | 75.8/75.5 || [TF-KLD](https://www.aclweb.org/anthology/D13-1090) (Ji and Eisenstein, 2013) | 80.4/85.9 | - | - | - | OutlineThis notebook is organized as follows:1. Data preparation and inspection.2. Model training and prediction.For a more detailed deep dive of the Gensen model checkout the [Gensen Deep Dive Notebook](gensen_aml_deep_dive.ipynb) 0. Global Settingsimport sys sys.path.append("../..") import os import papermill as pm import scrapbook as sb from utils_nlp.dataset.preprocess import to_lowercase, to_nltk_tokens from utils_nlp.dataset import snli, preprocess from utils_nlp.models.pretrained_embeddings.glove import download_and_extract from utils_nlp.dataset import Split from examples.sentence_similarity.gensen_wrapper import GenSenClassifier print("System version: {}".format(sys.version)) max_epoch = None config_filepath = 'gensen_config.json' base_data_path = '../../data' nrows = None1. 
Data Preparation and inspection The [SNLI](https://nlp.stanford.edu/projects/snli/) corpus (version 1.0) is a collection of 570k human-written English sentence pairs manually labeled for balanced classification with the labels entailment, contradiction, and neutral, supporting the task of natural language inference (NLI), also known as recognizing textual entailment (RTE). 1.1 Load the datasetWe provide a function load_pandas_df which does the following* Downloads the SNLI zipfile at the specified directory location* Extracts the file based on the specified split* Loads the split as a pandas dataframe The zipfile contains the following files: * snli_1.0_dev.txt * snli_1.0_train.txt * snli_1.0_test.tx * snli_1.0_dev.jsonl * snli_1.0_train.jsonl * snli_1.0_test.jsonl The loader defaults to reading from the .txt file; however, you can change this to .jsonl by setting the optional file_type parameter when calling the function.train = snli.load_pandas_df(base_data_path, file_split=Split.TRAIN, nrows=nrows) dev = snli.load_pandas_df(base_data_path, file_split=Split.DEV, nrows=nrows) test = snli.load_pandas_df(base_data_path, file_split=Split.TEST, nrows=nrows) train.head()1.2 TokenizeWe have loaded the dataset into pandas.DataFrame, we now convert sentences to tokens. We also clean the data before tokenizing. This includes dropping unneccessary columns and renaming the relevant columns as score, sentence_1, and sentence_2.def clean_and_tokenize(df): df = snli.clean_cols(df) df = snli.clean_rows(df) df = preprocess.to_lowercase(df) df = preprocess.to_nltk_tokens(df) return df train = clean_and_tokenize(train) dev = clean_and_tokenize(dev) test = clean_and_tokenize(test)Once we have the clean pandas dataframes, we do lowercase standardization and tokenization. We use the [NLTK] (https://www.nltk.org/) library for tokenization.dev.head()2. Model application, performance and analysis of the resultsThe model has been implemented as a GenSen class with the specifics hidden inside the fit() method, so that no explicit call is needed. The algorithm operates in three different steps:** Model initialization ** : This is where we tell our class how to train the model. The main parameters to specify are the number of1. config file which contains information about the number of training epochs, the minibatch size etc.2. cache_dir which is the folder where all the data will be saved.3. learning rate for the model4. path to the pretrained embedding vectors.** Model fit ** : This is where we train the model on the data. The method takes two arguments: the training, dev and test set pandas dataframes. Note that the model is trained only on the training set, the test set is used to display the test set accuracy of the trained model, that in turn is an estimation of the generazation capabilities of the algorithm. It is generally useful to look at these quantities to have a first idea of the optimization behaviour.** Model prediction ** : This is where we generate the similarity for a pair of sentences. Once the model has been trained and we are satisfied with its overall accuracy we use the saved model to show the similarity between two provided sentences. 2.0 Download pretrained vectorsIn this example we use gloVe for pretrained embedding vectors.pretrained_embedding_path = download_and_extract(base_data_path)Vector file already exists. 
No changes made.2.1 Initialize Modelclf = GenSenClassifier(config_file = config_filepath, pretrained_embedding_path = pretrained_embedding_path, learning_rate = 0.0001, cache_dir=base_data_path, max_epoch=max_epoch)2.2 Train Model%%time clf.fit(train, dev, test)/data/anaconda/envs/nlp_gpu/lib/python3.6/site-packages/torch/nn/modules/rnn.py:46: UserWarning: dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1, but got dropout=0.8 and num_layers=1 "num_layers={}".format(dropout, num_layers)) ../../examples/sentence_similarity/gensen_train.py:431: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_. torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) ../../utils_nlp/models/gensen/utils.py:364: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead. Variable(torch.LongTensor(sorted_src_lens), volatile=True) /data/anaconda/envs/nlp_gpu/lib/python3.6/site-packages/torch/nn/functional.py:1332: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead. warnings.warn("nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.") /data/anaconda/envs/nlp_gpu/lib/python[...]2.3 PredictIn the predict method we perform Pearson's Correlation computation [\[2\]](References) on the outputs of the model. The predictions of the model can be further improved by hyperparameter tuning which we walk through in the other example [here](gensen_aml_deep_dive.ipynb).sentences = [ 'The sky is blue and beautiful', 'Love this blue and beautiful sky!' ] results = clf.predict(sentences) print("******** Similarity Score for sentences **************") print(results) # Record results with scrapbook for tests sb.glue("results", results.to_dict())******** Similarity Score for sentences ************** 0 1 0 1.000000 0.966793 1 0.966793 1.000000Introduction to scikit-learn (sklearn)This notebook demostrates some of the most useful functions of the beautiful scikit-learn libraryWhat we're going to cover:0. An end-to-end scikit-learn workflow1. Getting the data ready2. Choose the right estimator/algorithm for our problems3. Fit the model/algorithm and use it to make predictions on our data4. Evaluating a model5. Improve a model6. Save and load a trained model7. Putting it all together! 0. An end-to-end scikit-learn workflowimport numpy as np # 1. Get the data ready import pandas as pd heart_disease=pd.read_csv("heart-disease.csv") heart_disease # create X (features matrix) X=heart_disease.drop("target",axis=1) # create y (labels) y=heart_disease["target"] pip install sklearn # choose the right model and hyperparameters from sklearn.ensemble import RandomForestClassifier clf=RandomForestClassifier(n_estimators=100) # we'll keep the default hyperparameters clf.get_params() # 3. Fit the model to the training data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) import sklearn sklearn.show_versions(); clf.fit(X_train,y_train); X_train X_test # make a prediction y_pred=clf.predict(X_test) y_pred y_test # 4. Evaluate the model om the training data and test data clf.score(X_train, y_train) clf.score(X_test,y_test) from sklearn.metrics import classification_report, confusion_matrix, accuracy_score print(classification_report(y_test,y_pred)) confusion_matrix(y_test,y_pred) accuracy_score(y_test,y_pred) # 5. 
Improve a model # Try different amount of n_estimators np.random.seed(42) for i in range(10,100,10): print(f"Trying model with {i} estimators..") clf=RandomForestClassifier(n_estimators=i).fit(X_train,y_train) print(f"Model accuracy on test set: {clf.score(X_test,y_test)*100:.2f}%") print("") # 6. Save a model and load it import pickle pickle.dump(clf,open("random_forest_model_1.plk","wb")) loaded_model=pickle.load(open("random_forest_model_1.plk","rb")) loaded_model.score(X_test,y_test) import warnings warnings.filterwarnings("default") # Standard imports import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline1. Getting our data ready to be used with machine learningThree main things we have to do: 1. Split the data into features and labels (usually 'X' & 'Y') 2. Filling (also called imputing) or disregarding missing values 3. Converting non-numerical values to numerical values (also called feature encoding)heart_disease=pd.read_csv("heart-disease.csv") heart_disease X=heart_disease.drop("target",axis=1) X.head() y=heart_disease["target"] y.head() # Split the data into training and test sets from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) X_train X_train.shape, X_test.shape, y_train.shape, y_test.shape X.shape[0]*0.8 len(heart_disease) 242.4+611.1 Make sure it's all numericalcar_sales=pd.read_csv("car-sales-extended.csv") car_sales.head() len(car_sales) car_sales.dtypes # split into X/y X=car_sales.drop("Price",axis=1) y=car_sales["Price"] # split into training and test X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.2) # Build machine learning model from sklearn.ensemble import RandomForestRegressor model=RandomForestRegressor() model.fit(X_train,y_train) model.score(X_test,y_test) # Turn the categories into numbers from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_features=["Make","Colour","Doors"] one_hot=OneHotEncoder() transformer=ColumnTransformer([("one_hot", one_hot, categorical_features)], remainder="passthrough") transformed_X= transformer.fit_transform(X) transformed_X X.head() pd.DataFrame(transformed_X) dummies=pd.get_dummies(car_sales[["Make","Colour","Doors"]]) dummies # Let's refit the model np.random.seed(42) X_train, X_test, y_train, y_test=train_test_split(transformed_X, y, test_size=0.2) model.fit(X_train,y_train) model.score(X_test,y_test)1.2 What if there were missing values?1. Fill them with some value (also known as imputation).2. 
Remove the samples with missing data altogether.# import car sales missing data car_sales_missing=pd.read_csv("car-sales-extended-missing-data.csv") car_sales_missing.head() car_sales_missing car_sales_missing["Doors"].value_counts() car_sales_missing.isna().sum() # Create X & y X=car_sales_missing.drop("Price",axis=1) y=car_sales_missing["Price"] # Let's try and convert our data to numbers from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_features=["Make","Colour","Doors"] one_hot=OneHotEncoder() transformer=ColumnTransformer([("one_hot", one_hot, categorical_features)], remainder="passthrough") transformed_X= transformer.fit_transform(X) transformed_XOption 1: Fill missing data with Pandas# Fill the "Make" column car_sales_missing["Make"].fillna("missing", inplace=True) # Fill the "colour" column car_sales_missing["Colour"].fillna("missing",inplace=True) # Fill the "Odometer (KM)" column car_sales_missing["Odometer (KM)"].fillna(car_sales_missing["Odometer (KM)"].mean(),inplace=True) # Fill the "Door" column car_sales_missing["Doors"].fillna(4,inplace=True) # Check our dataframe again car_sales_missing.isna().sum() # Remove rows with missing Price value car_sales_missing.dropna(inplace=True) car_sales_missing.isna().sum() len(car_sales_missing) X=car_sales_missing.drop("Price",axis=1) y=car_sales_missing["Price"] # Let's try and convert our data to numbers # Turn the categories into numbers from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_features=["Make","Colour","Doors"] one_hot=OneHotEncoder() transformer=ColumnTransformer([("one_hot", one_hot, categorical_features)], remainder="passthrough") transformed_X= transformer.fit_transform(car_sales_missing) transformed_XOption 2: Fill missing values with Scikit-learnimport pandas as pd car_sales_missing=pd.read_csv("car-sales-extended-missing-data.csv") car_sales_missing.head() car_sales_missing.isna().sum() car_sales_missing.dropna(subset=["Price"],inplace=True) car_sales_missing.isna().sum() # Split into X & Y X=car_sales_missing.drop("Price",axis=1) y=car_sales_missing["Price"] X y # Fill missing values with Scikit-Learn from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer # Fill categorical values with 'missing' and numerical values with mean cat_imputer=SimpleImputer(strategy="constant",fill_value="missing") door_imputer=SimpleImputer(strategy="constant",fill_value=4) num_imputer=SimpleImputer(strategy="mean") #Define columns cat_features=["Make","Colour"] door_features=["Doors"] num_features=["Odometer (KM)"] #Create an imputer (something that fills missing data) imputer=ColumnTransformer([( "cat_imputer",cat_imputer,cat_features), ("door_imputer",door_imputer,door_features), ("num_imputer",num_imputer,num_features)]) #Transform the data filled_X=imputer.fit_transform(X) filled_X car_sales_filled=pd.DataFrame(filled_X, columns=["Make","Colour","Doors","Odometer (KM)"]) car_sales_filled.head() car_sales_filled.isna().sum() #Turn the categories into numbers from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_features=["Make","Colour","Doors"] one_hot=OneHotEncoder() transformer=ColumnTransformer([("one_hot", one_hot, categorical_features)], remainder="passthrough") transformed_X=transformer.fit_transform(car_sales_filled) transformed_X # Now we've got our data as numbers and filled (no missing values) # Let's fit a model import numpy as np 
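# transformed_X is now a purely numeric matrix: the one-hot encoded
# Make/Colour/Doors columns plus the imputed Odometer (KM) column, with no
# missing entries, so a regressor can be fit on it directly. Note that the
# imputer and encoder were fit on the full dataset before splitting, which
# leaks a little information from the test rows; refitting them only on the
# training split (e.g. inside a Pipeline) avoids this.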
np.random.seed(42) from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test=train_test_split(transformed_X, y, test_size=0.2) model=RandomForestRegressor(n_estimators=100) model.fit(X_train,y_train) model.score(X_test,y_test) car_sales=pd.read_csv("car-sales-extended.csv") len(car_sales_filled),len(car_sales)2. Chosing the right estimator/algorithm for your problemSome things to note:* Sklearn refers to machine learning model, algorithms as estimators* Classification problem - predicting a category (heart disease or not) * Sometimes you'll see 'clf' (short for classifier) used as a classification estimators* Regression problem - predicting a number (selling price of a car)if you're working on a machine learning problem and looking to use Sklearn and not sure what model yo should use, refer to the sklearn machine learning map: https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html 2.1 Picking a machine learning model for a regression problem Lets's use the California Housing dataset.# Get California Housing dataset from sklearn.datasets import fetch_california_housing housing=fetch_california_housing() housing_df=pd.DataFrame(housing["data"],columns=housing["feature_names"]) housing_df.head(100) housing_df["target"]=housing["target"] housing_df.head() housing_df=housing_df.drop("MedHouseVal",axis=1) housing_df.head(10) # import algorithm/estimator from sklearn.linear_model import Ridge # setup random seed np.random.seed(42) # Create the data X=housing_df.drop("target",axis=1) y=housing_df["target"] #median house price in $100,000s # split into train and test sets X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # instantiate and fit the model (on the training set) model=Ridge() model.fit(X_train,y_train) model.score(X_test,y_test)What if Ridge didn't work our the score didn't fit our needs?well, we could always try different model...how about we try an ensemble model (an ensemble is combination of smaller model to try and make better predictions than just a single model)sklearn's ensemble models can be found here: https://scikit-learn.org/stable/modules/ensemble.html# import the RandomForestRegressor model class from the ensemble module from sklearn.ensemble import RandomForestRegressor # setup random seed np.random.seed(42) #create the data X=housing_df.drop("target",axis=1) y=housing_df["target"] #Split into train and test sets X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # create random forest model model=RandomForestRegressor(n_estimators=100) model.fit(X_train,y_train) # check the score of the model (on the test set) model.score(X_test, y_test)2.2 Picking a machine learning model for a classification problemheart_disease=pd.read_csv("heart-disease.csv") heart_disease.head() len(heart_disease)Consulting the map and it says to try 'LinearSVC'.# Import the linearSVC estimator class from sklearn.svm import LinearSVC # Setup random seed np.random.seed(42) # make the data X=heart_disease.drop("target",axis=1) y=heart_disease["target"] # split the data X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # instantiate LineaSVC clf=LinearSVC() clf.fit(X_train, y_train) # evaluate the linearSVC clf.score(X_test, y_test) heart_disease["target"].value_counts() # Import the RandomForestClassifier estimator class from sklearn.ensemble import RandomForestClassifier # Setup random seed np.random.seed(42) # make the data 
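# X holds every column except the binary 'target' label and y holds 'target';
# n_estimators=100 simply spells out scikit-learn's default forest size.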
X=heart_disease.drop("target",axis=1) y=heart_disease["target"] # split the data X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # instantiate RandomForestClassifier clf=RandomForestClassifier(n_estimators=100) clf.fit(X_train, y_train) # evaluate the RandomForestClassifier clf.score(X_test, y_test)Tidbit: 1. if you have structured data, use ensemble methods 2. if you have unstructured data, use deep learning or transfer learningheart_disease.head()3. Fit the model/algorithm on our data and use it to make predictions 3.1 Fitting the model to the dataDifferent names for: * X= features, features variables, data* y=labels, targets, target variables# Import the RandomForestClassifier estimator class from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split # Setup random seed np.random.seed(42) # make the data X=heart_disease.drop("target",axis=1) y=heart_disease["target"] # split the data X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # instantiate RandomForestClassifier clf=RandomForestClassifier(n_estimators=100) #Fit the model to the data (training the machine learning model) clf.fit(X_train, y_train) # evaluate the RandomForestClassifier (use the patterns the model has learned) clf.score(X_test, y_test) X.head() y.tail()3.2 Make predictions using a machine learning model2 ways to make predictions1. predict()2. predict_proba()# Use a trained model to make predictions clf.predict(np.array([1,7,8,3,4])) #this doesn't work.. X_test.head() X_test.shape clf.predict(X_test) y_test np.array(y_test) # Compare predictions to truth labels to evaluate the model y_preds=clf.predict(X_test) np.mean(y_preds==y_test) clf.score(X_test,y_test) from sklearn.metrics import accuracy_score accuracy_score(y_test,y_preds) # predict_proba() returns probabilities of a classufication label # make predictions with predict_proba() clf.predict_proba(X_test[:5]) # Let's predict() on the same data # index del valor mayor clf.predict(X_test[:5]) X_test[:5] heart_disease["target"].value_counts()predict() can also be used for regression models.housing_df.head() from sklearn.ensemble import RandomForestRegressor np.random.seed(42) # Create the data X=housing_df.drop("target",axis=1) y=housing_df["target"] # split into training and test sets X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # create model instance model= RandomForestRegressor(n_estimators=100) # fit the model to the data model.fit(X_train,y_train) # make predictions y_preds=model.predict(X_test) y_preds[:10] np.array(y_test[:10]) # Compare the predictions to the truth from sklearn.metrics import mean_absolute_error mean_absolute_error(y_test,y_preds) housing_df["target"].head()4. Evaluating a machine learning modelThree ways to evaluate Scikit-learn models/estimators: 1. Estimators's built-in 'score()' method 2. The 'scoring' parameter 3. 
Problem specific metric functions 4.1 evaluating a model with 'score' methodfrom sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split import pandas as pd import numpy as np heart_disease=pd.read_csv("heart-disease.csv") np.random.seed(42) heart_disease.head() # Create X & y X=heart_disease.drop("target",axis=1) y=heart_disease["target"] # Create train/test X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # Instantied Random Forest Classifier clf=RandomForestClassifier(n_estimators=100) # fit the model to the data (training the machine learning model) clf.fit(X_train,y_train) # The highest value from .score() method is 1.0, the lowest is 0.0 clf.score(X_train,y_train) clf.score(X_test,y_test)Let's use the .score() method on our regresion ploblem...# Get California Housing dataset from sklearn.datasets import fetch_california_housing housing=fetch_california_housing() housing_df=pd.DataFrame(housing["data"],columns=housing["feature_names"]) housing_df.head() np.random.seed(42) housing_df["target"]=housing["target"] X=housing_df.drop("target",axis=1) y=housing_df["target"] X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) model=RandomForestRegressor(n_estimators=100) model.fit(X_train,y_train) # the default score() evaluation metric is r_squared for regression algorithms # the highest =1.0 the lowest 0.0 model.score(X_test,y_test) model.score(X_test,y_test) housing_df.head() y_test.head() y_test.mean()4.2 Evaluating a model using a 'scoring' parameterfrom sklearn.model_selection import cross_val_score np.random.seed(42) X=heart_disease.drop("target",axis=1) y=heart_disease["target"] # Create train/test X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2) # Instantied Random Forest Classifier clf=RandomForestClassifier(n_estimators=100) # fit the model to the data (training the machine learning model) clf.fit(X_train,y_train); clf.score(X_test,y_test) cross_val_score(clf,X,y,cv=5) cross_val_score(clf,X,y,cv=10) np.random.seed(42) # single training and test split score clf_single_score=clf.score(X_test,y_test) # take the mean of 5-fold cross validation score clf_cross_val_score=np.mean(cross_val_score(clf,X,y,cv=5)) #compare two clf_single_score,clf_cross_val_score # default scoring parameter of classifier = mean accuracy clf.score() # scoring parameter set to None for default cross_val_score(clf,X,y,cv=5,scoring=None)Classifications model evaluation metrics 1. Accuracy 2. Area under ROC curve 3. Confusion matrix 4. Classification reportfrom sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier np.random.seed(42) X=heart_disease.drop("target",axis=1) y=heart_disease["target"] clf=RandomForestClassifier(n_estimators=100) cross_val_score=cross_val_score(clf,X,y,cv=5) np.mean(cross_val_score) print(f"heart disease Classifier Cross-Validated Accuracy: {np.mean(cross_val_score)*100:.2f}%")heart disease Classifier Cross-Validated Accuracy: 82.48%**Area under the receiver operating characteristic curve (AUC/ROC)*** Area under curve (AUC)* ROC curve ROC curves are a comparison of a model's true positive rate (tpr) versus a models false positive rate (fpr).* True positive = model predicts 1 when truth is 1* False positive = model predicts 1 when truth is 0* True negative = model predicts 0 when truth is 0* False negative = model predicts 0 when truth is 1# create X_test... 
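# At every probability threshold, roc_curve computes
#   tpr = TP / (TP + FN)   and   fpr = FP / (FP + TN)
# from the positive-class probabilities; plotting tpr against fpr across all
# thresholds gives the ROC curve below.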
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) from sklearn.metrics import roc_curve #fit the classifier clf.fit(X_train,y_train) # make predictions with probabilities y_probs=clf.predict_proba(X_test) y_probs[:10],len(y_probs) y_probs_positive=y_probs[:,1] y_probs_positive[:10] # calcular fpr, ttpr and thresholds fpr, tpr, thresholds=roc_curve(y_test,y_probs_positive) # check the false positive fpr # create a function for plotting ROC curve import matplotlib.pyplot as plt def plot_roc_curve(fpr,tpr): """ Plot a ROC curver given the false positive rate (fpr) and true positive rate (tpr) of a model. """ # plot roc curve plt.plot(fpr,tpr,color="orange", label="ROC") # plot line with not predictive power (baseline) plt.plot([0,1],[0,1],color="darkblue",linestyle="--",label="Guessing") # customize the plot plt.xlabel("False positive rate (fpr)") plt.ylabel("True positive rate (tpr)") plt.title("Receiver operating characteristic (ROC) curve") plt.legend() plt.show() plot_roc_curve(fpr,tpr) from sklearn.metrics import roc_auc_score roc_auc_score(y_test,y_probs_positive) # plot perfect roc curve and AUC score fpr,tpr,thresholds=roc_curve(y_test,y_test) plot_roc_curve(fpr,tpr) # perfect AUC score roc_auc_score(y_test,y_test)**Confusing matrix**A confusing matrix is a quick way to compare the labels a model predicts and the actuals labels it was a supposed to predict. In essence, giving you and idea of where the model is getting confused.from sklearn.metrics import confusion_matrix y_preds=clf.predict(X_test) confusion_matrix(y_test,y_preds) # visualize confusing matrix with pd.crosstab() pd.crosstab(y_test,y_preds,rownames=["Actual labels"],colnames=["Predicted labels"]) 24+5+3+29 len(X_test) # make confusion matrix more visual with Seaborn's heatmap() import seaborn as sns # set the font scale sns.set(font_scale=1.5) # create a confusion matrix conf_mat=confusion_matrix(y_test,y_preds) # plot it using seaborn sns.heatmap(conf_mat)Creating a confusion matrix using Scikit-Learnimport sklearn sklearn.__version__ clf from sklearn.metrics import ConfusionMatrixDisplay ConfusionMatrixDisplay.from_estimator(estimator=clf,X=X,y=y); ConfusionMatrixDisplay.from_predictions(y_true=y_test,y_pred=y_preds);**Classification report**from sklearn.metrics import classification_report print(classification_report(y_test,y_preds)) # where precision and recall become valuable disease_true=np.zeros(10000) disease_true[0]=1 # only one positive case disease_preds=np.zeros(10000) #models predict every case as 0 pd.DataFrame(classification_report(disease_true,disease_preds,output_dict=True))c:\users\ulisesj.000\appdata\local\programs\python\python38-32\lib\site-packages\sklearn\metrics\_classification.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) c:\users\ulisesj.000\appdata\local\programs\python\python38-32\lib\site-packages\sklearn\metrics\_classification.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. 
_warn_prf(average, modifier, msg_start, len(result)) c:\users\ulisesj.000\appdata\local\programs\python\python38-32\lib\site-packages\sklearn\metrics\_classification.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavi[...]4.2.2 Regression model evaluation metricsThe one we're goin to cover are:1. R^2 (pronounced r-squared) or coefficient of determination2. Mean absolute error (MAE)3. Mean squared error (MSE)from sklearn.ensemble import RandomForestRegressor np.random.seed(42) X=housing_df.drop("target",axis=1) y=housing_df["target"] X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) model=RandomForestRegressor(n_estimators=100) model.fit(X_train,y_train) model.score(X_test,y_test) housing_df.head() y_test[:5] y_test.mean() from sklearn.metrics import r2_score # fill an array with y_test mean y_test_mean=np.full(len(y_test),y_test.mean()) y_test_mean[:10] r2_score(y_true=y_test, y_pred=y_test_mean) r2_score(y_true=y_test, y_pred=y_test)**Mean absolute error (MAE)**MAE is the average of the absolute differences between predictions and actual values.It gives you idea of how wrong your models predictions are.# MAE from sklearn.metrics import mean_absolute_error y_preds=model.predict(X_test) mae=mean_absolute_error(y_test,y_preds) mae y_preds y_test[:20] df=pd.DataFrame(data={"actual values":y_test, "predicted values":y_preds}) df["differences"]=df["predicted values"] -df["actual values"] df.head() df["differences"].mean() #MAE using formulas and differences np.abs(df.differences).mean() # Get California Housing dataset from sklearn.datasets import fetch_california_housing housing=fetch_california_housing() import pandas as pd housing_df=pd.DataFrame(housing["data"],columns=housing["feature_names"]) from sklearn.ensemble import RandomForestRegressor import numpy as np from sklearn.model_selection import train_test_split np.random.seed(42) housing_df["target"]=housing["target"] X=housing_df.drop("target",axis=1) y=housing_df["target"] X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) model=RandomForestRegressor(n_estimators=100) model.fit(X_train,y_train)**Mean squared error (MSE)**MSE is the mean of the squared of the errors between actual and predicted values.# Mean squared error from sklearn.metrics import mean_squared_error y_preds=model.predict(X_test) mse=mean_squared_error(y_test,y_preds) mse df["squared differences"]=np.square(df["differences"]) df.head() # calculate msi by hand squared=np.square(df["differences"]) squared.mean() df.iloc[0]["squared differences"].mean()4.2.3 Finally using the scoring parameterfrom sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier heart_disease=pd.read_csv("heart-disease.csv") np.random.seed(42) X=heart_disease.drop("target",axis=1) y=heart_disease["target"] clf=RandomForestClassifier(n_estimators=100) np.random.seed(42) #cross validation accuracy cv_acc=cross_val_score(clf,X,y,cv=5,scoring=None) #if scoring =None, estimator's default scoring evaluating metric is used(accuracy for classification models) cv_acc # cross validation accuracy print(f"The cross validated accuracy is: {np.mean(cv_acc)*100:.2f}%") np.random.seed(42) cv_acc=cross_val_score(clf,X,y,cv=5, scoring="accuracy") cv_accPrecisionnp.random.seed(42) cv_precision=cross_val_score(clf,X,y,cv=5, scoring="precision") cv_precision print(f"The cross-validated precision is: 
{np.mean(cv_precision)}")The cross-validated precision is: 0.8329547346025924Recallnp.random.seed(42) cv_recall=cross_val_score(clf,X,y,cv=5,scoring="recall") cv_recall print(f"The cross-validated recall is: {np.mean(cv_recall)}")The cross-validated recall is: 0.8545454545454545Let's see the scoring parameter being using for a regression problem...from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestRegressor np.random.seed(42) X=housing_df.drop("target",axis=1) y=housing_df["target"] model=RandomForestRegressor(n_estimators=100) np.random.seed(42) cv_r2=cross_val_score(model,X,y,cv=3,scoring=None) np.mean(cv_r2) cv_r2 # Mean squared error cv_mse=cross_val_score(model,X,y,cv=3,scoring="neg_mean_squared_error") np.mean(cv_mse) cv_mse # Mean absolute error cv_mae=cross_val_score(model,X,y,cv=3,scoring="neg_mean_absolute_error") np.mean(cv_mae) cv_mae4.3 Using different evaluation metrics as Scikit-learn functionsThe 3rd way to evaluate scikit learn machine learning models/estimators is to using 'sklearn.metrics' modulefrom sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split import numpy as np np.random.seed(42) #create X and y heart_disease=pd.read_csv("heart-disease.csv") X=heart_disease.drop("target",axis=1) y=heart_disease["target"] #split the data X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) #create model clf=RandomForestClassifier() #fit the model clf.fit(X_train,y_train) #make predictions y_preds=clf.predict(X_test) #evaluate model using evaluation functions print("Classifier metrics on the test set") print(f"Accuracy: {accuracy_score(y_test,y_preds)*100:.2f}%") print(f"Precison: {precision_score(y_test,y_preds)*100:.2f}%") print(f"Recall: {recall_score(y_test,y_preds)*100:.2f}%") print(f"F1: {f1_score(y_test,y_preds)*100:.2f}%") from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split np.random.seed(42) #create X and y X=heart_disease.drop("target",axis=1) y=heart_disease["target"] #split the data X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) #create model model=RandomForestRegressor() #fit the model model.fit(X_train,y_train) #make predictions y_preds=model.predict(X_test) #evaluate model using evaluation functions print("Regression metrics on the test set") print(f"R2 score: {r2_score(y_test,y_preds)}") print(f"MAE: {mean_absolute_error(y_test,y_preds)}") print(f"MSE: {mean_squared_error(y_test,y_preds)}")Regression metrics on the test set R2 score: 0.5106393318965518 MAE: 0.23754098360655734 MSE: 0.122044262295081975. Improving a modelFirst predictions=baseline predictions.First model = baseline model.From a data perspective* Could we collect more data? (generally, the more data, the better)* Could we improve our data? From a model perspective:* Is there a better model we could use?* Could we improve the current model?Hyperparameters vs. parameters* Parameters = model find these patterns on data * Hyperparameters = setting from on a model you can adjust to (potencially) improve its ability to find pattern Three ways to adjust hyperparameters:1. By hand2. Randomly with RandomSearchCV3. 
Exhaustively with GridSearchCVfrom sklearn.ensemble import RandomForestClassifier clf=RandomForestClassifier() clf.get_params()5.1 Turning hyperparameters by handLet's make 3 sets, training, validation and test.clf.get_params()We're going to try and adjust.* `max_depth`* `max_features`* `min_samples_leaf`* `min_samples_split`* `n_estimators`from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score def evaluate_preds(y_true,y_preds): """ Performs evaluation comparison on y_true labels vs. y_pred labels """ accuracy=accuracy_score(y_true,y_preds) precision=precision_score(y_true,y_preds) recall=recall_score(y_true,y_preds) f1=f1_score(y_true,y_preds) metric_dict={"accuracy":round(accuracy,2), "precision":round(precision,2), "recall":round(recall,2), "f1":round(f1,2)} print(f"Acc:{accuracy*100:.2f}%") print(f"Precision: {precision*100:.2f}") print(f"Recall: {recall:.2f}") print(f"F1 score: {f1:.2f}") return metric_dict from sklearn.model_selection import train_test_split import pandas as pd heart_disease=pd.read_csv("heart-disease.csv") heart_disease.head() from sklearn.ensemble import RandomForestClassifier import numpy as np np.random.seed(42) # shuffle the data heart_disease_shuffled=heart_disease.sample(frac=1) # split into X & y X=heart_disease_shuffled.drop("target",axis=1) y=heart_disease_shuffled["target"] # split the data into train, validation train_split=round(0.7*len(heart_disease_shuffled)) # 70% of data valid_split=round(train_split+0.15*len(heart_disease_shuffled)) # 15% of data X_train,y_train=X[:train_split],y[:train_split] X_valid,y_valid=X[train_split:valid_split],y[train_split:valid_split] X_test,y_test=X[valid_split:],y[valid_split:] len(X_train),len(X_valid),len(X_test) clf=RandomForestClassifier() clf.fit(X_train,y_train) # Make baseline predictions y_preds= clf.predict(X_valid) # Evaluate the classifier on validation set baseline_metrics=evaluate_preds(y_valid,y_preds) baseline_metrics np.random.seed(42) clf_2=RandomForestClassifier(n_estimators=100) clf_2.fit(X_train,y_train) # make predicitions y_preds2=clf_2.predict(X_valid) # evaluate the second classifier clf_2_metrics=evaluate_preds(y_valid,y_preds2)Acc:82.22% Precision: 84.00 Recall: 0.84 F1 score: 0.845.2 Hyperparameters tuning with RandomizedSearchCVfrom sklearn.model_selection import RandomizedSearchCV import numpy as np grid={"n_estimators":[10,100,200,500,1000,1200], "max_depth":[None,5,10,20,30], "max_features":["auto","sqrt"], "min_samples_split":[2,4,6], "min_samples_leaf":[1,2,6]} np.random.seed(42) # split into X & y X=heart_disease_shuffled.drop("target",axis=1) y=heart_disease_shuffled["target"] # split into train and test sets X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) # Instantiate RandomForestClassifier clf=RandomForestClassifier(n_jobs=1) # setup RandomizedSearchCV rs_clf=RandomizedSearchCV(estimator=clf, param_distributions=grid, n_iter=10, cv=5, verbose=2) # fit the RandomizedSearchCV version of clf rs_clf.fit(X_train,y_train) rs_clf.best_params_ # make predictions with the best hyperparameters rs_y_preds=rs_clf.predict(X_test) # EVALUATE The predictions rs_metrics=evaluate_preds(y_test,rs_y_preds)Acc:81.97% Precision: 77.42 Recall: 0.86 F1 score: 0.815.3 Hyperparameters tuning with GridSearchCVgrid grid_2={"n_estimators":[100,200,500], "max_depth":[None], "max_features":["auto","sqrt"], "min_samples_split":[6], "min_samples_leaf":[1,2]} from sklearn.model_selection import GridSearchCV, train_test_split np.random.seed(42) # split into X & y 
X=heart_disease_shuffled.drop("target",axis=1) y=heart_disease_shuffled["target"] # split into train and test sets X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) # Instantiate RandomForestClassifier clf=RandomForestClassifier(n_jobs=1) # setup GridSearchCV gs_clf=GridSearchCV(estimator=clf, param_grid=grid_2, cv=5, verbose=2) # fit the GridSearchCV version of clf gs_clf.fit(X_train,y_train) gs_clf.best_params_ gs_y_preds=gs_clf.predict(X_test) # evaluate the predictions gs_metrics=evaluate_preds(y_test,gs_y_preds)Acc:78.69% Precision: 74.19 Recall: 0.82 F1 score: 0.78Let's compare our different models metricimport matplotlib.pyplot as plt compare_metrics=pd.DataFrame({"baseline":baseline_metrics, "clf_2":clf_2_metrics, "random search":rs_metrics, "grid search":gs_metrics}) compare_metrics.plot.bar(figsize=(10,8));6 Saving and loading machine learning models Two ways to save and load machine learning models:1. With python's `pickle` module2. With the `joblib` module**Pickle**import pickle # save an existing model to file pickle.dump(gs_clf,open("gs_random_random_forest_model_1.plk","wb")) # laod a save model loaded_pickle_model=pickle.load(open("gs_random_random_forest_model_1.plk","rb")) # make some predictions pickle_y_preds=loaded_pickle_model.predict(X_test) evaluate_preds(y_test,pickle_y_preds)Acc:78.69% Precision: 74.19 Recall: 0.82 F1 score: 0.78**Joblib**from joblib import dump,load # save model to file dump(gs_clf,filename="gs_random_forest_model_1.joblib") # import a save joblib model loaded_job_model=load(filename="gs_random_forest_model_1.joblib") # make and evaluate joblib predictions joblib_y_preds=loaded_job_model.predict(X_test) evaluate_preds(y_test,joblib_y_preds)Acc:78.69% Precision: 74.19 Recall: 0.82 F1 score: 0.787. Putting all together!data=pd.read_csv("car-sales-extended-missing-data.csv") data.head() data.dtypes data.isna().sum()Steps we want to do (all in one cell):1. Fill missing data2. Convert data to numbers3. 
Build a model on data# getting data ready import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder # modelling from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split,GridSearchCV # setup random seed import numpy as np np.random.seed(42) # import data and drop rows with the missing labels data=pd.read_csv("car-sales-extended-missing-data.csv") data.dropna(subset=["Price"],inplace=True) # define different features on transformer pipeline categorical_features=["Make","Colour"] categorical_transformer=Pipeline(steps=[ ("imputer",SimpleImputer(strategy="constant",fill_value="missing")), ("onehot",OneHotEncoder(handle_unknown="ignore"))]) door_feature=["Doors"] door_transformer=Pipeline(steps=[ ("imputer",SimpleImputer(strategy="constant",fill_value=4)) ]) numerical_features=["Odometer (KM)"] numerical_transformer=Pipeline(steps=[ ("imputer",SimpleImputer(strategy="mean")) ]) # setup the preprocessing steps(fill missing values, then convert to numbers) preprocessor=ColumnTransformer( transformers=[ ("cat",categorical_transformer,categorical_features), ("door",door_transformer,door_feature), ("num",numerical_transformer,numerical_features) ]) # create a preprocessing and modelling pipeline model=Pipeline(steps=[("preprocessor",preprocessor), ("model",RandomForestRegressor())]) # split data X = data.drop("Price",axis=1) y = data["Price"] X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) # fit and score model model.fit(X_train,y_train) model.score(X_test,y_test) data.isna().sum()it's also possible to use `GridSearchCV` or `RandomizedSearchCV` with our `Pipeline`# use GridSearchCV with our regression Pipeline from sklearn.model_selection import GridSearchCV pipe_grid={ "preprocessor__num__imputer__strategy":["mean","median"], "model__n_estimators":[100,1000], "model__max_depth":[None,5], "model__max_features":["auto"], "model__min_samples_split":[2,4] } gs_model=GridSearchCV(model,pipe_grid,cv=5,verbose=2) gs_model.fit(X_train,y_train) gs_model.score(X_test,y_test) import pickle # save an existing model to file pickle.dump(gs_model,open("final_example.plk","wb"))OpenFindIt - Find FilesSearches domain for files.Enter `url` to crawl at cell with `%system scrapy ...`Then choose ```Kernel > Restart Kernel and Run All Cells...``` from the menu. Install prerequisites ! 
Ensure scrapy is installed and on your local computer !pip install scrapy import pandas as pd import sys import scrapy !pip install -r ./requirements.txtRequirement already satisfied: requests==2.20.0 in c:\users\joel\anaconda3\lib\site-packages (from -r ./requirements.txt (line 1)) (2.20.0) Requirement already satisfied: scrapy==1.6.0 in c:\users\joel\anaconda3\lib\site-packages (from -r ./requirements.txt (line 2)) (1.6.0) Requirement already satisfied: scrapy-random-useragent==0.2 in c:\users\joel\anaconda3\lib\site-packages (from -r ./requirements.txt (line 3)) (0.2) Requirement already satisfied: youtube-dl in c:\users\joel\anaconda3\lib\site-packages (from -r ./requirements.txt (line 4)) (2020.5.8) Requirement already satisfied: certifi>=2017.4.17 in c:\users\joel\anaconda3\lib\site-packages (from requests==2.20.0->-r ./requirements.txt (line 1)) (2019.11.28) Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\users\joel\anaconda3\lib\site-packages (from requests==2.20.0->-r ./requirements.txt (line 1)) (3.0.4) Requirement already satisfied: urllib3<1.25,>=1.21.1 in c:\users\joel\anaconda3\lib\site-packages (from requests=[...]Force the scraper to provide feedbackfrom IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all"Create tmp folder to hold resultsmkdir resultsA subdirectory or file results already exists.Run OpenFindIt Scrapy script for filesWe have no way to show progress of the run. It might take a minute, it might take hours depending on how many pages you crawl. Go get a coffee. You will know it is running if you see the time glass icon in this browser tab.Change the sites to crawl by putting the url(s) you want to crawl in the openfindit/config/sites.txt file.%system scrapy crawl findfiles -a urls="http://joelcrawfordsmith.com/openassessit/demo/test-index.html" -s DEPTH_LIMIT=3 -s CLOSESPIDER_PAGECOUNT=500 -t csv -o - > "results/openfindit-findfiles-result.csv" df = pd.read_csv("results/openfindit-findfiles-result.csv", dtype=str) dfThis is gonna be fun!!! BD first we import the stuff we need!%matplotlib inline import numpy as np import matplotlib.pyplot as pltnow we define the function we want to integrate!def fun(x): return np.exp(-2*x)*np.cos(10*x)now we define its integral so we know what the right answer is! XDdef fun_integral(x): return (1/52.)*np.exp(-2*x)*(5*np.sin(10*x)- np.cos(10*x))defining the single interval value of the trapezoid method! :-)def trapezoid_interval(f, x, h): return 0.5*h*(f(x+h)+ f(x))executing the trapezoid method: adding the single intervals valuesdef trapezoid_method(f, a, b, N): # f = function to integrate # a = lower limit of integration # b = uper limit of integration # N = number of intervals to use #defining the number line interval to integrate over! :D x = np.linspace(a, b, N) h = x[1] - x[0] #defining which will hold the integral value as we sum Fint = 0.0 #performing the intergal !!! :D for i in range(0, len(x)-1, 1): Fint += trapezoid_interval(f, x[i], h) #return the answer! :D return FintLets do the integral and compare it to the correct answer! :Dprint("Trapezoid method result") print(trapezoid_method(fun, 0, np.pi, 3500)) print("Analytical solution") Answer = fun_integral(np.pi)- fun_integral(0) print(Answer)as we can see, about 3500 intervals is enough to reach the tolerance of 10^-6 Defining the single interval element of simpson's method! :-)def simpsons_interval(f, x, h): return h*( f(x) + 4*f(x+h) + f(x+2*h))/3.Executing simpsons method by summing the elements ! 
:Ddef simpsons_method(f, a, b, N): #f = func to integrate #a = lower limit of integration #b = upper limit of integration #N = number of intervals to use #the number of elements will be N -1 # so if N is odd we dont need to adjust anything #define the number line interval to integrat over x = np.linspace(a, b, N) h = x[1] - x[0] #define the variable to hold the integral value as we sum intervals! :D Fint = 0.0 #executing simpsons method for i in range(0, len(x)-2, 2): Fint += simpsons_interval(f, x[i], h) #applying simpsons method to the last element sepratelt in case the #total number of intervals is even if((N%2)==0): Fint += simpsons_interval(f, x[-2], 0.5*h) return FintLets print our answer from simpsons method and compare it to the analytical solution! :-)print("Simpson's method result") print(simpsons_method(fun, 0, np.pi, 150)) print("Analytical solution") Answer = fun_integral(np.pi)- fun_integral(0) print(Answer)as we can see within 150 intervals we got a result within the tolerance ! Defining the romberg integration interval value! :Ddef romberg_interval(f, a, b, i): #we need the diff b-a h = b-a #increment between new function evaluations! dh = h/2.**(i) # cofactor K = h/2.**(i+1) #function evaluations! M = 0.0 for j in range(2**i): M += f(a + 0.5*dh + j*dh) return K*Mexecuting the romberg method ! :)def romberg_integration(f, a, b, tol): #iteraton variable! :D i = 0 #max number of iterations! imax = 1000 #defining an error estimate - set to a large value delta = 100.0*np.fabs(tol) #setting an array of integral answers I = np.zeros(imax, dtype = float) #0th romberg integration I[0] = 0.5*(b-a)**(f(a) + f(b)) #iterate by 1 i += 1 while(delta>tol): I[i] = 0.5*I[i-1] + romberg_interval(f, a, b, i) #compute the new fractional error estimate delta = np.fabs((I[i]-I[i-1])/I[i]) print(i, I[i], I[i-1], delta) if(delta>tol): i+=1 #if we wave reached the ma xnumber of iterations if(i>imax): print("Max iterations reached. :(") raise StopIteration('Stopping iterations after', i) return I[i]Lets compute the romberg integral and compare it to the analytical solution!print("Romberg method result") tolerance = 1.0e-6 RI = romberg_integration(fun, 0, np.pi, tolerance) print(RI) print("Analytical solution") Answer = fun_integral(np.pi)- fun_integral(0) print(Answer)Data SetupIn this notebook, we demonstrate how to:- setup time series data for this tutorial- visualize the dataThe data in this example is taken from the GEFCom2014 forecasting competition1. It consists of 3 years of hourly electricity load and temperature values between 2012 and 2014. 1, , , , and , "Probabilistic energy forecasting: Global Energy Forecasting Competition 2014 and beyond", International Journal of Forecasting, vol.32, no.3, pp 896-913, July-September, 2016.import os import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") %matplotlib inlineDownload the data. (Note: The following code is designed to run on an Azure Notebook (or Unix-like environment). 
If you are using this notebook in a different environment, you will need to mofidy the code.)data_dir = './data' # If data not downloaded, do so if not os.path.exists(os.path.join(data_dir, 'GEFCom2014.zip')): os.makedirs(data_dir, exist_ok=True) # Download and move the zip file !wget https://www.dropbox.com/s/pqenrr2mcvl0hk9/GEFCom2014.zip !mv GEFCom2014.zip ./data else: print("Nothing to do")Nothing to doExtract zip file.# If not done already, extract zipped data and save as compressed csv from common.extract_data import extract_data if not os.path.exists(os.path.join(data_dir, 'energy.csv.gz')): extract_data(data_dir) else: print("Nothing to do")Nothing to doLoad the data from csv into a Pandas dataframeenergy = pd.read_csv(os.path.join(data_dir, 'energy.csv.gz'), parse_dates=['timestamp']) # load_data(data_dir)[['load']] energy.head()Reindex the dataframe such that the dataframe has a record for every time pointbetween the minimum and maximum timestamp in the time series. This helps to identify missing time periods in the data (there are none in this dataset).energy.index = energy['timestamp'] energy = energy.reindex(pd.date_range(min(energy['timestamp']), max(energy['timestamp']), freq='H')) energy = energy.drop('timestamp', axis=1) # Check for missing energy.isna().any()Finally, save as parquet file.energy.to_parquet(os.path.join(data_dir, 'energy.parquet'))Plot all available load data (January 2012 to Dec 2014)energy.plot(y='load', subplots=True, figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()Plot first week of July 2014energy['2014-07-01':'2014-07-07'].plot(y='load', subplots=True, figsize=(15, 8), fontsize=12) plt.xlabel('timestamp', fontsize=12) plt.ylabel('load', fontsize=12) plt.show()PyGSLIB========Draw---------------The GSLIb equivalent parameter file is``` Parameters for DRAW *******************START OF PARAMETERS:data/cluster.dat \file with data3 \ number of variables1 2 3 \ columns for variables0 \ column for probabilities (0=equal)-1.0e21 1.0e21 \ trimming limits69069 100 \random number seed, number to drawdraw.out \file for realizations```#general imports import matplotlib.pyplot as plt import pygslib import numpy as np import pandas as pd #make the plots inline %matplotlib inlineGetting the data ready for work---------If the data is in GSLIB format you can use the function `pygslib.gslib.read_gslib_file(filename)` to import the data into a Pandas DataFrame.#get the data in gslib format into a pandas Dataframe cluster = pygslib.gslib.read_gslib_file('../datasets/cluster.dat') print ('\n\t\tCluster Data \n',cluster.tail())Cluster Data Xlocation Ylocation Primary Secondary Declustering Weight 135 31.5 41.5 22.75 8.21 0.427 136 34.5 32.5 9.42 6.76 0.413 137 35.5 31.5 8.48 12.78 0.419 138 35.5 33.5 2.82 9.21 0.271 139 36.5 32.5 5.26 12.40 0.252Testing Drawprint (pygslib.gslib.__draw.draw.__doc__) cluster['NO-Weight']=1. parameters_draw = { 'vr' : cluster[['Xlocation','Ylocation','Primary']], # data 'wt' : cluster['NO-Weight'], # weight/prob (use wt[:]=1 for equal probability) 'rseed' : 69069, # random number seed (conditioning cat.) 'ndraw' : 100} # number to draw vo,sumwts,error = pygslib.gslib.__draw.draw(**parameters_draw) print ('error ? 
', error != 0, error) print ('is 1./sumwts == nd?', 1./sumwts, len(cluster)) #making the output (which is numpy array) a pandas dataframe for nice printing dfvo=pd.DataFrame(vo,columns= ['Xlocation','Ylocation','Primary'])Comparing results with gslibprint (dfvo.head(6)) print ('******') print (dfvo.tail(6))Xlocation Ylocation Primary 0 39.5 18.5 0.06 1 39.5 18.5 0.06 2 39.5 18.5 0.06 3 39.5 18.5 0.06 4 39.5 18.5 0.06 5 39.5 18.5 0.06 ****** Xlocation Ylocation Primary 94 39.5 18.5 0.06 95 39.5 18.5 0.06 96 39.5 18.5 0.06 97 39.5 18.5 0.06 98 39.5 18.5 0.06 99 39.5 18.5 0.06Author: Task - 2: Prediction using Unsupervised ML Problem Statement : From the given ‘Iris’ dataset, predict the optimum number of clusters and represent it visually. The Sparks Foundation (Graduate Rotational Internship Program (GRIP) June 2021 Batch) Data Science & Business Analytics Tasks Importing required librariesimport pandas as pd from sklearn import datasets import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inlineLoading the Datasetsdf = pd.read_csv('Iris.csv')Exploratory Data Analysisdf.head() df.describe() df.shape df.info() df.isnull().any()Data Visualizationplt.figure(figsize=(5,7)) plt.hist(df['SepalLengthCm'], color = "red") plt.title('Distribution of Sepal length') plt.xlabel('Sepal length') plt.show() plt.figure(figsize=(5,7)) plt.hist(df['PetalLengthCm']) plt.title('Distribution of Petal length') plt.xlabel('Petal length') plt.show() plt.figure(figsize=(5,7)) plt.hist(df['SepalWidthCm'],color = "red") plt.title('Distribution of Sepal length') plt.xlabel('Sepal length') plt.show() plt.figure(figsize=(5,7)) plt.hist(df['PetalWidthCm']) plt.title('Distribution of Petal length') plt.xlabel('Petal length') plt.show() df['Species'].unique().tolist() sns.pairplot(df.drop("Id", axis=1), hue="Species") plt.show() no_id = df.copy() no_id.drop("Id", axis = 1, inplace = True) sns.heatmap( no_id.corr() , annot = True, linewidth = 4, linecolor= "blue", cbar = True, cmap = 'Greys' ) #shows the correlation relationship between variables plt.show() df.drop("Id", axis=1).boxplot(by = "Species", figsize=(12, 6)) plt.show() #PetalLength vs SepalLength sns.scatterplot(x = 'PetalLengthCm', y = 'SepalLengthCm',edgecolor='k', data = df ,hue ='Species') plt.title('Petal Length vs Sepal Length') plt.show() #PetalWidth vs SepalWidth sns.scatterplot(x = 'PetalWidthCm', y = 'SepalWidthCm',edgecolor='k', data = df ,hue ='Species') plt.title('Petal Width vs Sepal Width') plt.show()Finding the optimum numbers of clusters for k-means classification.x = df.iloc[:, [1,2,3,4]].values #using elbow method to find the optimal number of clusters from sklearn.cluster import KMeans wcss = [] # Within cluster sum of squares for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) kmeans.fit(x) wcss.append(kmeans.inertia_)D:\anaconda\lib\site-packages\sklearn\cluster\_kmeans.py:881: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1. 
warnings.warn(**WCSS** is defined as the sum of the squared distance between each member of the cluster and its centroid.#Plotting the results onto a line graph plt.plot(range(1, 11), wcss, "go--", c ='darkmagenta') plt.title('The Elbow method') plt.xlabel('Number of clusters') plt.ylabel('WCSS') plt.show()**We can clearly state from the above graph that the optimum clusters is where the elbow occurs. This is when the within cluster sum of squares (WCSS) doesn't decrease significantly with every iteration.** The number of clusters = 3#Applying kmeans to the dataset / Creating the kmeans classifier kmeans = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) y_kmeans = kmeans.fit_predict(x) #Visualising the clusters plt.scatter(x[y_kmeans == 0,0], x[y_kmeans == 0,1], s = 95, edgecolor='k', cmap = 'summer', label = 'Iris-setosa') plt.scatter(x[y_kmeans == 1,0], x[y_kmeans == 1,1], s = 95, edgecolor='k', cmap = 'summer', label = 'Iris-versicolour') plt.scatter(x[y_kmeans == 2,0], x[y_kmeans == 2,1], s = 95, edgecolor='k', cmap = 'summer', label = 'Iris-virginica') centers = kmeans.cluster_centers_ plt.scatter(centers[:,0], centers[:,1], s = 150, edgecolor='k', cmap ='summer', label = 'Centroid') plt.title('Clusters of Iris Species') plt.legend() plt.show()Parameters# num harmonics n = 14 # max frequency w_max = 1800 # num model steps N = 256 random_seed = 100 def generate_signal(n, w_max, N, max_A=1.0, print_info=True): """ Generate random signal. :param int n: number of harmonics. :param float w_max: maximum frequency. :param int N: number of generation steps. :param float max_A: max amplitude of individual harmonics. :param boolean print_info: whether to print frequences, amplitudes and phases of harmonics. :return np.ndarray result_signal: shape = (N, ) """ w_step = w_max / n w = np.array([i * w_step for i in range(1, n + 1)]) w.resize((n, 1)) # generate amplitudes and phases amplitudes = np.random.random((n, 1)) * max_A phases = np.random.randint(-15, 15, size=(n, 1)) harmonics = [np.linspace(0, N, num=N) for i in range(n)] harmonics = np.array(harmonics) harmonics = amplitudes * np.sin(w * harmonics + phases) result_signal = harmonics.sum(axis=0, keepdims=False) if print_info: print("Frequencies: \n", w) print("Amplitudes :\n", amplitudes) print("Phases :\n", phases) return result_signalCalculations# np.random.seed(random_seed) signal_x = generate_signal(n, w_max, N, print_info=False) %%time mean = signal_x.mean() print(mean) %%time dispersion = signal_x.std() ** 2 print(dispersion)2.390780132344333 Wall time: 0 nsGraphsfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(18, 5)) ax.plot(signal_x, color=(1, 0, 0), linewidth=3, ls="-") ax.set(title="Generated signal X", xlabel="time (s)") ax.legend(["signal X"], fontsize=16, loc=0) ax.grid() ax.set(xlim=[0, N/2], ylim=[signal_x.min(), signal_x.max()]) plt.show()DFTdef dft(signal_x): F = np.zeros((N, 2)) for p in range(N): re, im = 0, 0 for k in range(N): re += signal_x[k] * np.cos(2 * np.pi * p * k / N) im += signal_x[k] * np.sin(2 * np.pi * p * k / N) F[p, :] = (re, im) dft_x = np.sqrt(F[:, 0] ** 2 + F[:, 1] ** 2) return dft_xTable DFTclass DFT: def __init__(self, N): """ :param int N: Number of discrete points """ self.N = N self.table = self.create_table(N) def create_table(self, N): W = np.zeros((N, N, 2)) for p in range(N): for k in range(N): re = np.cos(2 * np.pi * p * k / N) im = np.sin(2 * np.pi * p * k / N) W[p, k] = (re, im) return W def dft(self, signal): F = np.zeros((N, 2)) for p in 
range(N): re, im = 0, 0 for k in range(N): re += signal[k] * self.table[p, k, 0] im += signal[k] * self.table[p, k, 1] F[p, :] = (re, im) dft_x = np.sqrt(F[:, 0] ** 2 + F[:, 1] ** 2) return dft_x transformer = DFT(N) dft_x = dft(signal_x) dft_x2 = transformer.dft(signal_x) fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(18, 8)) ax[0].plot(dft_x , color=(1, 0, 0), linewidth=3, ls="-") ax[0].set(title="Discrete Fourier Trunsform of signal X", xlabel="") ax[0].legend(["dft X"], fontsize=16, loc=0) ax[0].grid() ax[0].set(xlim=[0, N/2], ylim=[dft_x .min(), dft_x .max()]) ax[1].plot(dft_x2 , color=(1, 0.5, 0), linewidth=3, ls="-") ax[1].set(title="Discrete Fourier Trunsform of signal X by using tables", xlabel="") ax[1].legend(["dft table X"], fontsize=16, loc=0) ax[1].grid() ax[1].set(xlim=[0, N/2], ylim=[dft_x2 .min(), dft_x2 .max()]) plt.show()Comparison dft speed and dft with calculated table speedtransformer = DFT(N) %%time times = [] time_table = [] for i in range(0, 30): start_time = time.time() for j in range(i): dft_x = dft(signal_x) end_time = time.time() times.append(end_time - start_time) start_time = time.time() for j in range(i): dft_x2 = transformer.dft(signal_x) end_time = time.time() time_table.append(end_time - start_time) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(18, 6)) ax.plot(times, color=(1, 0, 0), linewidth=3, ls="-", label="standart dft time") ax.plot(time_table, color=(0, 0, 1), linewidth=3, ls="--", label="Table dft time") ax.set(title="Transformation time of N", xlabel="num trensforms", ylabel="time (s)") ax.legend(fontsize=16, loc=0) ax.grid() #ax.set(xlim=[0, N], ylim=[signal_x.min(), signal_x.max()]) plt.show()Three-class classification modelclass SimpleClassifier(pl.LightningModule): def __init__(self): super().__init__() # Linear self.layer_1 = torch.nn.Linear(4, 32) self.layer_2 = torch.nn.Linear(32, 64) self.layer_3 = torch.nn.Linear(64, 32) self.layer_4 = torch.nn.Linear(32, 2) def forward(self, x): # Layer 1 x = self.layer_1(x) x = torch.relu(x) # Layer 2 x = self.layer_2(x) x = torch.relu(x) # Layer 3 x = self.layer_3(x) x = torch.relu(x) # Layer 4 x = self.layer_4(x) x = torch.relu(x) # Probability distribution over labels x = torch.log_softmax(x, dim=1) return x def cross_entropy_loss(self, logits, labels): return F.nll_loss(logits, labels) def training_step(self, train_batch, batch_idx): x, y = train_batch logits = self.forward(x) loss = self.cross_entropy_loss(logits, y) self.log('train_loss', loss) return loss def validation_step(self, val_batch, batch_idx): x, y = val_batch logits = self.forward(x) loss = self.cross_entropy_loss(logits, y) self.log('val_loss', loss) def configure_optimizers(self): optimizer = torch.optim.Adam(self.parameters(), lr=1e-2) return optimizer class MyDataModule(pl.LightningDataModule): def __init__(self, X, y, train_size): super().__init__() self.X = X self.y = y self.train_size = train_size def setup(self, stage): X_train = torch.tensor(self.X[:self.train_size]) y_train = torch.tensor(self.y[:self.train_size], dtype=torch.long) X_test = torch.tensor(self.X[self.train_size:]) y_test = torch.tensor(self.y[self.train_size:], dtype=torch.long) self.train = TensorDataset(X_train, y_train) self.test = TensorDataset(X_test, y_test) def train_dataloader(self): return DataLoader(self.train, batch_size=16) def val_dataloader(self): return DataLoader(self.test, batch_size=16) X = np.array([ [1, 1, 1, 1], [2, 4, 6, 24], [-1, -2, -1, -192], [-191, -3, -2, -7], [102, 12, 16, 200], [7, 9, 13, 177], ] * 200, dtype='float32') y = 
np.array([ 0, 0, 1, 1, 0, 0 ] * 200, dtype='int32') data_module = MyDataModule(X, y, train_size=16) early_stopping = EarlyStopping('val_loss', patience=20) model = SimpleClassifier() trainer = pl.Trainer(max_epochs=1000, callbacks=[early_stopping]) trainer.fit(model, data_module) model(torch.tensor([[1, 1, 1, 79]], dtype=torch.float)).argmax()Harmony PI 21.1 Sprint 1 DemoIn Sprint 1 of PI 21.1, Harmony added the following1. allowing a request to specify collections by short name (HARMONY-229)2. providing an indication of how many granules will be processed in the request response (HARMONY-306)3. applying scaling and offsetting to the Zarr data returned (HARMONY-664)This notebook provides a basic workflow to demonstrate this functrionality. For more a general introduction and tutorial, see [Harmony API Introduction](./Harmony%20Api%20Introduction.ipynb). Useful helpers for making the calls found in this notebook can be found under the [docs/notebook-helpers](./notebook-helpers) folder. Prerequisites1. Install Python 3. This notebook is tested to work in 3.8 but should work in most recent 3.x versions.2. Install Jupyter: pip install jupyterlab3. Setup your ~/.netrc for Earthdata Login as described in Harmony API Introduction Set Up AWSOnce you have Zarr links, you can access them with your AWS credentials to the Harmony account. Obtain the credentials and make sure your default AWS account uses them. One way to do this is to edit `~/.aws/credentials` to have the following section:```[default]aws_access_key_id = YOUR_HARMONY_ACCESS_KEY_IDaws_secret_access_key = YOUR_HARMONY_SECRET_ACCESS_KEY```Restart your Jupyter kernel after completing this step Setup notebook environment and importsWe need to set up general-purpose imports and authentication# %load_ext autoreload # %autoreload # %matplotlib inline import sys # Install dependencies into the Jupyter Kernel !{sys.executable} -m pip install -q -r notebook_helpers/requirements.txt !{sys.executable} -m pip install s3fs zarr # Import libraries used throughout the notebook import json from pprint import pprint from time import sleep from notebook_helpers import get, post, show, get_data_urls, show_async, show_async_condensed, print_async_status, show_shapeRequest Subsetted Zarr Data Using ShortNameThis request asks for variable subsetting of L2 data with output in the Zarr format.harmony_root = 'https://harmony.uat.earthdata.nasa.gov' asyncConfig = { 'short-name': 'harmony_example_l2', 'ogc-api-coverages_version': '1.0.0', 'variable': 'all', 'format': 'application/x-zarr', 'time': '("2020-01-10T00:00:00.000Z":"2020-01-13T00:00:00.000Z")', 'max_results': '2' } async_url = harmony_root+'/{short-name}/ogc-api-coverages/{ogc-api-coverages_version}/collections/{variable}/coverage/rangeset?subset=time{time}&maxResults={max_results}&format={format}'.format(**asyncConfig) async_response = get(async_url)Display numInputGranules field in responseprint(async_response.json()["numInputGranules"])Wait for results and get data linksshow_async_condensed(async_response, show_results=False) job_results = get(async_response.url).json() data_links = [link['href'] for link in job_results['links'] if link.get('rel') == 'data']Open a Zarr fileimport s3fs import zarr fs = s3fs.S3FileSystem(client_kwargs={'region_name':'us-west-2'}) store = fs.get_mapper(root=data_links[0], check=False) zarr_file = zarr.open(store) zarr_file['blue_var'][0][100]Getting the catalog, and combining it with the subimg_data cat to get the NMS per 
cluster.Cat=Table.read('M33_catalog_with_Quick_BestFit_CMD_Estimates.fits') subimg_data=Table.read('phatm33_subimg-meta.fits') subimg_data.rename_column('SUBIMG', 'IMG') subimg_data.rename_column('FILENAME', 'SUBIMG_old') subimg_data.rename_column('RA', 'IMG_RA') subimg_data.rename_column('DEC', 'IMG_DEC') si_lst=list(subimg_data['SUBIMG_old']) for i in range(len(si_lst)): si_lst[i]= si_lst[i].rstrip() new_col=Column(si_lst, name='SUBIMG') subimg_data.remove_column('SUBIMG_old') subimg_data.add_column(new_col, index=4) Cat_w_nms=join(Cat, subimg_data) Cat_w_Est=Cat_w_nms[np.where(Cat_w_nms['Quick_BestFit_LogAge'] > 0)] #Trim to exclude clusters younger than 7.0 and older than 8.5 Trimmed=Cat_w_Est[np.where((Cat_w_Est['Quick_BestFit_LogAge'] > 7.0 ) & (Cat_w_Est['Quick_BestFit_LogAge'] < 8.5))] #Only using the sample in my age range masses=np.array(Trimmed['Quick_BestFit_LogMass']) ages=np.array(Trimmed['Quick_BestFit_LogAge']) nmses=np.array(np.log10(Trimmed['NMS'])) #Setting the Mlim to a fixed value which is the median 50% compleness for the full sample med_mlim=2.9677 med_mlim fixed_mlim_masses=masses[np.where(masses>med_mlim)] use_med_mlim=10**med_mlim def pobs(M, mlim): k=6.3665 y=(1.+ exp(-k*(M-mlim)))**(-1) return y def lnobs_like(M, mlim): k=6.3665 return -np.log(1.+ exp(-k*(M-mlim))) def Shecter_Z(M, mlim, alpha, M_c): x = M/M_c k=6.3665 pobs= 1./(1.+ exp((-k)*(np.log10(M)-mlim))) return (x**alpha) * exp(-x) * pobs def lnlike(theta, M, mlim): alpha, M_c = theta lin_M_c= 10.**M_c lin_M= 10**M x= lin_M/lin_M_c ln_pobs=lnobs_like(M, np.log10(mlim)) norm, err = quad(Shecter_Z, mlim, 1.e7, args=(np.log10(mlim), alpha, lin_M_c)) lnlike = np.sum((-x) + alpha*np.log(x) + ln_pobs - np.log(norm)) return lnlike def lnprior(theta): alpha, M_c = theta if -3 <= alpha <= -1 and 3 <= M_c <= 8: return 0.0 return -np.inf def lnprob(theta, M, mlim): lp = lnprior(theta) if not np.isfinite(lp): return -np.inf return lp + lnlike(theta, M, mlim) nll = lambda *args: -lnprob(*args) starting_point=np.array([-2., 4.]) fixed_mlim_result=opt.minimize(nll, x0=starting_point, args=(fixed_mlim_masses, use_med_mlim)) fixed_mlim_result['x']Testing embedding modelsIn this notebook we are going to setup our LSTM model for the word generation task.from __future__ import print_function import numpy as np import gensim import string from keras.callbacks import LambdaCallback from keras.layers.recurrent import LSTM from keras.layers.embeddings import Embedding from keras.layers import Dense, Activation from keras.models import Sequential from keras.utils.data_utils import get_fileUsing TensorFlow backend.Load modelsword_model = gensim.models.Word2Vec.load('./data/meta-n2v/meta-n2v100MB') pretrained_weights = word_model.wv.syn0 vocab_size, emdedding_size = pretrained_weights.shape print('Result embedding shape:', pretrained_weights.shape) print('Checking similar words:') for word in ['model']: most_similar = ', '.join('%s (%.2f)' % (similar, dist) for similar, dist in word_model.most_similar(word)[:8]) print(' %s -> %s' % (word, most_similar)) # Now for loading the corresponding amount of sentences we need to pass the number of sentences parameter from sentence_loader import lazy_load tokenised_sents, sents = lazy_load(chunk_size=10240) print(len(sents)) #ub = np.max([len(sent) for sent in tokenised_sents]) long_sents = 0 for sent in tokenised_sents: if len(sent) > 45: long_sents += 1 long_sents # Let this length be the upperbound. 
# reformat the sentences to have length no more than the maximum length sentences = [sentence for sentence in tokenised_sents if len(sentence) < 45] print(len(sentences)) print(sentences[0]) max_sentence_len = 45 def word2idx(word): return word_model.wv.vocab[word].index def idx2word(idx): return word_model.wv.index2word[idx] train_x = np.zeros([len(sentences), max_sentence_len], dtype=np.int32) train_y = np.zeros([len(sentences)], dtype=np.int32) for i, sentence in enumerate(sentences): for t, word in enumerate(sentence[:-1]): train_x[i, t] = word2idx(word) train_y[i] = word2idx(sentence[-1]) print('train_x shape:', train_x.shape) print('train_y shape:', train_y.shape)train_x shape: (70, 45) train_y shape: (70,)Trainingmodel = Sequential() model.add(Embedding(input_dim=vocab_size, output_dim=emdedding_size, weights=[pretrained_weights])) model.add(LSTM(units=emdedding_size)) model.add(Dense(units=vocab_size)) model.add(Activation('softmax')) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy') # implement a random sampling mechanism def sample(preds, temperature=1.0): if temperature <= 0: return np.argmax(preds) preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) def generate_next(text, num_generated=10): word_idxs = [word2idx(word) for word in text.lower().split()] for i in range(num_generated): prediction = model.predict(x=np.array(word_idxs)) idx = sample(prediction[-1], temperature=0.7) word_idxs.append(idx) return ' '.join(idx2word(idx) for idx in word_idxs) def on_epoch_end(epoch, _): print('\nGenerating text after epoch: %d' % epoch) texts = [ 'chess', 'is', 'fantasy', 'by', 'steve' ] for text in texts: sample = generate_next(text) print('%s... 
-> %s' % (text, sample)) model.fit(train_x, train_y, batch_size=128, epochs=20, callbacks=[LambdaCallback(on_epoch_end=on_epoch_end)]) %%file lstm_model.py from __future__ import print_function import numpy as np from keras.callbacks import LambdaCallback from keras.layers.recurrent import LSTM from keras.layers.embeddings import Embedding from keras.layers import Dense, Activation from keras.models import Sequential def sample(preds, temperature=1.0): if temperature <= 0: return np.argmax(preds) preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) def eval_on_lstm(tokenised_sents, word_model, max_sentence_len, test_ratio=0.2): sentences = [sentence for sentence in tokenised_sents if len(sentence) < max_sentence_len] pretrained_weights = word_model.wv.syn0 vocab_size, emdedding_size = pretrained_weights.shape def word2idx(word): try: idx = word_model.wv.vocab[word].index except: print("word: {} not in vocab using default word card\n".format(word)) idx = 0 return idx def idx2word(idx): return word_model.wv.index2word[idx] total = len(sentences) train_size = int(total * (1 - test_ratio)) test_size = total - train_size train_x = np.zeros([train_size, max_sentence_len], dtype=np.int32) train_y = np.zeros([train_size], dtype=np.int32) test_x = np.zeros([test_size, max_sentence_len], dtype=np.int32) test_y = np.zeros([test_size], dtype=np.int32) for i, sentence in enumerate(sentences[:train_size]): for t, word in enumerate(sentence[:-1]): train_x[i, t] = word2idx(word) train_y[i] = word2idx(sentence[-1]) for i, sentence in enumerate(sentences[train_size:]): for t, word in enumerate(sentence[:-1]): test_x[i, t] = word2idx(word) test_y[i] = word2idx(sentence[-1]) model = Sequential() model.add(Embedding(input_dim=vocab_size, output_dim=emdedding_size, weights=[pretrained_weights])) model.add(LSTM(units=emdedding_size)) model.add(Dense(units=vocab_size)) model.add(Activation('softmax')) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy') def generate_next(text, num_generated=10): word_idxs = [word2idx(word) for word in text.lower().split()] for i in range(num_generated): prediction = model.predict(x=np.array(word_idxs)) idx = sample(prediction[-1], temperature=0.7) word_idxs.append(idx) return ' '.join(idx2word(idx) for idx in word_idxs) def on_epoch_end(epoch, _): print('\nGenerating text after epoch: %d' % epoch) texts = [ 'chess', 'is', 'fantasy', 'by', 'steve', 'lasting' ] for text in texts: sample = generate_next(text) print('%s... 
-> %s' % (text, sample)) model.fit(train_x, train_y, batch_size=128, epochs=20, callbacks=[LambdaCallback(on_epoch_end=on_epoch_end)]) scores = model.evaluate(test_x, test_y, verbose=0) print("Accuracy of the model is {}".format(scores)) from lstm_model import eval_on_lstm eval_on_lstm(tokenised_sents,word_model, 40) # now lets test the 1Mb model of syntactic_n2v from sentence_loader import lazy_load tokenised_sents, sents = lazy_load(chunk_size=1048576/2) len(sents) from lstm_model import eval_on_lstm import gensim word_model = gensim.models.Word2Vec.load('./data/syncode/syncode_model_1MB') eval_on_lstm(tokenised_sents,word_model, 40) word_model = gensim.models.Word2Vec.load('./data/w2v/w2v_model_1MB') eval_on_lstm(tokenised_sents,word_model, 40) %%file evaluate_models.py from sentence_loader import lazy_load from lstm_model import eval_on_lstm import gensim sizes = [1,2,4,8] tokenized_sents = [] sents = [] for size in sizes: ts, s = lazy_load(chunk_size=(10240)*size) tokenized_sents.extend(ts) sents.extend(s) word_model = gensim.models.Word2Vec.load('./data/syncode/syncode_model_'+str(size)+'MB') eval_on_lstm(tokenized_sents,word_model, 40) print "\n done with syncode "+ str(size)+"\n" word_model = gensim.models.Word2Vec.load('./data/w2v/w2v_model_'+str(size)+'MB') eval_on_lstm(tokenized_sents,word_model, 40) print "\n done with word2vec "+ str(size)+"\n"Overwriting evaluate_models.pyDemo 2: The objectives of this demo are as follows:- Simulate a single stochastic trajectory of the Ricker model going through a Flip bifurcation- Compute bootstrapped versions of segments of the time-series over a rolling window- Compute EWS of the bootstrapped time-series- Compute and display confidence intervals of the ensemble of EWS- Run time < 3min Import the standard Python libraries and ewstools# We will require the following standard Python packages for this analysis import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # This is the package we use to compute the early warning signals import ewstools.core as ewstoolsSimulate the Ricker model Here we simulate a single trajectory of the Ricker model going through a Fold bifurcation. We will use this data to demonstrate the process of computing EWS. Alternatively, you could import your own data here. The importnat thing is that we end up with a Pandas DataFrame indexed by time. **Set simulation parameters**dt = 1 # time-step (using 1 since discrete-time system) t0 = 0 # starting time tmax = 1000 # end time tburn = 100 # burn-in period preceding start-time seed = 0 # random number generation seed (set for reproducibility)**Define model**We use the Ricker model with a Holling Type II harvesting term and additive white noise. 
It is given by$$ N_{t+1} = N_t e^{r(1-N_t/K) + \sigma\epsilon_t} - F\frac{N_t^2}{N_t^2 + h^2}$$where $N_t$ is the population size at time $t$, $r$ is the intrinsic growth rate, $K$ is the carrying capacity, $F$ is the maximum rate of harvesting, $h$ is the half saturation constant of the harvesting term, $\sigma$ is the noise amplitude, and $\epsilon_t$ is a normal random variable with zero mean and unit variance.# Define the model def de_fun(x,r,k,f,h,xi): return x*np.exp(r*(1-x/k)+xi) - f*x**2/(x**2+h**2)**Set model parameters**f = 0 # harvesting rate k = 10 # carrying capacity h = 0.75 # half-saturation constant of harvesting function bl = 0.5 # bifurcation parameter (growth rate) low bh = 2.3 # bifurcation parameter (growth rate) high bcrit = 2 # bifurcation point (computed using XPPAUT) sigma = 0.02 # noise intensity x0 = 0.8 # initial condition**Initialisation**# Initialise arrays for time and state values t = np.arange(t0,tmax,dt) x = np.zeros(len(t)) # Bifurcation parameter values (increasing linearly in time) b = pd.Series(np.linspace(bl,bh,len(t)),index=t) # bifurcation parameter values over time (linear increase) # Compute time at which bifurcation is crossed tcrit = b[b > bcrit].index[1] # Array of noise values (normal random variables with variance sigma^2 dt) dW_burn = np.random.normal(loc=0, scale=sigma*np.sqrt(dt), size = int(tburn/dt)) # burn-in period dW = np.random.normal(loc=0, scale=sigma*np.sqrt(dt), size = len(t)) # monitored period**Run simulation**# Run burn-in period starting from initial condition x0 for i in range(int(tburn/dt)): x0 = de_fun(x0,bl,k,f,h,dW_burn[i]) # State value post burn-in period. Set as starting value. x[0]=x0 # Run simulation using recursion for i in range(len(t)-1): x[i+1] = de_fun(x[i],b.iloc[i],k,f,h,dW[i]) # Make sure that state variable stays >= 0 if x[i+1] < 0: x[i+1] = 0 # Store array data in a DataFrame indexed by time sim_data = {'Time': t, 'x': x} df_traj = pd.DataFrame(sim_data) df_traj.set_index('Time', inplace=True)We now have a DataFrame df_traj, with our trajectory, indexed by time. We can check it out with a simple plot, using the commanddf_traj.plot();Bootstrap the time-series over a rolling windowTo obtain a more reliable estimate of the statistical metrics that constitute EWS in this system, we bootstrap the detrended time-series within each position of the rolling window. Specifically, we use a block-bootstrapping method where blocks of points are sampled randomly with replacement. The size of the block for each sample is taken from an exponential distribution with a chosen parameter. The block sizes used should be large enough to retain the significant temporal correlations in the time-series. **Set bootstrapping parameters**rw = 0.4 # rolling window span = 0.5 # Lowess span block_size = 20 # characteristic size of blocks used to resample time-series bs_type = 'Stationary' # type of bootstrapping n_samples = 3 # number of bootstrapping samples to take roll_offset = 20 # rolling window offset**Compute block-bootstrapped samples**We now construct a DataFrame of bootstrapped samples of the time-series, using the function *roll_bootstrap* within the *ewstools* package.
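To make the block-bootstrapping idea concrete, here is a minimal sketch of a stationary block bootstrap, assuming a detrended residual series as input. This is an illustration only, not the `ewstools` implementation, and the helper name `block_bootstrap_sample` is hypothetical: blocks start at random positions, have random lengths with mean `block_size`, and are concatenated (wrapping around the end of the series) until the resample is as long as the original.
```
import numpy as np

def block_bootstrap_sample(residuals, block_size=20, rng=None):
    """One stationary-block-bootstrap resample of a detrended series."""
    rng = np.random.default_rng() if rng is None else rng
    residuals = np.asarray(residuals)
    n = len(residuals)
    sample = []
    while len(sample) < n:
        start = rng.integers(0, n)                # random block start
        length = rng.geometric(1.0 / block_size)  # random length, mean = block_size
        idx = (start + np.arange(length)) % n     # wrap around the end of the series
        sample.extend(residuals[idx])
    return np.array(sample[:n])
```
Drawing the block lengths from a geometric (discrete exponential) distribution is what makes the resample "stationary", and a mean block length of around 20 points keeps short-range temporal correlations intact.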
Note that documentation of each function can be obtained using help(*function_name*)df_samples = ewstools.roll_bootstrap(df_traj['x'], span = span, roll_window = rw, roll_offset = roll_offset, upto = tcrit, n_samples = n_samples, bs_type = bs_type, block_size = block_size )For illustraion, here are 3 bootstrapped samples from the time-series within the rolling window at $t=459$.df_samples.loc[459].loc[1:3]['x'].unstack(level=0).plot();Compute EWS of the ensemble of bootstrap time-seriesNow we send each bootstrapped time-series through *ews_compute*. Note that detrending and extracting segments of the time-series has already been done, so there is no need to smooth the bootstrapped data, or use a rolling window. **EWS parameters**ews = ['var','ac','smax','aic'] lags = [1,2,3] # autocorrelation lag times ham_length = 40 # number of data points in Hamming window ham_offset = 0.5 # proportion of Hamming window to offset by upon each iteration pspec_roll_offset = 20 # offset for rolling window when doing spectrum metrics sweep = 'False' # whether to sweep over optimisation parameters**Initialisation**# List to store EWS DataFrames list_df_ews = [] # List to store power spectra list_pspec = [] # Extract time and sample values to loop over # Time values tVals = np.array(df_samples.index.levels[0]) # Sample values sampleVals = np.array(df_samples.index.levels[1])**Run ews_compute for each bootstrapped sample (takes a few minutes)**# Loop over time (at end of rolling window) for t in tVals: # Loop over samples for sample in sampleVals: # Extract series for this time and sample number series_temp = df_samples.loc[t].loc[sample]['x'] ews_dic = ewstools.ews_compute(series_temp, roll_window = 1, # effectively no rolling window band_width = 1, # effectively no detrending ews = ews, lag_times = lags, upto='Full', ham_length = ham_length, ham_offset = ham_offset, sweep = sweep) # The DataFrame of EWS df_ews_temp = ews_dic['EWS metrics'] # Include columns for sample value and realtime df_ews_temp['Sample'] = sample df_ews_temp['Time'] = t # Drop NaN values df_ews_temp = df_ews_temp.dropna() # Append list_df_ews list_df_ews.append(df_ews_temp) # Output power spectrum for just one of the samples (ow large file size) df_pspec_temp = ews_dic['Power spectrum'][['Empirical']].dropna() list_pspec.append(df_pspec_temp) # Print update print('EWS for t=%.2f complete' % t) # Concatenate EWS DataFrames. Index [Realtime, Sample] df_ews_boot = pd.concat(list_df_ews).reset_index(drop=True).set_index(['Time','Sample']) df_pspec_boot = pd.concat(list_pspec)EWS for t=399.00 complete EWS for t=419.00 complete EWS for t=439.00 complete EWS for t=459.00 complete EWS for t=479.00 complete EWS for t=499.00 complete EWS for t=519.00 complete EWS for t=539.00 complete EWS for t=559.00 complete EWS for t=579.00 complete EWS for t=599.00 complete EWS for t=619.00 complete EWS for t=639.00 complete EWS for t=659.00 complete EWS for t=679.00 complete EWS for t=699.00 complete EWS for t=719.00 complete EWS for t=739.00 complete EWS for t=759.00 complete EWS for t=779.00 complete EWS for t=799.00 complete EWS for t=819.00 completePlot EWS with 95% confidence intervalsWe use the Seaborn package here to make plots of the ensemble EWS as mean values with 95% confidence intervals. 
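The Seaborn plots below aggregate the bootstrap samples at each time point into a mean line with a shaded 95% interval. If you want those numbers directly, a small hypothetical helper along the following lines (assuming `df_ews_boot` from the cell above and pandas imported as `pd`) returns the ensemble mean together with a simple 2.5th to 97.5th percentile band; this percentile band is a simpler summary than Seaborn's default shading, which estimates a confidence interval of the mean.
```
import pandas as pd

def ews_confidence_band(df_ews_boot, metric='Variance'):
    """Mean and 2.5/97.5 percentile band of a bootstrapped EWS at each time point."""
    grouped = df_ews_boot[metric].groupby(level='Time')
    return pd.DataFrame({'mean': grouped.mean(),
                         'lower': grouped.quantile(0.025),
                         'upper': grouped.quantile(0.975)})
```
For example, `ews_confidence_band(df_ews_boot, 'Smax')` gives a band for Smax that could be drawn with `plt.fill_between`.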
**Variance**sns.relplot(x='Time', y='Variance', data=df_ews_boot.reset_index()[['Time','Variance']], kind='line', height=3, aspect=2);**Autocorrelation**# Structure the data for Seaborn plot_data=df_ews_boot.reset_index()[['Time','Lag-1 AC','Lag-2 AC','Lag-3 AC']].melt(id_vars='Time', value_vars=('Lag-1 AC','Lag-2 AC','Lag-3 AC'), var_name='EWS', value_name='Magnitude') sns.relplot(x='Time', y='Magnitude', hue='EWS', data=plot_data, kind='line', height=3, aspect=2);**Smax**sns.relplot(x='Time', y='Smax', data=df_ews_boot.reset_index()[['Time','Smax']], kind='line', height=3, aspect=2);**AIC weights**# Structure the data for Seaborn plot_data=df_ews_boot.reset_index()[['Time','AIC fold','AIC hopf','AIC null']].melt(id_vars='Time', value_vars=('AIC fold','AIC hopf','AIC null'), var_name='EWS', value_name='Magnitude') sns.relplot(x='Time', y='Magnitude', hue='EWS', data=plot_data, kind='line', height=3, aspect=2);Training models# --------------------------------------------------------------------------------- #Models svm = SVC(kernel = 'rbf', gamma = 0.1, C = 10.0, random_state = 1) dt = DecisionTreeClassifier(criterion="entropy", random_state = 1) log_reg = LogisticRegression(penalty='l2', C = 10, random_state = 1) ranfor = RandomForestClassifier(n_estimators=1000, max_depth=10, n_jobs=-1, ccp_alpha=0.1) xgbclass = xgb.XGBClassifier(n_estimators=1000,max_depth=15, learning_rate=0.05, n_jobs=-1,random_state=42, colsample_bytree=0.5,gamma=1) ada = AdaBoostClassifier(n_estimators=1000,learning_rate=0.1) models =[svm,dt,log_reg,ranfor,xgbclass,ada] score_table =[] for model in models: score_table.append(train(model,X_train,y_train)) df = pd.DataFrame(score_table,columns=['Model','Train Accuracy','Test Accuracy','Precision','Recall','F1']) df = df.set_index('Model') df.sort_values('F1',ascending=False) df.sort_values('F1',ascending=False)Neural Networks#Reproducing same results SEED = 2019 #Torch torch.manual_seed(SEED) #Cuda algorithms torch.backends.cudnn.deterministic = True TEXT = data.Field(tokenize='spacy',batch_first=True,include_lengths=True) LABEL = data.LabelField(dtype = torch.float,batch_first=True) fields = [(None, None), ('text',TEXT),('label', LABEL)] #loading custom dataset training_data=data.TabularDataset(path = 'processed_text.csv',format = 'csv',fields = fields,skip_header = True) #print preprocessed text print(vars(training_data.examples[0])) vars(training_data.examples[0]) train_data, valid_data = training_data.split(split_ratio=0.7, random_state = random.seed(SEED)) vocab = "glove.840B.300d" #initialize glove embeddings TEXT.build_vocab(train_data,min_freq=3,vectors = vocab) LABEL.build_vocab(train_data) #No. of unique tokens in text print("Size of TEXT vocabulary:",len(TEXT.vocab)) #No. 
of unique tokens in label print("Size of LABEL vocabulary:",len(LABEL.vocab)) #Commonly used words print(TEXT.vocab.freqs.most_common(10)) #Word dictionary print(TEXT.vocab.stoi) #check whether cuda is available device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') #set batch size BATCH_SIZE = 64 #Load an iterator train_iterator, valid_iterator = data.BucketIterator.splits( (train_data, valid_data), batch_size = BATCH_SIZE, sort_key = lambda x: len(x.text), sort_within_batch=True, device = device) class classifier(nn.Module): #define all the layers used in model def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, bidirectional, dropout): #Constructor super().__init__() #embedding layer self.embedding = nn.Embedding(vocab_size, embedding_dim) #lstm layer self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout, batch_first=True) #dense layer self.fc = nn.Linear(hidden_dim * 2, output_dim) #activation function self.act = nn.Sigmoid() def forward(self, text, text_lengths): #text = [batch size,sent_length] embedded = self.embedding(text) #embedded = [batch size, sent_len, emb dim] #packed sequence packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.cpu(),batch_first=True) packed_output, (hidden, cell) = self.lstm(packed_embedded) #hidden = [batch size, num layers * num directions,hid dim] #cell = [batch size, num layers * num directions,hid dim] #concat the final forward and backward hidden state hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1) #hidden = [batch size, hid dim * num directions] dense_outputs=self.fc(hidden) #Final activation function outputs=self.act(dense_outputs) return outputs----------------------------Hyperparameters--------------------------#define hyperparameters size_of_vocab = len(TEXT.vocab) embedding_dim = 300 num_hidden_nodes = 256 num_output_nodes = 1 num_layers = 8 bidirection = True dropout = 0.3 lr = 0.00001 #instantiate the model model = classifier(size_of_vocab, embedding_dim, num_hidden_nodes,num_output_nodes, num_layers, bidirectional = True, dropout = dropout) #architecture print(model) #No. of trianable parameters def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') #Initialize the pretrained embedding pretrained_embeddings = TEXT.vocab.vectors model.embedding.weight.data.copy_(pretrained_embeddings) print(pretrained_embeddings.shape) #define optimizer and loss optimizer = optim.AdamW(model.parameters(),lr=lr) criterion = nn.BCELoss() #define metric def binary_accuracy(preds, y): #round predictions to the closest integer rounded_preds = torch.round(preds) correct = (rounded_preds == y).float() acc = correct.sum() / len(correct) return acc #push to cuda if available model = model.to(device) criterion = criterion.to(device) def train(model, iterator, optimizer, criterion): #initialize every epoch epoch_loss = 0 epoch_acc = 0 #set the model in training phase model.train() for batch in iterator: #resets the gradients after every batch optimizer.zero_grad() #retrieve text and no. 
of words text, text_lengths = batch.text #convert to 1D tensor predictions = model(text, text_lengths).squeeze() #compute the loss loss = criterion(predictions, batch.label) #compute the binary accuracy acc = binary_accuracy(predictions, batch.label) #backpropage the loss and compute the gradients loss.backward() #update the weights optimizer.step() #loss and accuracy epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) def evaluate(model, iterator, criterion): #initialize every epoch epoch_loss = 0 epoch_acc = 0 #deactivating dropout layers model.eval() #deactivates autograd with torch.no_grad(): for batch in iterator: #retrieve text and no. of words text, text_lengths = batch.text #convert to 1d tensor predictions = model(text, text_lengths).squeeze() #compute loss and accuracy loss = criterion(predictions, batch.label) acc = binary_accuracy(predictions, batch.label) #keep track of loss and accuracy epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) N_EPOCHS = 60 model_name = 'LSTM' run_name = f'{model_name}_hidden_{num_hidden_nodes}_epochs_{N_EPOCHS}_batch_size{BATCH_SIZE}_lr_{lr}_num_layers{num_layers}_vocab_{vocab}_{optimizer.__class__.__name__}_dropout_{dropout}' wandb.init(project="LGBT", entity="cs519",name=run_name) wandb.watch(model) best_valid_loss = float('inf') for epoch in range(N_EPOCHS): #train the model train_loss, train_acc = train(model, train_iterator, optimizer, criterion) #evaluate the model valid_loss, valid_acc = evaluate(model, valid_iterator, criterion) wandb.log({ "Epoch": epoch, "Train Loss": train_loss, "Valid Loss": valid_loss}) #save the best model if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), f'{run_name}.pt') print(f'Epoch {epoch+1}') print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%') print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%') #load weights path=f'{run_name}.pt' model.load_state_dict(torch.load(path)); model.eval(); #inference import spacy nlp = spacy.load('en') def predict(model, sentence): tokenized = [tok.text for tok in nlp.tokenizer(sentence)] #tokenize the sentence indexed = [TEXT.vocab.stoi[t] for t in tokenized] #convert to integer sequence length = [len(indexed)] #compute no. of words tensor = torch.LongTensor(indexed).to(device) #convert to tensor tensor = tensor.unsqueeze(1).T #reshape in form of batch,no. of words length_tensor = torch.LongTensor(length) #convert to tensor prediction = model(tensor, length_tensor) #prediction return prediction.item()Picking a Link FunctionGeneralized linear models usually tranform a linear model of the predictors by using a [link function](https://en.wikipedia.org/wiki/Generalized_linear_modelLink_function). In logistic regression, the link function is the [sigmoid](https://en.wikipedia.org/wiki/Sigmoid_function). We can implement this really easily.def sigmoid(scores): return 1 / (1 + np.exp(-scores))Maximizing the Likelihood To maximize the likelihood, I need a way to compute the likelihood and the gradient of the likelihood. Fortunately, the likelihood (for binary classification) can be reduced to a fairly intuitive form by switching to the log-likelihood. 
We're able to do this without affecting the weights parameter estimation because log transformations are [monotonic](https://en.wikipedia.org/wiki/Monotonic_function).For anyone interested in the derivations of the functions I'm using, check out Section 4.4.1 of Hastie, Tibshirani, and Friedman's [Elements of Statistical Learning](http://statweb.stanford.edu/~tibs/ElemStatLearn/). For those less mathematically inclined, (University of Washington) details one possible derivation of the log-likelihood in a series of short lectures on [Coursera](https://www.coursera.org/learn/ml-classification/lecture/1ZeTC/very-optional-expressing-the-log-likelihood) using indicator functions. Calculating the Log-LikelihoodThe log-likelihood can be viewed as a sum over all the training data. Mathematically (the negative of this quantity is the classification loss, so maximizing it means minimizing the loss),$$\begin{equation}ll = \sum_{i=1}^{N}y_{i}\beta^{T}x_{i} - \log\left(1+e^{\beta^{T}x_{i}}\right)\end{equation}$$ where $y_{i}$ is the target class, $x_{i}$ represents an individual data point, and $\beta$ is the weights vector.I can easily turn that into a function and take advantage of matrix algebra.# log-likelihood for the classification problem def log_likelihood(features, target, weights): scores = np.dot(features, weights) ll = np.sum( target*scores - np.log(1 + np.exp(scores)) ) return llCalculating the GradientNow I need an equation for the gradient of the log-likelihood. By taking the derivative of the equation above and reformulating in matrix form, the gradient becomes: $$\begin{equation}\nabla ll = X^{T}(Y - \text{Predictions})\end{equation}$$Again, this is really easy to implement. It's so simple I don't even need to wrap it into a function. The gradient here looks very similar to the output layer gradient in a neural network (see my [post](https://beckernick.github.io/neural-network-scratch/) on neural networks if you're curious).This shouldn't be too surprising, since a neural network is basically just a series of non-linear link functions applied after linear manipulations of the input data. Building the Logistic Regression FunctionFinally, I'm ready to build the model function.
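Before the full function, here is a minimal standalone sketch of the single gradient-ascent step just described (assuming NumPy arrays `features` of shape (N, d), a binary `target` of shape (N,), a current weight vector `weights` of shape (d,), and the `sigmoid` defined earlier; the learning rate here is illustrative):

predictions = sigmoid(np.dot(features, weights))      # p = sigmoid(X @ beta)
output_error_signal = target - predictions            # Y - Predictions
gradient = np.dot(features.T, output_error_signal)    # X^T (Y - Predictions)
weights = weights + 5e-5 * gradient                   # step up the log-likelihood gradient

These are exactly the lines that appear inside the training loop of the full `logistic_regression` function that follows.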
I'll add in the option to calculate the model with an intercept, since it's a good option to have.权重的迭代公式:![image.png](attachment:image.png)#梯度下降最优化:取误差函数的极小值 def logistic_regression(features, target, num_steps, learning_rate, add_intercept = False): if add_intercept: #intercept:有截距b intercept = np.ones((features.shape[0], 1)) #features.shape返回(60,3)[0]返回60。生成60行,1列的1 features = np.hstack((intercept, features)) #在feature前加上一列1 np.vstack():在竖直方向上堆叠;np.hstack():在水平方向上平铺 weights = np.zeros(features.shape[1]) #features.shape[1]返回3,3个权重 for step in range(num_steps): #迭代多少步 scores = np.dot(features, weights) predictions = sigmoid(scores) # Update weights with log likelihood gradient output_error_signal = target - predictions gradient = np.dot(features.T, output_error_signal) weights += learning_rate * gradient # Print log-likelihood every so often if step % 10000 == 0: print(log_likelihood(features, target, weights)) return weightsTime to do the regression.weights = logistic_regression(features, label, num_steps = 50000, learning_rate = 5e-5, add_intercept=True) #收敛了 print(weights) def predict(features, weights): global mean global std features = (features - mean)/std intercept = np.ones((features.shape[0], 1)) features = np.hstack((intercept, features)) scores = np.dot(features, weights) predictions = sigmoid(scores) return predictions student1 = np.array([[188, 85, 2]]) print(predict(student1, weights)) student2 = np.array([[165, 50, 25]]) print(predict(student2, weights))[0.76002054]Inference Data Cookbook`InferenceData` is the central data format for ArviZ. `InferenceData` itself is just a container that maintains references to one or more `xarray.Dataset`. Below are various ways to generate an `InferenceData` object. See [here](XarrayforArviZ.ipynb) for more on xarray.import arviz as az import numpy as npFrom 1d numpy arraysize = 100 dataset = az.convert_to_inference_data(np.random.randn(size)) print(dataset) dataset.posteriorInference data with groups: > posteriorFrom nd numpy arrayshape = (1, 2, 3, 4, 5) dataset = az.convert_to_inference_data(np.random.randn(*shape)) print(dataset) dataset.posteriorInference data with groups: > posteriorFrom a dictionarydatadict = { 'a': np.random.randn(100), 'b': np.random.randn(1, 100, 10), 'c': np.random.randn(1, 100, 3, 4), } dataset = az.convert_to_inference_data(datadict) print(dataset) dataset.posteriorInference data with groups: > posteriorFrom dictionary with coords and dimsdatadict = { 'a': np.random.randn(100), 'b': np.random.randn(1, 100, 10), 'c': np.random.randn(1, 100, 3, 4), } coords = {'c1' : np.arange(3), 'c2' : np.arange(4), 'b1' : np.arange(10)} dims = {'b' : ['b1'], 'c' : ['c1', 'c2']} dataset = az.convert_to_inference_data(datadict, coords=coords, dims=dims) print(dataset) dataset.posteriorInference data with groups: > posteriorFrom pymc3import pymc3 as pm draws = 500 chains = 2 eight_school_data = {'J': 8, 'y': np.array([28., 8., -3., 7., -1., 1., 18., 12.]), 'sigma': np.array([15., 10., 16., 11., 9., 11., 10., 18.]) } with pm.Model() as model: mu = pm.Normal('mu', mu=0, sd=5) tau = pm.HalfCauchy('tau', beta=5) theta_tilde = pm.Normal('theta_tilde', mu=0, sd=1, shape=eight_school_data['J']) theta = pm.Deterministic('theta', mu + tau * theta_tilde) pm.Normal('obs', mu=theta, sd=eight_school_data['sigma'], observed=eight_school_data['y']) trace = pm.sample(draws, chains=chains) prior = pm.sample_prior_predictive() posterior_predictive = pm.sample_posterior_predictive(trace, 500, model) data = az.from_pymc3( trace=trace, prior=prior, 
posterior_predictive=posterior_predictive, coords={'school': np.arange(eight_school_data['J'])}, dims={'theta': ['school'], 'theta_tilde': ['school']}, ) dataAuto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 4 jobs) NUTS: [theta_tilde, tau, mu] Sampling 2 chains: 100%|██████████| 2000/2000 [00:01<00:00, 1407.23draws/s] There was 1 divergence after tuning. Increase `target_accept` or reparameterize. 100%|██████████| 500/500 [00:00<00:00, 1951.70it/s]From pystanimport pystan schools_code = ''' data { int J; real y[J]; real sigma[J]; } parameters { real mu; real tau; real theta_tilde[J]; } transformed parameters { real theta[J]; for (j in 1:J) theta[j] = mu + tau * theta_tilde[j]; } model { mu ~ normal(0, 5); tau ~ cauchy(0, 5); theta_tilde ~ normal(0, 1); y ~ normal(theta, sigma); } generated quantities { vector[J] log_lik; vector[J] y_hat; for (j in 1:J) { log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]); y_hat[j] = normal_rng(theta[j], sigma[j]); } } ''' stan_model = pystan.StanModel(model_code=schools_code) fit = stan_model.sampling(data=eight_school_data, iter=draws, warmup=0, chains=chains) data = az.from_pystan(posterior=fit, posterior_predictive='y_hat', observed_data=['y'], log_likelihood='log_lik', coords={'school': np.arange(eight_school_data['J'])}, dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school'] } ) dataINFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_e7aeb8b836685a923269e6171e7377cd NOW. /home/tbayes/miniconda3/envs/arviz3.6/lib/python3.6/site-packages/Cython/Compiler/Main.py:367: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /tmp/tmpehtg_iwy/stanfit4anon_model_e7aeb8b836685a923269e6171e7377cd_1721201981547910013.pyx tree = Parsing.p_module(s, pxd, full_module_name)From pyroimport torch import pyro import pyro.distributions as dist import pyro.poutine as poutine from pyro.infer.mcmc import MCMC, NUTS pyro.enable_validation(True) pyro.set_rng_seed(0) draws = 1000 warmup_steps = 0 eight_school_data = {'J' : 8, 'y' : torch.tensor([28, 8, -3, 7, -1, 1, 18, 12]).type(torch.Tensor), 'sigma' : torch.tensor([15, 10, 16, 11, 9, 11, 10, 18]).type(torch.Tensor) } def model(sigma): eta = pyro.sample('eta', dist.Normal(torch.zeros(eight_school_data['J']), torch.ones(eight_school_data['J']))) mu = pyro.sample('mu', dist.Normal(torch.zeros(1), 10 * torch.ones(1))) tau = pyro.sample('tau', dist.HalfCauchy(scale=25 * torch.ones(1))) theta = mu + tau * eta return pyro.sample("obs", dist.Normal(theta, sigma)) def conditioned_model(model, sigma, y): return poutine.condition(model, data={"obs": y})(sigma) nuts_kernel = NUTS(conditioned_model, adapt_step_size=True) posterior = MCMC(nuts_kernel, num_samples=draws, warmup_steps=warmup_steps).run(model, eight_school_data['sigma'], eight_school_data['y']) pyro_data = az.from_pyro(posterior) pyro_dataINFO:pyro.infer.mcmc.mcmc:Starting MCMC using kernel - NUTS ... 
INFO:pyro.infer.mcmc.mcmc:Iteration: 50 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.125390 Acceptance rate: 0.860000 INFO:pyro.infer.mcmc.mcmc:Iteration: 100 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.789698 Acceptance rate: 0.910000 INFO:pyro.infer.mcmc.mcmc:Iteration: 150 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.282786 Acceptance rate: 0.920000 INFO:pyro.infer.mcmc.mcmc:Iteration: 200 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.105557 Acceptance rate: 0.930000 INFO:pyro.infer.mcmc.mcmc:Iteration: 250 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.527265 Acceptance rate: 0.944000 INFO:pyro.infer.mcmc.mcmc:Iteration: 300 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.200404 Acceptance rate: 0.940000 INFO:pyro.infer.mcmc.mcmc:Iteration: 350 [SAMPLE] INFO:pyro.infer.mcmc.mcmc:Step size: 0.286618 Acceptance rate: 0.945714 INFO:pyro.infer.mcmc.mcmc:Iteration: 400 [SAMPLE] INFO:pyro.infer.mcm[...]From emceeimport emcee eight_school_data = {'J': 8, 'y': np.array([28., 8., -3., 7., -1., 1., 18., 12.]), 'sigma': np.array([15., 10., 16., 11., 9., 11., 10., 18.]) } def log_prior_8school(theta,J): mu = theta[0] tau = theta[1] eta = theta[2:] # Half-cauchy prior if tau<0: return -np.inf hwhm = 25 prior_tau = -np.log(tau**2+hwhm**2) prior_mu = -(mu/10)**2 # normal prior, loc=0, scale=10 prior_eta = -np.sum(eta**2) # normal prior, loc=0, scale=1 return prior_mu + prior_tau + prior_eta def log_likelihood_8school(theta,y,sigma): mu = theta[0] tau = theta[1] eta = theta[2:] return -np.sum(((mu + tau * eta - y) / sigma)**2) def lnprob_8school(theta,J,y,sigma): prior = log_prior_8school(theta,J) if prior <= -np.inf: return -np.inf like = log_likelihood_8school(theta,y,sigma) return like+prior nwalkers = 40 ndim = eight_school_data['J']+2 draws = 1500 pos = np.random.normal(size=(nwalkers,ndim)) pos[:,1] = np.absolute(pos[:,1]) sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_8school, args=(eight_school_data['J'], eight_school_data['y'], eight_school_data['sigma'] ) ) sampler.run_mcmc(pos, draws) # define variable names, it cannot be inferred from emcee var_names = ['mu','tau']+['eta{}'.format(i) for i in range(eight_school_data['J'])] emcee_data = az.from_emcee(sampler, var_names = var_names) emcee_dataAdditional data cleaning for ml# import libraries import pandas as pd import numpy as np from path import Path import warnings warnings.filterwarnings('ignore') #load data data = Path('./Resources/cleaned_stroke_dataset.csv') stroke_df = pd.read_csv(data) stroke_df.head() # drop index stroke_df = stroke_df.drop('work_type', axis=1) stroke_df = stroke_df.drop('bmi', axis=1) stroke_df = stroke_df.drop('avg_glucose_level', axis=1) stroke_df = stroke_df.drop('Residence_type', axis=1) stroke_df.head() #reorder dataframe stroke_df = stroke_df.reindex(['age', 'gender', 'ever_married', 'smoking_status', 'hypertension', 'heart_disease', 'stroke'], axis=1) #rename stroke_df.rename(columns={'age': 'Age', 'gender': 'Gender', 'ever_married': 'Ever Married', 'smoking_status': 'Smoking Status', 'hypertension': 'Hypertension', 'heart_disease': 'Heart Disease', 'stroke': 'Stroke'}, inplace=True) stroke_df.head() # export clean dataset stroke_df.to_csv('ml_clean_stroke_dataset.csv', index=False)Linear Regression with closed form solution Import necessary librariesimport numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.io as sio %matplotlib inlineDefine the function to compute cost functiondef computeTheta(X, Y): """ Calculates theta using the closed form solution. 
This method uses the following formula to calculate the value of regression parameters: theta = ((X.T*X)^(-1))*(X.T*Y) where: X is a matrix storing the input data points, Y is the observed outputs, X.T represents the transpose of a matrix X and theta is a matrix storing the regression params Parameters ---------- X : Nxd matrix N is the number of input samples and d is the number of features Y : Nx1 matrix The matrix storing the actual outputs Returns ------- dx1 matrix The calculated regression parameters """ inversePart = np.linalg.inv(X.T*X) # matrix inverse of X.T*X (np.power(X.T*X, -1) would only give an elementwise reciprocal) rest = X.T*Y return inversePart*restImport datadata = sio.loadmat("dataset1.mat") data = pd.DataFrame(np.hstack((data['X_trn'], data['Y_trn']))) data.columns = ['X_trn', 'Y_trn'] data.describe() data.plot(kind="scatter", x="X_trn", y="Y_trn", figsize=(12, 8))Add a column of 1s to Xdata.insert(0, "ones", 1) X = data.iloc[:, 0:2] X.head() X = np.matrix(X.values) Y = data.iloc[:, 2:] Y.head() Y = np.matrix(Y.values) theta = computeTheta(X, Y) print(theta)[[ 196.5580325 ] [ 117.95493298]]bicycles- [Tags] 525- [PostLinks] 6,140- [Badges] 80,935- [Users] 40,571- [Votes] 283,664- [Comments] 131,281- [Posts] 56,860- [PostHistory] 146,878 coffee- [Tags] 115- [PostLinks] 602- [Comments] 4,365- [Badges] 10,852- [Votes] 20,663- [Posts] 3,936- [Users] 8,256- [PostHistory] 10,178 ukrainian- [Tags] 120- [PostLinks] 399- [Badges] 6,248- [Users] 3,080- [Comments] 6,954- [Votes] 28,867- [Posts] 5,069- [PostHistory] 16,102import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.display import display %run common.ipynbLoading the databicycles_posts_df = read_stackexchange(ModelType.POSTS, ForumType.BICYCLES) coffee_posts_df = read_stackexchange(ModelType.POSTS, ForumType.COFFEE) ukrainian_posts_df = read_stackexchange(ModelType.POSTS, ForumType.UKRAINIAN) def analyze_v1(posts): # PostTypeId: 1 -> Question, 2 -> Answer questions = posts.loc[posts.PostTypeId == 1, ['Id', 'CreationDate', 'Title', 'OwnerUserId']] answers = posts.loc[posts.PostTypeId == 2, ['Id', 'ParentId', 'CreationDate', 'Body', 'OwnerUserId']] questions['CreationDate_datetime'] = pd.to_datetime(questions['CreationDate']) answers['CreationDate_datetime'] = pd.to_datetime(answers['CreationDate']) df = questions.join(answers.set_index('ParentId'), on = 'Id', lsuffix='_question', rsuffix='_answer', how = 'inner') df['diff_time'] = df['CreationDate_datetime_answer'] - df['CreationDate_datetime_question'] #df['diff_hours']=df['diff_hours']/np.timedelta64(1,'h') # Aggregate diff_time by Id_question df = df.groupby('Id_question').agg({'diff_time': ['min', 'max']}) df.columns = ['_'.join(col) for col in df.columns.values] # Get (min, max) from min and max return df.agg({'diff_time_min': ['min', 'max'], 'diff_time_max': ['min', 'max']}) analyze_v1(bicycles_posts_df) analyze_v1(coffee_posts_df) analyze_v1(ukrainian_posts_df)We get some curious results - zeros and negative values.
Let's start with the negative ones.After a short investigation, we find the culprit:def nostradamus(): questions = bicycles_posts_df.loc[bicycles_posts_df.PostTypeId == 1, ['Id', 'CreationDate', 'Title']] answers = bicycles_posts_df.loc[bicycles_posts_df.PostTypeId == 2, ['Id', 'ParentId', 'CreationDate']] questions['CreationDate_datetime'] = pd.to_datetime(questions['CreationDate']) answers['CreationDate_datetime'] = pd.to_datetime(answers['CreationDate']) df = questions.join(answers.set_index('ParentId'), on = 'Id', lsuffix='_question', rsuffix='_answer', how = 'inner') df['diff_time'] = df['CreationDate_datetime_answer'] - df['CreationDate_datetime_question'] #df['diff_hours']=df['diff_hours']/np.timedelta64(1,'h') return df.loc[df['Id_question'] == 10069, ['Title', 'CreationDate_question', 'CreationDate_answer', 'diff_time']] nostradamus()Indeed, the user [coco](https://bicycles.stackexchange.com/users/4394/coco), henceforth known as Nostradamus, asked a question (the only one of their career on *bicycles.stackexchange.com*) in 2012, and then (although one is tempted to say *previously*) the users [](https://bicycles.stackexchange.com/users/1584/daniel-r-hicks) and [Angelo](https://bicycles.stackexchange.com/users/1998/angelo) answered it in 2011.> asked Jun 26 '12 at 15:56>> coco> answered Sep 9 '11 at 12:13>> > answered Sep 9 '11 at 13:09>> AngeloProof of this time travel can still be found hanging on the forum: https://bicycles.stackexchange.com/questions/10069/does-drafting-cause-resistance-to-the-lead-rider Now let's move on to the zero results.The analysis below shows that all answers apparently written less than a thousandth of a second after their question was posted belong to the same author who asked the question. Finding the reason for this phenomenon is trivial and we leave it to the readers as an exercise.def zeroday(posts): # PostTypeId: 1 -> Question, 2 -> Answer questions = posts.loc[posts.PostTypeId == 1, ['Id', 'CreationDate', 'Title', 'OwnerUserId']] answers = posts.loc[posts.PostTypeId == 2, ['Id', 'ParentId', 'CreationDate', 'Body', 'OwnerUserId']] questions['CreationDate_datetime'] = pd.to_datetime(questions['CreationDate']) answers['CreationDate_datetime'] = pd.to_datetime(answers['CreationDate']) df = questions.join(answers.set_index('ParentId'), on = 'Id', lsuffix='_question', rsuffix='_answer', how = 'inner') df['diff_time'] = df['CreationDate_datetime_answer'] - df['CreationDate_datetime_question'] df['IsSame'] = df['OwnerUserId_question'] == df['OwnerUserId_answer'] df = df.loc[ df['CreationDate_datetime_question'] == df['CreationDate_datetime_answer'], ['Id_question', 'Title', 'IsSame'] ] return df zeroday(bicycles_posts_df) zeroday(coffee_posts_df) zeroday(ukrainian_posts_df)Version 2 In this version we skip the non-positive resultsdef analyze_v2(posts): # PostTypeId: 1 -> Question, 2 -> Answer questions = posts.loc[posts.PostTypeId == 1, ['Id', 'CreationDate', 'Title', 'OwnerUserId']] answers = posts.loc[posts.PostTypeId == 2, ['Id', 'ParentId', 'CreationDate', 'Body', 'OwnerUserId']] questions['CreationDate_datetime'] = pd.to_datetime(questions['CreationDate']) answers['CreationDate_datetime'] = pd.to_datetime(answers['CreationDate']) df = questions.join(answers.set_index('ParentId'), on = 'Id', lsuffix='_question', rsuffix='_answer', how = 'inner') df['diff_time'] = df['CreationDate_datetime_answer'] - df['CreationDate_datetime_question'] #df['diff_hours']=df['diff_hours']/np.timedelta64(1,'h') df = df.loc[df['CreationDate_datetime_answer'] >
df['CreationDate_datetime_question']] # Aggregate diff_time by Id_question df = df.groupby('Id_question').agg({'diff_time': ['min', 'max']}) df.columns = ['_'.join(col) for col in df.columns.values] # Get (min, max) from min and max return df.agg({'diff_time_min': ['min', 'max'], 'diff_time_max': ['min', 'max']}) for forum_type in ForumType: posts_df = read_stackexchange(ModelType.POSTS, forum_type) print(forum_type.value) display(analyze_v2(posts_df))bicyclesCar racinghttps://gym.openai.com/envs/CarRacing-v0/ Install dependanciesDacă ceva nu merge, renunțați la `> /dev/null` pentru a vă asigura că instalarea a mers blană.!pip install gym pyvirtualdisplay > /dev/null 2>&1 !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 !apt-get update > /dev/null 2>&1 !apt-get install cmake > /dev/null 2>&1 !pip install --upgrade setuptools 2>&1 !pip install ez_setup > /dev/null 2>&1 !pip install gym[atari] > /dev/null 2>&1 !pip install box2d-py > /dev/null 2>&1Requirement already up-to-date: setuptools in /usr/local/lib/python3.6/dist-packages (41.0.1)Imports and Helper functionsimport gym from gym import logger as gymlogger from gym.wrappers import Monitor gymlogger.set_level(40) #error only import tensorflow as tf import numpy as np import random import matplotlib import matplotlib.pyplot as plt %matplotlib inline import math import glob import io import base64 from IPython.display import HTML from IPython import display as ipythondisplay import gym from gym import logger as gymlogger from gym.wrappers import Monitor gymlogger.set_level(40) #error only import tensorflow as tf import numpy as np from collections import namedtuple from itertools import count from PIL import Image import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torchvision.transforms as T import random import matplotlib import matplotlib.pyplot as plt %matplotlib inline import math import glob import io import base64 from IPython.display import HTML from IPython import display as ipythondisplay from pyvirtualdisplay import Display from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() # : https://star-ai.github.io/Rendering-OpenAi-Gym-in-Colaboratory/ def show_video(): mp4list = glob.glob('video/*.mp4') if len(mp4list) > 0: mp4 = mp4list[0] video = io.open(mp4, 'r+b').read() encoded = base64.b64encode(video) ipythondisplay.display(HTML(data=''''''.format(encoded.decode('ascii')))) else: print("Could not find video") def wrap_env(env): env = Monitor(env, './video', force=True) return envCarRacing - vizualizareenv = wrap_env(gym.make("CarRacing-v0")) observation = env.reset() while True: env.render() action = env.action_space.sample() observation, reward, done, info = env.step(action) if done: break; env.close() show_video()Track generation: 1143..1433 -> 290-tiles trackCarRacing - fără vizualizareenv = wrap_env(gym.make("CarRacing-v0")) env.action_space = [[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, 0, 0.8]] NR_ACTIONS = len(env.action_space) env.reset() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward')) class ReplayMemory(object): def __init__(self, capacity): self.capacity = capacity self.memory = [] self.position = 0 def push(self, *args): """Saves a transition.""" if len(self.memory) < self.capacity: self.memory.append(None) self.memory[self.position] = Transition(*args) self.position = (self.position + 1) % self.capacity def 
sample(self, batch_size): return random.sample(self.memory, batch_size) def __len__(self): return len(self.memory) class DQN(nn.Module): def __init__(self, h, w, outputs): super(DQN, self).__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2) self.bn1 = nn.BatchNorm2d(16) self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2) self.bn2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2) self.bn3 = nn.BatchNorm2d(32) # Number of Linear input connections depends on output of conv2d layers # and therefore the input image size, so compute it. def conv2d_size_out(size, kernel_size = 5, stride = 2): return (size - (kernel_size - 1) - 1) // stride + 1 convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w))) convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h))) linear_input_size = convw * convh * 32 self.head = nn.Linear(linear_input_size, outputs) # Called with either one element to determine next action, or a batch # during optimization. Returns tensor([[left0exp,right0exp]...]). def forward(self, x): x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = F.relu(self.bn3(self.conv3(x))) return self.head(x.view(x.size(0), -1)) resize = T.Compose([T.ToPILImage(), T.Resize((96, 96), interpolation=Image.CUBIC), # TODO adjust sizes T.ToTensor()]) def get_screen(): screen = env.render(mode='rgb_array').transpose((2, 0, 1)) # Resize, and add a batch dimension (BCHW) return resize(screen).unsqueeze(0).to(device) # Set up parameters, select_action and optimize_model functions BATCH_SIZE = 128 GAMMA = 0.999 EPS_START = 0.9 EPS_END = 0.05 EPS_DECAY = 200 TARGET_UPDATE = 10 # Get screen size so that we can initialize layers correctly based on shape # returned from AI gym. Typical dimensions at this point are close to 3x40x90 # which is the result of a clamped and down-scaled render buffer in get_screen() env.reset() init_screen = get_screen() _, _, screen_height, screen_width = init_screen.shape print(init_screen.shape) # Get number of actions from gym action space #n_actions = env.action_space.n # old n_actions = NR_ACTIONS # new policy_net = DQN(screen_height, screen_width, n_actions).to(device) target_net = DQN(screen_height, screen_width, n_actions).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.RMSprop(policy_net.parameters()) memory = ReplayMemory(10000) steps_done = 0 def select_action(state): global steps_done sample = random.random() eps_threshold = EPS_END + (EPS_START - EPS_END) * \ math.exp(-1. * steps_done / EPS_DECAY) steps_done += 1 if sample > eps_threshold: with torch.no_grad(): # t.max(1) will return largest column value of each row. # second column on max result is index of where max element was # found, so we pick action with the larger expected reward. return policy_net(state).max(1)[1].view(1, 1) else: return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long) episode_durations = [] def optimize_model(): if len(memory) < BATCH_SIZE: return transitions = memory.sample(BATCH_SIZE) # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for # detailed explanation). This converts batch-array of Transitions # to Transition of batch-arrays. 
batch = Transition(*zip(*transitions)) # Compute a mask of non-final states and concatenate the batch elements # (a final state would've been the one after which simulation ended) non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.uint8) non_final_next_states = torch.cat([s for s in batch.next_state if s is not None]) state_batch = torch.cat(batch.state) action_batch = torch.cat(batch.action) reward_batch = torch.cat(batch.reward) # Compute Q(s_t, a) - the model computes Q(s_t), then we select the # columns of actions taken. These are the actions which would've been taken # for each batch state according to policy_net state_action_values = policy_net(state_batch).gather(1, action_batch) # Compute V(s_{t+1}) for all next states. # Expected values of actions for non_final_next_states are computed based # on the "older" target_net; selecting their best reward with max(1)[0]. # This is merged based on the mask, such that we'll have either the expected # state value or 0 in case the state was final. next_state_values = torch.zeros(BATCH_SIZE, device=device) next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach() # Compute the expected Q values expected_state_action_values = (next_state_values * GAMMA) + reward_batch # Compute Huber loss loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1)) # Optimize the model optimizer.zero_grad() loss.backward() for param in policy_net.parameters(): param.grad.data.clamp_(-1, 1) optimizer.step() # NEW TESTING VERSION import time num_episodes = 15 for i_episode in range(num_episodes): # Initialize the environment and state env.reset() last_screen = get_screen() current_screen = get_screen() state = current_screen - last_screen start = time.time() for t in count(): action = select_action(state) #observation, reward, done, info = env.step(action.item()) # old - from tutorial observation, reward, done, info = env.step(env.action_space[action.item()]) # new reward = torch.tensor([reward], device=device).float() # Observe new state last_screen = current_screen current_screen = get_screen() if not done: next_state = current_screen - last_screen else: next_state = None # Store the transition in memory memory.push(state, action, next_state, reward) # Move to the next state state = next_state # Perform one step of the optimization (on the target network) optimize_model() if done: episode_durations.append(t + 1) if len(episode_durations) == 20: print("Average ep. length (last 20 eps.): ", np.mean(episode_durations)) episode_durations.clear() print('Took', time.time() - start, 'seconds for 1 episode') start = time.time() break # Update the target network, copying all weights and biases in DQN if i_episode % TARGET_UPDATE == 0: target_net.load_state_dict(policy_net.state_dict()) print('Complete') env.close() # show_video() class DDQN(nn.Module): def __init__(self, h, w, outputs): super(DDQN, self).__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2) self.bn1 = nn.BatchNorm2d(16) self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2) self.bn2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2) self.bn3 = nn.BatchNorm2d(32) # Number of Linear input connections depends on output of conv2d layers # and therefore the input image size, so compute it. 
def conv2d_size_out(size, kernel_size = 5, stride = 2): return (size - (kernel_size - 1) - 1) // stride + 1 convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w))) convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h))) linear_input_size = convw * convh * 32 self.head1 = nn.Linear(linear_input_size, outputs) linear_input_size_inter = convw * convh self.head21 = nn.Linear(linear_input_size, linear_input_size_inter) self.head22 = nn.Linear(linear_input_size_inter, outputs) # Called with either one element to determine next action, or a batch # during optimization. Returns tensor([[left0exp,right0exp]...]). def forward(self, x): x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = F.relu(self.bn3(self.conv3(x))) a = self.head1(x.view(x.size(0), -1)) b = self.head21(x.view(x.size(0), -1)) c = self.head22(b) return a + c resize = T.Compose([T.ToPILImage(), T.Resize((96, 96), interpolation=Image.CUBIC), # TODO adjust sizes T.ToTensor()]) def get_screen(): screen = env.render(mode='rgb_array').transpose((2, 0, 1)) # Resize, and add a batch dimension (BCHW) return resize(screen).unsqueeze(0).to(device) # Set up parameters, select_action and optimize_model functions BATCH_SIZE = 128 GAMMA = 0.999 EPS_START = 0.9 EPS_END = 0.05 EPS_DECAY = 200 TARGET_UPDATE = 10 # Get screen size so that we can initialize layers correctly based on shape # returned from AI gym. Typical dimensions at this point are close to 3x40x90 # which is the result of a clamped and down-scaled render buffer in get_screen() env.reset() init_screen = get_screen() _, _, screen_height, screen_width = init_screen.shape print(init_screen.shape) # Get number of actions from gym action space #n_actions = env.action_space.n # old n_actions = NR_ACTIONS # new policy_net = DDQN(screen_height, screen_width, n_actions).to(device) target_net = DDQN(screen_height, screen_width, n_actions).to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.RMSprop(policy_net.parameters()) memory = ReplayMemory(10000) steps_done = 0 def select_action(state): global steps_done sample = random.random() eps_threshold = EPS_END + (EPS_START - EPS_END) * \ math.exp(-1. * steps_done / EPS_DECAY) steps_done += 1 if sample > eps_threshold: with torch.no_grad(): # t.max(1) will return largest column value of each row. # second column on max result is index of where max element was # found, so we pick action with the larger expected reward. return policy_net(state).max(1)[1].view(1, 1) else: return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long) episode_durations = [] def optimize_model(): if len(memory) < BATCH_SIZE: return transitions = memory.sample(BATCH_SIZE) # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for # detailed explanation). This converts batch-array of Transitions # to Transition of batch-arrays. batch = Transition(*zip(*transitions)) # Compute a mask of non-final states and concatenate the batch elements # (a final state would've been the one after which simulation ended) non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.uint8) non_final_next_states = torch.cat([s for s in batch.next_state if s is not None]) state_batch = torch.cat(batch.state) action_batch = torch.cat(batch.action) reward_batch = torch.cat(batch.reward) # Compute Q(s_t, a) - the model computes Q(s_t), then we select the # columns of actions taken. 
These are the actions which would've been taken # for each batch state according to policy_net state_action_values = policy_net(state_batch).gather(1, action_batch) # Compute V(s_{t+1}) for all next states. # Expected values of actions for non_final_next_states are computed based # on the "older" target_net; selecting their best reward with max(1)[0]. # This is merged based on the mask, such that we'll have either the expected # state value or 0 in case the state was final. next_state_values = torch.zeros(BATCH_SIZE, device=device) next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach() # Compute the expected Q values expected_state_action_values = (next_state_values * GAMMA) + reward_batch # Compute Huber loss loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1)) # Optimize the model optimizer.zero_grad() loss.backward() for param in policy_net.parameters(): param.grad.data.clamp_(-1, 1) optimizer.step() # NEW TESTING VERSION import time num_episodes = 15 for i_episode in range(num_episodes): # Initialize the environment and state env.reset() last_screen = get_screen() current_screen = get_screen() state = current_screen - last_screen start = time.time() for t in count(): action = select_action(state) #observation, reward, done, info = env.step(action.item()) # old - from tutorial observation, reward, done, info = env.step(env.action_space[action.item()]) # new reward = torch.tensor([reward], device=device).float() # Observe new state last_screen = current_screen current_screen = get_screen() if not done: next_state = current_screen - last_screen else: next_state = None # Store the transition in memory memory.push(state, action, next_state, reward) # Move to the next state state = next_state # Perform one step of the optimization (on the target network) optimize_model() if done: episode_durations.append(t + 1) if len(episode_durations) == 20: print("Average ep. 
length (last 20 eps.): ", np.mean(episode_durations)) episode_durations.clear() print('Took', time.time() - start, 'seconds for 1 episode') start = time.time() break # Update the target network, copying all weights and biases in DDQN if i_episode % TARGET_UPDATE == 0: target_net.load_state_dict(policy_net.state_dict()) print('Complete') env.close() # show_video()Track generation: 1099..1378 -> 279-tiles track Took 49.952375173568726 seconds for 1 episode Track generation: 1100..1379 -> 279-tiles track Took 51.98528051376343 seconds for 1 episode Track generation: 1275..1598 -> 323-tiles track Took 54.00662970542908 seconds for 1 episode Track generation: 1087..1370 -> 283-tiles track Took 52.510228395462036 seconds for 1 episode Track generation: 1211..1518 -> 307-tiles track Took 53.164055824279785 seconds for 1 episode Track generation: 1160..1454 -> 294-tiles track Took 52.34375 seconds for 1 episode Track generation: 1108..1394 -> 286-tiles track Took 53.41212344169617 seconds for 1 episode Track generation: 1241..1564 -> 323-tiles track Took 55.53093338012695 seconds for 1 episode Track generation: 1252..1569 -> 317-tiles track Took 53.23687934875488 seconds for 1 episode Track generation: 1131..1418 -> 287-tiles track Took 52.6376428604126 seconds for 1 episode Track generation: 1353..1695 -> 342-tiles track Took 54.39814209938049 second[...]Pre-processingdef preprocess(text): stop_words = stopwords.words('english') lemmatizer=nltk.stem.WordNetLemmatizer() # tokenazation tokens_list = [] for sent in nltk.sent_tokenize(text, language='english'): for word in nltk.word_tokenize(sent, language='english'): tokens_list.append(word) output = [] for Token in tokens_list: # to lowercase token=Token.lower() # punctuation removal for punc in string.punctuation: token=token.replace(punc,'') # number digits removal for digit in string.digits: token=token.replace(digit,'') # lemmatzation token = lemmatizer.lemmatize(token) # stop words removal if (token != "") and (token not in stop_words): output.append(token) return output %%time test_x=test_x.apply(preprocess) %%time train_x=train_x.apply(preprocess) # list to str train_x=train_x.apply(lambda x: ''.join(i+' ' for i in x)) test_x=test_x.apply(lambda x: ''.join(i+' ' for i in x))Save pre-processed datatrain_set_pp = pd.DataFrame(columns=["Class Index", "Description"]) test_set_pp = pd.DataFrame(columns=["Class Index", "Description"]) train_set_pp["Class Index"] = train_y train_set_pp["Description"] = train_x test_set_pp["Class Index"] = test_y test_set_pp["Description"] = test_x train_set_pp.to_csv("dataset_train_pp.csv") test_set_pp.to_csv("dataset_test_pp.csv") train_set_pp.head()DistArray Julia Set===================The Julia set, for a given complex number $c$, is the set of points $z$such that $|z_{i}|$ remains bounded where $z_{i+1} = z_{i}^2 + c$.This can be plotted by counting how many iterations are required for $|z_{i}|$ to exceed a cutoff.Depending on the value of $c$, the Julia set may be connected and containa lot of points, or it could be disconnected and contain fewer points.The points in the set will require the maximum iteration count, sothe connected sets will usually take longer to compute. 
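To make the escape-time idea concrete, here is a minimal single-point sketch (independent of the DistArray code that follows; the cutoffs z_max=2.0 and n_max=100 match the values used later in this notebook, and the sample point is illustrative):

def escape_count(z0, c, z_max=2.0, n_max=100):
    """Count how many iterations of z -> z*z + c it takes for |z| to exceed z_max."""
    z = z0
    for n in range(n_max):
        z = z * z + c
        if abs(z) > z_max:
            return n      # escaped: the point is not in the Julia set
    return n_max          # stayed bounded for n_max steps: treated as inside the set

# A point far from the origin escapes on the first iteration,
# while points belonging to the Julia set reach the n_max cap.
print(escape_count(2.0 + 2.0j, complex(0.285, 0.01)))   # 0

The kernels below apply this same iteration to a whole array of points at once.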
First, some imports.from __future__ import print_function import numpy import matplotlib.pyplot as plt # DistArray imports from distarray.globalapi import Context, Distribution from distarray.globalapi.distarray import DistArray # inline plots %matplotlib inline # bigger figures plt.rcParams.update({'figure.figsize': (10, 10)})Julia set kernels-----------------To avoid round-trips between the client and engines, our strategy will be to push a function to each engine to compute that engine's local section of the Julia set. Here we have two options for this "kernel": a version that avoids NumPy fancy indexing, and one that uses it.def numpy_julia_calc(z, c, z_max, n_max): """Calculate the Julia set using NumPy. Parameters ---------- z : NumPy array array of complex values whose iterations we will count. c : complex Complex number to add at each iteration. z_max : float Magnitude of complex value that we assume goes to infinity. n_max : int Maximum number of iterations. """ z = numpy.asarray(z) counts = numpy.zeros_like(z, dtype=numpy.int32) hits = numpy.zeros_like(z, dtype=numpy.bool) mask = numpy.zeros_like(z, dtype=numpy.bool) n = 0 while not numpy.all(hits) and n < n_max: z = z * z + c mask = (abs(z) > z_max) & (~hits) counts[mask] = n hits |= mask z[hits] = 0 n += 1 counts[~hits] = n_max return counts def fancy_numpy_julia_calc(z, c, z_max, n_max): """Calculate the Julia set using NumPy fancy indexing. Parameters ---------- z : NumPy array array of complex values whose iterations we will count. c : complex Complex number to add at each iteration. z_max : float Magnitude of complex value that we assume goes to infinity. n_max : int Maximum number of iterations. """ z = numpy.asarray(z) counts = numpy.zeros_like(z, dtype=numpy.int32) hits = numpy.zeros_like(z, dtype=numpy.bool) mask = numpy.zeros_like(z, dtype=numpy.bool) n = 0 while not numpy.all(hits) and n < n_max: z[~hits] = z[~hits] * z[~hits] + c mask = (abs(z) > z_max) & (~hits) counts[mask] = n hits |= mask n += 1 counts[~hits] = n_max return countsCoordinating functions----------------------Here we have functions that create a DistArray representing the complex plane and functions that coordinate applying the kernel on each distributed section of the DistArray.def create_complex_plane(context, resolution, dist, re_ax, im_ax): """Create a DistArray containing points on the complex plane. Parameters ---------- context : DistArray Context resolution : 2-tuple The number of points along Re and Im axes. dist : 2-element sequence or dict dist_type for of the DistArray Distribution. re_ax : 2-tuple The (lower, upper) range of the Real axis. im_ax : 2-tuple The (lower, upper) range of the Imaginary axis. """ import numpy as np def fill_complex_plane(arr, re_ax, im_ax, resolution): """Fill in points on the complex coordinate plane.""" re_step = float(re_ax[1] - re_ax[0]) / resolution[0] im_step = float(im_ax[1] - im_ax[0]) / resolution[1] for i in arr.distribution[0].global_iter: for j in arr.distribution[1].global_iter: arr.global_index[i, j] = complex(re_ax[0] + re_step * i, im_ax[0] + im_step * j) # Create an empty distributed array. distribution = Distribution(context, (resolution[0], resolution[1]), dist=dist) complex_plane = context.empty(distribution, dtype=np.complex64) context.apply(fill_complex_plane, (complex_plane.key, re_ax, im_ax, resolution)) return complex_plane def local_julia_calc(la, c, z_max, n_max, kernel): """Calculate the number of iterations for the point to escape. 
Parameters ---------- la : LocalArray LocalArray of complex values whose iterations we will count. c : complex Complex number to add at each iteration. z_max : float Magnitude of complex value that we assume goes to infinity. n_max : int Maximum number of iterations. kernel : function Kernel to use for computation of the Julia set. Options are 'fancy', 'numpy', or 'cython'. """ from distarray.localapi import LocalArray counts = kernel(la, c, z_max, n_max) res = LocalArray(la.distribution, buf=counts) return proxyize(res) # noqa def distributed_julia_calc(distarray, c, z_max, n_max, kernel=fancy_numpy_julia_calc): """Calculate the Julia set for an array of points in the complex plane. Parameters ---------- distarray : DistArray DistArray of complex values whose iterations we will count. c : complex Complex number to add at each iteration. z_max : float Magnitude of complex value that we assume goes to infinity. n_max : int Maximum number of iterations. kernel: function Kernel to use for computation of the Julia set. Options are 'fancy', 'numpy', or 'cython'. """ context = distarray.context iters_key = context.apply(local_julia_calc, (distarray.key, c, z_max, n_max), {'kernel': kernel}) iters_da = DistArray.from_localarrays(iters_key[0], context=context, dtype=numpy.int32) return iters_daUsing DistArray to explore the Julia set----------------------------------------context = Context() # handles communicating with the engines # create the complex plane resolution = (512, 512) dist = 'cc' # different distributions of the data among the engines # one character per dimension # 'c' stands for 'cyclic', 'b' for block re_ax = (-1.5, 1.5) im_ax = (-1.5, 1.5) complex_plane = create_complex_plane(context, resolution, dist, re_ax, im_ax) # calculate the Julia set z_max = 2.0 n_max = 100 c = complex(0.285, 0.01) iters_da = distributed_julia_calc(complex_plane, c, z_max, n_max, kernel=numpy_julia_calc) # plot it! plt.matshow(iters_da.toarray(), cmap='hot') context.close()! pip install nltk ! python -m textblob.download_corpora ! pip install -U textblob pip install --upgrade gensim import csv import pandas as pd import spacy import nltk import gensim import gensim.downloader from textblob.classifiers import NaiveBayesClassifier from textblob import TextBlob from collections import Counter from nltk.corpus import stopwords from gensim.test.utils import common_texts from gensim.models import Word2Vec from gensim.models import KeyedVectors nltk.download('stopwords') glove_vector = gensim.downloader.load('glove-twitter-25') raw_reviews = [] reviews_filename = '/content/drive/MyDrive/reviews.csv.txt' with open(reviews_filename, 'r') as reviews_csvfile: csvreader = csv.reader(reviews_csvfile) next(csvreader) next(csvreader) for i in range(10397): row = next(csvreader) if (len(row) >= 5): row.pop(0) row.pop(-1) row[2] = ''.join(row[2].split()) if row[2].replace('.', '', 1).isdigit(): row[2] = float(row[2]) row[1] = row[1].rstrip() if not row[1] == "" and isinstance(row[2], float): raw_reviews.append(row) table = pd.DataFrame(data = raw_reviews, columns = ['ID', 'Comments', 'Recommend']) reviews = [] for col, row in table.iterrows(): reviews.append(row['Comments']) table review_pos_neg = [] # stores overall positivity or negativity of reviews nlp = spacy.load("en_core_web_sm") # 80/20 split: 80% of data allocated for training, # 20% for testing the model. 
train = [] for col, row in table.iloc[0:2882].iterrows(): if row['Recommend'] < 3: train.append([row['Comments'], "negative"]) else: train.append([row['Comments'], "positive"]) for col, row in table.iloc[1802:3602].iterrows(): if row['Recommend'] < 3: print(row['Comments']) model = NaiveBayesClassifier(train) test = reviews[1802:3602] negative_reviews = 0 for col, row in table.iloc[1802:3602].iterrows(): if row['Recommend'] < 3: print(row['Comments']) negative_reviews += 1 for review in test: review_aspects = [] sentences = review.split(". ") index = 0 for sentence in sentences: chopped_sentence = nlp(sentence) # format for tokens (linking verb, ..., ADV, ADV, ADJ) if "was" in sentence or "is" in sentence or "were" in sentence or "are" in sentence: for token in chopped_sentence: token_describer = "" if token.pos_ == 'ADJ': adverbs = "" for adverb in token.children: if adverb.pos_ == 'ADV': adverbs += adverb.text + " " token_describer = adverbs + token.text review_aspects.append({'sentence number': index, 'description': token_describer}) if not token_describer: review_aspects.pop() index += 1 # analyze aspects to choose exceptionally polarizing ones polar_aspects = [] for aspect in review_aspects: if TextBlob(aspect['description']).sentiment.polarity >= 0.1: polar_aspects.append(aspect) # classify each sentence with flagged aspects analyzed_review = TextBlob(review, classifier=model) var = False analyzed_sentences = analyzed_review.sentences classifications = [] # stores whether each review is positive or negative for sentence in analyzed_sentences: for aspect in polar_aspects: if aspect['description'] in sentence: var = True if var == True: classifications.append(sentence.classify()) if len(classifications) != 0: # print(Counter(classifications).most_common(1)[0][0], review) review_pos_neg.append([review, Counter(classifications).most_common(1)[0][0]]) # collect dictionary (final_list) from solution 1 raw_reviews = [] reviews_filename = '/content/drive/MyDrive/labeled_data.csv' with open(reviews_filename, 'r') as reviews_csvfile: csvreader = csv.reader(reviews_csvfile) next(csvreader) for i in range(1000): # 10399 row = next(csvreader) if int(row[3]) != 0: review = row[-1] review_arr = review.split(":") raw_reviews.append(review_arr[-1]) review_words = [] for review in raw_reviews: review_arr = review.split(" ") for review_word in review_arr: if "\"" not in review_word and review_word != "" and not "&" in review_word and review_word != "-" and review_word != "love" and "I" not in review_word and "'" not in review_word and review_word != "got": review_words.append(review_word) stop_words = set(stopwords.words('english')) with open('/content/drive/MyDrive/common_words.txt','r') as file: common_words = file.read() words_list = [word for word in review_words if not word in stop_words and not word in common_words] final_list = [] for word in Counter(words_list).most_common(9): final_list.append(word[0]) # use word2vec to identify words to related to words in dictionary # (used pseudo-recursive loop) for word in final_list: related_words = glove_vector.most_similar(word) for entry in related_words: final_list.append(entry[0]) if len(final_list) == 100: break for review in review_pos_neg: for word in final_list: if word in review[0]: review[-1] = "negative" negative_reviews_pred = 0 for row in review_pos_neg: if row[-1] == "negative": negative_reviews_pred += 1 print("There were " + str(negative_reviews_pred) + " reviews to check.") print("There were " + str(negative_reviews) + " negative reviews 
in actuality.")There were 25 reviews to check. There were 102 negative reviews in actuality.Analyze proposals resultssess = KB.get_session() with sess.as_default(): proposals = tf.identity(rpn_proposal_rois) # <--- this uses the results from the model bx_area = (proposals[...,2]-proposals[...,0])*(proposals[...,3]-proposals[...,1]) print(' proposals :', tf.shape(proposals).eval()) print(' box area : ', tf.shape(bx_area).eval()) selected_idxs = tf.where(tf.less_equal(bx_area, (2/(128*128))) ) print('selected bx:', tf.shape(selected_idxs).eval()) print(selected_idxs.eval()) selected_area = tf.gather_nd(bx_area , selected_idxs) selected_proposals = tf.gather_nd(proposals, selected_idxs) print('selected proposals shape', tf.shape(selected_proposals).eval()) print(selected_proposals[0:30].eval()) print('selected area shape', tf.shape(selected_area).eval()) print(selected_area[0:30].eval())Analyze bounding box areaswith sess.as_default(): print(' boxes :', boxes.shape) for i in [0,10,17,25,26,34,39]: print(i, ' non-clipped ', boxes[0, i].eval()) bx_area = (boxes[...,2]-boxes[...,0])*(boxes[...,3]-boxes[...,1]) print(' box area : ', bx_area.shape) np.set_printoptions(linewidth=130,precision=4,threshold=4096) print(bx_area[:, :20].eval(session=sess)) small_idxs = tf.where(bx_area < 1) print('small bx:', tf.shape(small_idxs).eval()) print(small_idxs[0:10].eval()) small_area = tf.gather_nd(bx_area, small_idxs) small_boxes = tf.gather_nd(boxes, small_idxs) print('small boxes shape', tf.shape(small_boxes).eval()) print(small_boxes[0:30].eval()) print('small area shape', tf.shape(small_area).eval()) print(small_area[0:30].eval())Setup tensors to be passed to `detections_target_graph()` - Detection Target Layerimport mrcnn.utils as utils from mrcnn.detect_tgt_layer import overlaps_graph # sess = KB.get_session() # with sess.as_default(): try: sess.close() print('session was deleted ') except: print('Session was not defined ') pass sess = tf.InteractiveSession() image_id = 1 proposals = KB.identity(rpn_proposal_rois)[image_id] gt_class_ids = KB.identity(input_gt_class_ids)[image_id] gt_boxes = KB.cast(KB.identity(input_gt_bboxes_norm), dtype='float32')[image_id] # gt_masks = KB.identity(input_gt_masks) print('rpn_roi_proposals') print(proposals.dtype, gt_class_ids.dtype, gt_boxes.dtype) print(proposals.shape) print(proposals.eval()) print('gt_class_ids') print(gt_class_ids.shape) print(gt_class_ids.eval()) print('gt_boxes') print(gt_boxes.shape) print(gt_boxes.eval()) # proposals = rpn_proposal_rois[1] # gt_class_ids = input_gt_class_ids[1] # gt_boxes = input_normlzd_gt_boxes[1] # gt_masks = input_gt_masks[1] # config = model.configBonus Content - Classes and Objects in PythonThese notes are an adaptation of the notes found https://python.swaroopch.com/oop.htmlNow it's possible that throughout the notebooks you were confused about what I meant when I wrote `list` object, or `Series` object, or `ndarray` object. What is an object precisely in python?Objects are data types that can contain both data (called attributes) and functions (also called methods). Objects are instances of a predefined class. A class is essentially an object recipe. In writing up a class we provide the computer a set of instructions as to what the object should be, should have, should be able to do, and how to create one.If this is confusing don't worry, right now we are dealing with somewhat abstract concepts. 
Everything should become more clear once we work through some examples.## We'll start by making an empty class ## to define a class you write 'class' followed by the name of the ## class and finished with a colon class Empty: ## Normally classes are filled with attributes and methods ## however, this class is empty so we will just write pass pass ## Once we've defined an Empty class we can make an Empty object ## By calling Empty() e = Empty() print(e)In the above code chunk we defined `e` as an instance of the `Empty` class, thus `e` is an `Empty` object.However, empty objects are a little boring. Let's spice things up a bit. MethodsLet's make a class that can do some things. We'll define a class called `Dog`. This class will be just what it sounds like. All dogs have a name so when we make a `Dog` object let's make sure that we have to give it a name. `__init__`This is where the `__init__` method of a class comes into play. This method tells your computer how we need to initialize an instance of this class.## We're starting to make a Dog class class Dog: ## We can define an __init__ method that will run anytime we define a new ## instance of this class. Note that self has to be included in any object ## method def __init__(self, name): ## Here we are saying that when we create the Dog object, it will have ## a name attribute given by the input name self.name = str(name) ## Let's also define a method named woof. See that we still must put in self def woof(self): print("Woof.") ## Let's make a Dog object! ## Call Dog(name) Fido = Dog('Fido') ## What does Fido sound like? ## You can call the woof method with the ## Dog object's name .woof() Fido.woof() print() ## We can access the Dog object's name attribute ## with object.name print("Good boy " + Fido.name) ## You Code ## Define a Cat Class ## Make sure it has a name attribute ## and a meow method ## You Code ## Make a Cat object, print its name and make it meowClass Variables vs. Object VariablesAbove we saw that every `Dog` and `Cat` has a `name` variable. `name` is a variable that belongs to an instance of the `Dog` class. Anytime you make a `Dog` object it will have a `name` variable.`name` is an example of what is called an object variable. These variables are owned by each instance of the class.When we define a class we can also create what are known as class variables. These variables belong to the class itself. This may be confusing so let's return to our `Dog` class to see an example.Note that when we run the code below we will be overwriting the previous Dog class from above. So we will have to redefine Fido.## Redifining our Dog class to include a class variable class Dog: ## Here we initialize a class variable, NumDogs ## NumDogs will keep track of the number of Dog objects ## that have been created NumDogs = 0 ## We'll ammend our __init__ method to add 1 to NumDogs anytime we make a ## new Dog object def __init__(self,name): ## Here we are saying that when we create the Dog object, it will have ## a name attribute given by the input name self.name = name ## When we make a new Dog we'll increase NumDogs by 1 Dog.NumDogs = Dog.NumDogs + 1 ## A method named woof. 
See that we put in self def woof(self): print("Woof.") ## Let's Check to make sure it worked ## You can check a class variable by doing ## class_name.class_variable print("There are " + str(Dog.NumDogs) + " dogs.") ## Make Fido anew Fido = Dog('Fido') print("Now there is " + str(Dog.NumDogs) + " dog.") ## You Code ## Rewrite your Cat class to include a class variable ## that tracks the number of meows by all the cats. ## You Code ## Make two cats. Have the first one meow twice, ## how many total meows are there? ## Now have the second one meow three times, how many total meows are there?Class Methods vs. Object MethodsIn a similar vein there are object methods and class methods. `Fido.woof()` and the `cat_name.meow()` method are two examples of object methods. These are methods that belong to each instance of a class, like `Fido`.On the flip side we have methods that belong to the class itself. We don't currently have an example of a class method so let's write one for the `Dog` class.## Redifining our Dog class to include a class method class Dog: ## Here we initialize class variables NumDogs = 0 ## We added a new class variable NumWoofs NumWoofs = 0 ## We'll ammend our __init__ method to add 1 to NumDogs anytime we make a ## new Dog object def __init__(self,name): ## Here we are saying that when we create the Dog object, it will have ## a name attribute given by the input name self.name = name ## When we make a new Dog we'll increase NumDogs by 1 Dog.NumDogs = Dog.NumDogs + 1 ## woof is an object method, we can tell because the argument is self def woof(self): ## Each time a dog woofs we'll count it Dog.NumWoofs = Dog.NumWoofs + 1 print("Woof.") ## The following methods are class methods, class methods require @classmethod ## above them and cls as an arguement. Note that @classmethod is called a decorator @classmethod def HowMany(cls): ## HowMany tells us howmany Dog objects there are return Dog.NumDogs @classmethod def HowManyWoofs(cls): ## HowManyWoofs tells us how many woofs have happened return Dog.NumWoofs # We can check that this worked Fido = Dog('Fido') Spot = Dog('Spot') Millie = Dog('Millie') Fido.woof() Spot.woof() Millie.woof() Fido.woof() Fido.woof() print("There are " + str(Dog.HowMany()) + " dogs.") print("These dogs have woofed " + str(Dog.HowManyWoofs()) + " times.") print("Settle down a little bit dogs.") ## You Code ## Write a class method for Cat that returns the total number of meows.InheritanceThe last thing we'll mention in this notebook is the notion of inheritance. 
We've made a `Dog` class and a `Cat` class; however, in the real world we know both of these animals are specific examples of pets.We can think of both of these classes as a subclass of a larger class, known as a base class or superclass, called `Pet`.We'll make this below.## Defining our base class Pet class Pet: ## define class variables NumberOf = 0 ## define how we initialize a Pet object def __init__(self,name,sex,age): self.name = str(name) self.sex = sex self.age = age Pet.NumberOf = Pet.NumberOf + 1 print("Just made pet, " + self.name) ## This object method will print the name and age of the pet def NameAndAge(self): print("This pet's name is " + self.name + ".") if self.sex == 'M': print("He is " + str(self.age) + " years old.") else: print("She is " + str(self.age) + " years old.") ## Now we'll make a subclass Dog ## by inputting the Pet class as an input ## this tells Python that Dog should inherit ## all the attributes and functions of the Pet class class Dog(Pet): ## Define some class variables NumberOf = 0 NumberOfBorks = 0 ## Define __init__ for the Dog Class def __init__(self,name,sex,age,breed): ## Note we call the Pet __init__ method Pet.__init__(self,name,sex,age) Dog.NumberOf = Dog.NumberOf + 1 self.breed = breed print(self.name + " is a dog.") ## Define a bork method def bork(self): print("bork") Dog.NumberOfBorks = Dog.NumberOfBorks + 1 ## Now we'll make a subclass Cat class Cat(Pet): ## Define some class variables NumberOf = 0 NumberOfMews = 0 ## Define __init__ for the Cat Class def __init__(self,name,sex,age,breed): Pet.__init__(self,name,sex,age) Cat.NumberOf = Cat.NumberOf + 1 self.breed = breed print(self.name + " is a cat.") def mew(self): print("mew") Cat.NumberOfMews = Cat.NumberOfMews + 1 Fido = Dog("Fido","M",4,"Yorkshire Terrier") Frances = Dog("Frances","F",6,"Golden Retriever") MrMittens = Cat("Mr. Mittens","M",4,"Tabby Cat") MissButtons = Cat("Miss Buttons","F",7,"Siamese") print("There are " + str(Pet.NumberOf) + " pets.") print(str(Dog.NumberOf) + " are dogs.") print(str(Cat.NumberOf) + " are cats.")Because `Cat` and `Dog` are subclasses of `Pet` every instance of `Cat` or `Dog` inherits the object variables and object methods of `Pet`. This is why the `__init__` methods of `Cat` and `Dog` include `Pet.__init__(self,name,sex,age)`. So we can access `name`, `sex`, and `age` variables even though they weren't explicitly assigned in the `Dog` and `Cat` classes. Think of it as `Cat` and `Dog` inheriting traits from their 'Parent' class `Pet`.print(Fido.name + " is a " + Fido.breed + ".\n\n") # We can also use a Pet object method MrMittens.NameAndAge() Frances.bork() Frances.bork() MrMittens.mew() MissButtons.mew() MissButtons.mew() print() print() print() Fido.bork() print() print("There have been " + str(Dog.NumberOfBorks) + " borks and " + str(Cat.NumberOfMews) + " mews.") if (Dog.NumberOfBorks + Cat.NumberOfMews) > 5: print("You guys sure are chatty today!") else: print("A normal amount of borks and mews.") ## You code ## Make a fish subclass of Pet ## Give the fish a noise too!Providing an axisYou can also embed the interactive plot into an existing figurewith ioff: fig, (ax1, ax2) = plt.subplots(1,2,figsize=(10,5)) ax1.plot(np.sin(np.linspace(0,np.pi))) fig2, ax2, controls2 = interactive_imshow(f, param1=(-5,5), param2=(-3, 12), ax=ax2)Preventing colormap autoscalingIf you do not specify vmin/vmax and your function does not return an RGB(A) image, then the default behavior is to rescale the colormap for each parameter change. 
This can be disabled using the `autoscale_cmap` argument.fig3, ax3, controls3 = interactive_imshow(f, param1 = (-5,5), param2 = (-3,12), autoscale_cmap=False)vmin and vmax: thresholding an imageYou can also pass `vmin` and `vmax` as functions. Additionally, you do not need to use a function to provide the image; you can also provide an arrayimg = plt.imread("https://matplotlib.org/3.3.1/_images/stinkbug.png") def vmin(min_, max_): return min(min_, max_) def vmax(min_, max_): return max(min_, max_) fig4, ax4, controls4 = interactive_imshow(img, vmin=vmin, vmax = vmax, min_= (0,.7), max_= (.3,1))Extracting embedding features from face dataIn this notebook, we aim to extract embedding features from images using face recognition extractors.As an example, we use the MOBIO dataset and extract Arcface features from the face images:##### CHANGE YOUR DATABASE HERE from bob.bio.face.config.database.mobio_male import database annotation_type = database.annotation_type fixed_positions = database.fixed_positions memory_demanding = True dask_client = None from bob.bio.face.embeddings.mxnet import arcface_insightFace_lresnet100 pipeline = arcface_insightFace_lresnet100(annotation_type=annotation_type, fixed_positions=fixed_positions, memory_demanding=memory_demanding) transformer = pipeline.transformer from bob.pipelines import wrap features_dir = "features" #Path to store extracted features # Wrapping with CHECKPOINT and DASK transformer = wrap(["checkpoint","dask"], transformer, features_dir=features_dir) # Printing the setup of the transformer print(transformer)Pipeline(steps=[('ToDaskBag', ToDaskBag()), ('samplewrapper-1', DaskWrapper(estimator=CheckpointWrapper(estimator=SampleWrapper(estimator=FaceCrop(annotator=BobIpMTCNN(), cropped_image_size=(112, 112), cropped_positions={'leye': (55, 81), 'reye': (55, 42)}), [...]As an example, we consider 10 samples from this database and extract features for these samples:# get 10 samples from database samples = database.all_samples()[:10] # Setting the DASK client # HERE MAKE ABSOLUTELY SURE THAT YOU DO `SETSHELL grid` # BEFORE STARTING THE NOTEBOOK from dask.distributed import Client from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster cluster = SGEMultipleQueuesCluster(min_jobs=1) dask_client = Client(cluster) features = transformer.transform(samples) if dask_client is not None: features = features.compute(scheduler=dask_client)In the following cells, we convert the extracted features to `numpy.array` and check the size of the features.import numpy as np from bob.pipelines import SampleBatch np_features = np.array(SampleBatch(features)) np_features # KILL THE SGE WORKERS dask_client.shutdown()tornado.application - ERROR - Exception in callback functools.partial(. 
at 0x7f3470e7c8c0>, exception=ValueError('invalid operation on non-started TCPListener')>) Traceback (most recent call last): File "/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/tornado/ioloop.py", line 743, in _run_callback ret = callback() File "/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/tornado/tcpserver.py", line 327, in gen.convert_yielded(future), lambda f: f.result() File "/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py", line 451, in _handle_stream logger.debug("Incoming connection from %r to %r", address, self.contact_address) File "/idiap/user/t[...][Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)\appendixInstallation, Python, NumPy, and FilterPy#format the book %matplotlib inline from __future__ import division, print_function from book_format import load_style load_style()This book is written in Jupyter Notebook, a browser based interactive Python environment that mixes Python, text, and math. I choose it because of the interactive features - I found Kalman filtering nearly impossible to learn until I started working in an interactive environment. It is difficult to form an intuition about many of the parameters until you can change them and immediately see the output. An interactive environment also allows you to play 'what if' scenarios. "What if I set $\mathbf{Q}$ to zero?" It is trivial to find out with Jupyter Notebook.Another reason I choose it is because most textbooks leaves many things opaque. For example, there might be a beautiful plot next to some pseudocode. That plot was produced by software, but software that is not available to the reader. I want everything that went into producing this book to be available to you. How do you plot a covariance ellipse? You won't know if you read most books. With Jupyter Notebook all you have to do is look at the source code.Even if you choose to read the book online you will want Python and the SciPy stack installed so that you can write your own Kalman filters. There are many different ways to install these libraries, and I cannot cover them all, but I will cover a few typical scenarios. Installing the SciPy Stack This book requires IPython, Jupyter, NumPy, SciPy, SymPy, and Matplotlib. The SciPy stack of NumPy, SciPy, and Matplotlib depends on third party Fortran and C code, and is not trivial to install from source code. The SciPy website strongly urges using a pre-built installation, and I concur with this advice.I use the Anaconda distribution from Continuum Analytics. This is an excellent distribution that combines all of the packages listed above, plus many others. Installation is very straightforward, and it can be done alongside other Python installations you might already have on your machine. It is free to use. You may download it from here: http://continuum.io/downloads I strongly recommend using the latest Python 3 version that they provide.There are other choices for installing the SciPy stack. You can find instructions here: http://scipy.org/install.htmlMany Linux distributions come with these packages preinstalled. However, they are often somewhat dated and they will need to be updated as the book depends on recent versions of all. Updating a specific Linux installation is beyond the scope of this book. 
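(As a rough aside, not from the book: a quick way to check which versions your current installation provides is to query them from Python itself.)

import numpy, scipy, matplotlib, sympy, IPython

# Print the installed version of each package the book depends on
print('NumPy', numpy.__version__)
print('SciPy', scipy.__version__)
print('Matplotlib', matplotlib.__version__)
print('SymPy', sympy.__version__)
print('IPython', IPython.__version__)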
An advantage of the Anaconda distribution is that it does not modify your local Python installation, so you can install it and not break your Linux distribution. Installing FilterPyFilterPy is a Python library that implements all of the filters used in this book, and quite a few others. Installation is easy using `pip`. Issue the following from the command prompt: pip install filterpy FilterPy is written by me, and the latest development version is always available at https://github.com/rlabbe/filterpy. Downloading and Running the Book The book is stored in a GitHub repository. From the command line type the following: git clone https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python.git If you do not have git installed, browse to https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python where you can download the book via your browser.Now, from the command prompt change to the directory that was just created, and then run Jupyter notebook: cd Kalman-and-Bayesian-Filters-in-Python jupyter notebookA browser window should launch showing you all of the chapters in the book. Browse to the first chapter by clicking on it, then open the notebook in that subdirectory by clicking on the link.More information about running the notebook can be found here:http://jupyter-notebook-beginner-guide.readthedocs.org/en/latest/execute.html Using Jupyter Notebook A complete tutorial on Jupyter Notebook is beyond the scope of this book. Many are available online. In short, Python code is placed in cells. These are prefaced with text like `In [1]:`, and the code itself is in a boxed area. If you press CTRL-ENTER while focus is inside the box the code will run and the results will be displayed below the box. Like this:print(3+7.2)10.2If you have this open in Jupyter Notebook now, go ahead and modify that code by changing the expression inside the print statement and pressing CTRL+ENTER. The output should be changed to reflect what you typed in the code cell. SymPy SymPy is a Python package for performing symbolic mathematics. The full scope of its abilities is beyond this book, but it can perform algebra, integrate and differentiate equations, find solutions to differential equations, and much more. For example, we use it to compute the Jacobian of matrices and expected value integral computations.First, a simple example. We will import SymPy, initialize its pretty print functionality (which will print equations using LaTeX). We will then declare a symbol for SymPy to use.import sympy sympy.init_printing(use_latex='mathjax') phi, x = sympy.symbols('\phi, x') phiNotice how it prints the symbol `phi` using LaTeX. Now let's do some math. What is the derivative of $\sqrt{\phi}$?sympy.diff('sqrt(phi)')We can factor equationssympy.factor(phi**3 -phi**2 + phi - 1)and we can expand them.((phi+1)*(phi-4)).expand()You can evaluate an equation for specific values of its variables:w =x**2 -3*x +4 print(w.subs(x, 4)) print(w.subs(x, 12))8 112You can also use strings for equations that use symbols that you have not defined:x = sympy.expand('(t+1)*2') xNow let's use SymPy to compute the Jacobian of a matrix. 
Given the function$$h=\sqrt{(x^2 + z^2)}$$find the Jacobian with respect to x, y, and z.x, y, z = sympy.symbols('x y z') H = sympy.Matrix([sympy.sqrt(x**2 + z**2)]) state = sympy.Matrix([x, y, z]) H.jacobian(state)Now let's compute the discrete process noise matrix $\mathbf Q$ given the continuous process noise matrix $$\mathbf Q = \Phi_s \begin{bmatrix}0&0&0\\0&0&0\\0&0&1\end{bmatrix}$$The integral is $$\mathbf Q = \int_0^{\Delta t} \mathbf F(t)\mathbf Q\mathbf F^T(t)\, dt$$where $$\mathbf F(\Delta t) = \begin{bmatrix}1 & \Delta t & {\Delta t}^2/2 \\ 0 & 1 & \Delta t\\ 0& 0& 1\end{bmatrix}$$dt = sympy.symbols('\Delta{t}') F_k = sympy.Matrix([[1, dt, dt**2/2], [0, 1, dt], [0, 0, 1]]) Q = sympy.Matrix([[0,0,0], [0,0,0], [0,0,1]]) sympy.integrate(F_k*Q*F_k.T,(dt, 0, dt))https://www.youtube.com/watch?v=EYnC4ACIt2gpip install quandl import quandl import numpy as np from sklearn.linear_model import LinearRegression from sklearn.svm import SVR from sklearn.model_selection import train_test_split df = quandl.get("WIKI/FB") print(df.head()) # Check the current directory and print it out import os print( os.getcwd() ) # Mount the Google drive to talk with the root of colab from google.colab import drive drive.mount('/content/gdrive') # Check the current directory and print it out print( os.getcwd() ) # Save dataframe to the root df.to_csv('data.csv') # Transfer data from root to Google Drive !cp data.csv "/content/gdrive/My Drive" # an example of writing a string with open('/content/gdrive/My Drive/file.txt', 'w') as f: f.write("content") # end this example # Call the dataframe from the google drive df2 = pd.read_csv("/content/gdrive/My Drive/data.csv") df = df2 df=df.set_index('Date') df df = df[['Adj. Close']] print(df.head()) # A variable for predicting 'n' days out into the future forecast_out = 30 # Create another column (the target or dependent variable) shifted 'n' units up df['Prediction'] = df[['Adj. Close']].shift(-forecast_out) # Print(df.head()) print(df.head()) print(df.tail()) ### Create the independent data set (X) ### # Convert the datagrame to a numpy array X = np.array(df.drop(['Prediction'],1)) # Remove the last 'n' row X = X[:-forecast_out] print(X) ### Create the dependent data set (y) ### # Convert the datagrame to a numpy array (All of the values including the NaN's) y = np.array(df['Prediction']) # Get all of the y values except the last 'n'rows y = y[:-forecast_out] print(y) # Split the data into 80% training and 20% testing x_train,x_test,y_train,y_test = train_test_split(X,y, test_size = 0.2) # Create and train the Support Vector Machine (Regressor) svr_rbf = SVR(kernel='rbf',C=1e3, gamma=0.1) svr_rbf.fit(x_train, y_train) # Testing Model: Score returns the coefficient of determination R^2 of the prediction # The best possible score is 1.0 svm_confidence = svr_rbf.score(x_test,y_test) print("svm confidence: ", svm_confidence) # Create and train the Linear Regression Model lr = LinearRegression() # Train the Model lr.fit(x_train, y_train) # Testing Model: Score returns the coefficient of determination R^2 of the prediction # The best possible score is 1.0 lr_confidence = lr.score(x_test,y_test) print("lr confidence: ", lr_confidence) # Set x_forecast equal to the last 30 rows of the original data set from Adj. 
Close column x_forecast = np.array(df.drop(['Prediction'],1))[-forecast_out:] print(x_forecast) # Print linear regression model predictions for the next 'n' days lr_prediction = lr.predict(x_forecast) print(lr_prediction) # Print support vector regression model predictions for the next 'n' days svm_prediction = svr_rbf.predict(x_forecast) print(svm_prediction)[177.08725448 183.52431676 183.968949 181.34157664 179.97736407 181.89736695 182.98873701 187.33400668 188.99127232 185.48474075 182.31168336 179.90662712 180.59378605 184.41358125 183.78705399 187.75842837 186.37400524 189.29443067 188.81948259 185.90916244 188.24348173 187.91000754 189.15295677 176.49104306 172.03461532 173.28766983 168.74029459 163.18239153 163.85944517 155.90659115] [176.9772862 181.21669313 179.89413781 174.5914663 171.63664029 177.8475503 181.89126625 187.20647005 179.04419262 181.17386159 180.11086532 171.6882011 171.89274521 178.83637012 180.45731349 185.31177969 186.33961294 178.72933492 179.52391138 183.72822511 182.38192958 184.43024422 178.79049423 178.3644724 171.31480923 171.95800365 171.84232589 167.34492454 167.89151319 161.63256574]Sample notebook in R for accessing the Agrimetrics GraphQL API to obtain geospatial data from multiple measures within a bounded region Agrimetrics' GraphQL API provides a convenient interface through which data hosted in the Agrimetrics Data Platform can be queried. Since some of these datasets are geospatially indexed (geospatialMeasures), querying the API with a region of interest, whether a polygon or point, will yield data that are implicitly geospatially related. Since all geospatialMeasures data are queried in the same manner - for a given region, return all selected data for the specified datasets - to reduce some of the boiler-plate code that would otherwise be needed, some helper functions have been written and included in this repository. This notebook aims to demonstrate how to query for and visualise data using these helper functions.In this notebook, you will find a few GraphQL queries that have been copied directly from the Query Builder [webapp](https://app.agrimetrics.co.uk/graph-explorer). The queries used here have been crafted within the webapp and pasted into the notebook to demonstrate how you might generate quick visualisations of data of interest. Should there be an alternate set of data of interest, replace the queries below as needed. RequirementsYou will need a subscription key to run this example. Your subscription key is available through the Agrimetrics developer portal:* Log in at https://developer.agrimetrics.co.uk/.* Open your profile through the menu on the top right of the page with your name on it.* Copy one of your subscription keys from the "Field Explorer Subscription" section.* Depending on your subscription (trial vs. paid) amount of data available in this demo may vary SetupIn the following cell `utils`, `transformData`, and `graphqlClient` helper functions have been imported. These files are included in the [Github repository](https://github.com/agrimetrics/api-examples) along with this notebook. 
If you want to tweak the notebook in any way and host it locally, be sure to copy the notebook along with these helper function source files.# To reduce time when installing packages, use all cores available options("stringsAsFactors" = FALSE) # disable warnings options(warn=-1) # source the helper functions used to simplify access to graphQL data # utils.R - some core utilities source("utils.R") # transformData.R - simplifies the transformation of JSON response objects served by Agrimetrics' graphQL Query API # into more useful dataframes source("transformData.R") # graphqlClient.R - a thin wrapper around the [ghql](https://cloud.r-project.org/web/packages/ghql/index.html) library # providing a connection with which the queries in this notebook are executed. source("graphqlClient.R") # installPackages is a helper function optimising time needed to install packages. It does so by making use of # all your machine's CPU cores. packagesRequired <- c('stringr', 'dplyr', 'jsonlite', 'geojsonsf', 'sf', 'tmap', 'ggplot2') installPackages(packagesRequired) library(geojsonsf) library(sf) library(ggplot2) library(tmap)Get a connection to Agrimetrics Query APIThe following configures the GraphQL client used for all queries within this notebook. Once Jupyter has been installed along with the R kernel, the only other requirement is your API key. You can provide your API key in one of two ways:1. If you are running the Jupyter server locally, add it to your environment variables when you run it (preferred option): ```bash $ API_KEY=abcdefghijklmnopqrstuvwxyz jupyter notebook ```2. or set the apiKey variable in the notebook: ```R apiKey <- "" ```*Important*: Note that using the second method above will embed your API key in the evaluated notebook, so it should not be distributed or published afterwards.apiKey <- Sys.getenv("API_KEY", "UNSET") # use the helper function to acquire a connection to the Agrimetrics Query API. This connection is # used throughout the notebook for all subsequent queries. connection <- getConnection(apiKey)[1] "Connection acquired: https://api.agrimetrics.co.uk/graphql"GraphQL queryGraphQL queries have their own query language. The query selects which elements of the schema are to be returned.For a more comprehensive description of the query language, see the [GraphQL introduction](https://graphql.org/learn/). For interactive online documentation and exploration of the GraphQL schema, visit the [Graph Explorer tool](https://app.agrimetrics.co.uk//graph-explorer). Search AreaFor demonstration purposes, the queries throughout will be performed for a region covering Rothamsted Farm, one of our Founding Partners, for which we make most of our data free of charge.# For all the following queries, we will be requesting data for an area of Surrey and Sussex polygon <- c( "type"="Polygon", "coordinates"=list(list(list( c(-0.401073, 51.80076), c(-0.356222, 51.80076), c(-0.356222, 51.819771), c(-0.401073, 51.819771), c(-0.401073, 51.80076) ))) )Query for soil and temperature dataThe [Query Builder](https://app.agrimetrics.co.uk/graph-explorer) serves as a great interactive tool to aid in crafting queries. The following query was produced in this way, retrieving data for soilPh, Abundance of Invertebrates and monthly temperature values. 
The query has been tailored to demonstrate some of the basic concepts available when writing queries; [variables](https://graphql.org/learn/queries/variables) and [fragments](https://graphql.org/learn/queries/fragments) feature in the query to both reduce some code duplication and provide a means of parameterisation of the query.testQuery <- ' fragment monthlyWeatherMeasure on GeospatialTimeSeriesMeasure { datapoints { date value } } fragment location on GeospatialMeasure { location { shape } } query getGridsIntersectingPolygon ($polygon: LocationFilter!, $startDate: Date!, $endDate: Date!) { geospatialMeasures(geoFilter: {location: $polygon, operation: INTERSECTS}) { soilPH { unit value ...location } soilTotalAbundanceOfInvertebrates { unit value ...location } temperatureMeanMonthly (where: { datapoints: { date: { GE: $startDate, LE: $endDate } } }) { ...monthlyWeatherMeasure ...location } temperatureMaxMonthly (where: { datapoints: { date: { GE: $startDate, LE: $endDate } } }) { ...monthlyWeatherMeasure ...location } temperatureMinMonthly (where: { datapoints: { date: { GE: $startDate, LE: $endDate } } }) { ...monthlyWeatherMeasure ...location } } }' variables <- list(polygon=polygon, startDate="2017-01-01", endDate="2018-12-01") # use library function to get data from Query API / GraphQL API response <- getData(connection, testQuery, variables) # convert the data into a dataFrame data <- geospatialMeasuresToDataFrame(response) print(attributes(data)) print(str(data))$names [1] "soilPH" "soilTotalAbundanceOfInvertebrates" [3] "temperatureMeanMonthly" "temperatureMaxMonthly" [5] "temperatureMinMonthly" List of 5 $ soilPH :'data.frame': 8 obs. of 3 variables: ..$ unit : chr [1:8] "http://data.agrimetrics.co.uk/units/ph" "http://data.agrimetrics.co.uk/units/ph" "http://data.agrimetrics.co.uk/units/ph" "http://data.agrimetrics.co.uk/units/ph" ... ..$ value : num [1:8] 6.07 7.05 7.05 7.23 8.21 ... ..$ location:'data.frame': 8 obs. of 1 variable: .. ..$ shape:'data.frame': 8 obs. of 2 variables: .. .. ..$ coordinates:List of 8 .. .. .. ..$ : num [1, 1:5, 1:2] -0.406 -0.406 -0.391 -0.392 -0.406 ... .. .. .. ..$ : num [1, 1:5, 1:2] -0.392 -0.392 -0.377 -0.377 -0.392 ... .. .. .. ..$ : num [1, 1:5, 1:2] -0.392 -0.391 -0.377 -0.377 -0.392 ... .. .. .. ..$ : num [1, 1:5, 1:2] -0.377 -0.377 -0.363 -0.363 -0.377 ... .. .. .. ..$ : num [1, 1:5, 1[...]Prepare data for visualisationThe helper function `griddedTemporalMeasuresToDataFrame` can be used to transform the data into a geospatial dataframe. 
The source for this function can be found within `transformData.R` within this folder of the repository.# transform to a dataframe griddedMonthlyWeatherData <- griddedTemporalMeasuresToDataFrame( response, c("temperatureMeanMonthly", "temperatureMinMonthly", "temperatureMaxMonthly") ) print(attributes(griddedMonthlyWeatherData)) # use the geojson_sf library to generate geometries for each of the 5km grids returned weatherGridShapeGeoms <- griddedMonthlyWeatherData$location %>% geojson_sf # create a spatial features collection monthlyTemperature <- st_sf( cbind( "date"=griddedMonthlyWeatherData$Date, "temperatureMeanMonthly"=griddedMonthlyWeatherData$temperatureMeanMonthly, "temperatureMinMonthly"=griddedMonthlyWeatherData$temperatureMinMonthly, "temperatureMaxMonthly"=griddedMonthlyWeatherData$temperatureMaxMonthly, weatherGridShapeGeoms ) ) print(head(monthlyTemperature)) # Since this data is timeseries, to simplify the plots we will only display min, mean and max for January 2018 filteredDataByDate <- filter(monthlyTemperature, as.Date(date) == as.Date("2018-01-01")) # Generate an interactive leaflet map of all grids tmap_mode("plot") tmap_leaflet( tm_shape(filteredDataByDate, name="Max Temp") + tm_polygons( col="temperatureMaxMonthly", palette="YlOrRd" ) + tm_shape(filteredDataByDate, name="Min Temp") + tm_polygons( col="temperatureMinMonthly", palette="YlGnBu" ) + tm_shape(filteredDataByDate, name="Mean Temp") + tm_polygons( col="temperatureMeanMonthly", palette="Oranges" ) + tm_scale_bar() )$names [1] "Date" "location" "temperatureMeanMonthly" [4] "temperatureMinMonthly" "temperatureMaxMonthly" $row.names [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 $class [1] "data.frame" Simple feature collection with 6 features and 4 fields geometry type: POLYGON dimension: XY bbox: xmin: -0.407 ymin: 51.7769 xmax: -0.3329 ymax: 51.8228 CRS: 4326 date temperatureMeanMonthly temperatureMinMonthly temperatureMaxMonthly 1 2017-01-01 3.1 -0.1 6.4 2 2017-02-01 5.9 3.3 8.6 3 2017-03-01 8.8 4.9 12.7 4 2017-04-01 9.0 4.2 13.9 5 2017-05-01 13.2 8.7 17.9 6 2017-06-01 16.8 12.0 [...]![grid_temperatures.png](./grid_temperatures.png) Output to shapefileShapefiles impose some limitations including the string length of the attributes' names. See [here](http://resources.arcgis.com/en/help/main/10.1/index.html//005600000013000000) for more details.# Duplicate the geospatial dataframe as it will be tweaked for output as a shapefile tempDf <- monthlyTemperature # shapefiles cannot have attributes with names longer than 10 characters so the columns are renamed here. 
names(tempDf) <- c('DATE', 'MEAN', 'MIN', 'MAX', 'geometry') # the dataframe is written to weatherData.shp st_write(tempDf, dsn=paste0("weatherData.shp"), delete_layer = TRUE)Writing layer `weatherData' to data source `weatherData.shp' using driver `ESRI Shapefile' Writing 24 features with 4 fields and geometry type Polygon.Plotting the temperature dataSince the data acquired is of a timeseries format, a x-y chart is used to provide a basic visualisation of this data.# plot the dataframe as timeseries chart df <- monthlyTemperature df$date <- as.Date(df$date) ggplot(data=monthlyTemperature, aes(x = date)) + geom_line(aes(y = temperatureMeanMonthly, group=1), color="orange") + geom_line(aes(y = temperatureMinMonthly, group=1), color="blue") + geom_line(aes(y = temperatureMaxMonthly, group=1), color="red") + labs( x = "Date", y = "Temperature (°C)", title = "Historical Temperature Data" ) + theme(text = element_text(size=12), axis.text.x = element_text(angle = 75))Plotting soil dataSoil information is data derived from modelling appropriately distributed samples across the UK. The data is available at a 1km grid from the CEH. See [here](https://app.agrimetrics.co.uk/catalog/data-sets/c246affc-9566-4f16-8156-81e0f8f71d79/overview) for more details.# get the Total Abundance of Invertebrates for the soil grids within the searched area soilTAOI <- data$soilTotalAbundanceOfInvertebrates # magic one liner to convert the GeoJSON location object into a list of simple features geometry <- soilTAOI$location$shape %>% toJSON %>% geojson_sf # bind the data to these geometries soilTAOIData <- st_sf(cbind(soilTAOI$value, geometry)) # create a static plot, colouring the cells by Total Abundance Of Invertebrates ggplot(soilTAOIData) + geom_sf(aes(fill = soilTAOI.value)) # get the Total Abundance of Invertebrates for the soil grids within the searched area soilPH <- data$soilPH # magic one liner to convert the GeoJSON location object into a dataframe of simple features geometry <- soilPH$location$shape %>% toJSON %>% geojson_sf # create a soilData "simple feature collection" soilPHData <- st_sf(cbind(soilPH$value, geometry)) # create a static plot, colouring the cells by pH ggplot(soilPHData) + geom_sf(aes(fill = soilPH.value))Combining the different datasets in a single plotWithout performing any analysis of the data, this cell simply presents all the following geospatial data in a single plot:* soil pH* total abundance of invertebrates* monthly mean temperature for January 2018 (using previously filtered dataframe)# Putting it all together # we are only going to plot the mean temperature along with soil data tmap_mode("plot") tmap_leaflet( tm_shape(filteredDataByDate) + tm_polygons("temperatureMeanMonthly", title="Temperature Mean Monthly (°C)", palette="-Greys", alpha=0.5) + tm_shape(soilPHData) + tm_polygons("soilPH.value", title="Soil pH", palette="-Greys", alpha=0.5) + tm_shape(soilTAOIData) + tm_polygons("soilTAOI.value", title="Total Abundance Of Invertebrates", palette="-Greys", alpha=0.5) )tmap mode set to plottingfunctional arguments: 1. *args(arguments) 2. **kwargs(keyword arguments)def my_func(a,b): return sum(a,b) my_func(2,3) def my_func(*args): return sum(args) my_func(2,3,4,5)*args can be replaced by any other keywords like *sit. 
however ,conventionally we use args#**kwargs def myfunc(**kwargs): print(kwargs) myfunc(breakfast=30,lunch=40,dinner=60) def myfunc(**kwargs): if 'breakfast' in kwargs: print(f"the cost of breakfast is {kwargs['breakfast']}") elif'lunch' in kwargs: print(f"the cost of breakfast is {kwargs['lunch']}") else: print(f"the cost of breakfast is {kwargs['dinner']}") myfunc(breakfast=30,lunch=40,dinner=60) # usings both functional arguments def myfunc(*args,**kwargs): print('I would like to have {} {}'.format(args[0],kwargs['lunch'])) myfunc(5,7,9,12,breakfast='idli',lunch='paratha')I would like to have 5 parathamap functiondef mysquare(num): return(num**2) mylist=[1,2,3,4] # objective is to get square of each object defined in my list map(mysquare,mylist) list(map(mysquare,mylist))filter functiondef check_even(num): if num%2==0: return num filter(check_even,[1,2,3,4]) list(filter(check_even,[1,2,3,4]))lambda expressionhow a function is converted to lambda expression step by stepmy_square=lambda num:num**2 my_square(2) mylist=[1,2,3,4] list(map(lambda num:num**2,mylist)) list(filter(lambda num:num%2==0,[1,2,3,4]))assignment: reverse a string 'silicon' using lambda expressionx=100 def xyz(): x=70 return x xyz()oopclass student(): def __init__(self,name,regd): self.name=name self.regd=regd x=student('xyz',12345) x x.name x.regd"Working with Data Structures"> "Looking at Maine COVID data using only dictionaries, list comprehension and matplotlib."- toc: false- badges: true- comments: true- author: - categories: [learning, python]import csv import os os.chdir('/Users/antoniojurlina/Projects/learning_python/data/') file_location = "MaineCovidData.txt" countycases = [] with open(file_location) as co: co_reader = csv.reader(co, delimiter='\t') header=next(co_reader) for line in co_reader: countycases.append(line) county = [countycases[x] for x in range(0, 80, 5)] county = [item for l in county for item in l] county = [i.replace(' County', '') for i in county] cases = [countycases[x] for x in range(1, 80, 5)] cases = [item for l in cases for item in l] cases = [int(i.replace(',', '')) for i in cases] cases_new = [countycases[x] for x in range(2, 80, 5)] cases_new = [item for l in cases_new for item in l] cases_new = [int(i.replace('+', '')) for i in cases_new] deaths = [countycases[x] for x in range(3, 80, 5)] deaths = [item for l in deaths for item in l] deaths = [int(i.replace(',', '')) for i in deaths] deaths_new = [countycases[x] for x in range(4, 80, 5)] deaths_new = [item for l in deaths_new for item in l] deaths_new = [int(i.replace('+', '')) for i in deaths_new] cases_updated = [a + b for a, b in zip(cases, cases_new)] deaths_updated = [a + b for a, b in zip(deaths, deaths_new)] cases_by_county = dict(zip(county, cases)) cases_by_county_updated = dict(zip(county, cases_updated)) deaths_by_county_updated = dict(zip(county, deaths_updated)) cases_deaths_by_county = dict(zip(county, (zip(cases_updated, deaths_updated))))Bar chart of the second dictionary showing the cases updated with the increment:import matplotlib.pyplot as plt res = dict(reversed(list(cases_by_county_updated.items()))) %matplotlib inline plt.style.use('ggplot') x_pos = [i for i, _ in enumerate(res.keys())] plt.bar(x_pos, res.values(), color='orange') plt.xlabel("County") plt.ylabel("Cases") plt.title("Maine Covid Cases by County") plt.xticks(x_pos, res.keys(), rotation =90) plt.show()Bar chart that shows the deaths updated with the daily increment:res = dict(reversed(list(deaths_by_county_updated.items()))) %matplotlib inline 
plt.style.use('ggplot') x_pos = [i for i, _ in enumerate(res.keys())] plt.bar(x_pos, res.values(), color='red') plt.xlabel("County") plt.ylabel("Deaths") plt.title("Maine Covid Deaths by County") plt.xticks(x_pos, res.keys(), rotation =90) plt.show()So, we have 2 duplicate songs in 2 separate albumsdf_gb_song = df.groupby('song').count() df_gb_song[df_gb_song.text != 1] df[df.song == 'All_You_Need_Is_Love.txt'] df[df.song == 'Yellow_Submarine.txt']We have to consider this fact when building models -> to not use same texts twice! Just for fun: let's count frequencies of search pattern words in one of the songssong = list(df[(df['song'] == 'All_You_Need_Is_Love.txt') & (df['album'] == 'MagicalMysteryTour')].text)[0] rep = {",": "", ".": "", "(": "", ")": ""} # define desired replacements here rep = dict((re.escape(k), v) for k, v in rep.items()) pattern = re.compile("|".join(rep.keys())) song = pattern.sub(lambda m: rep[re.escape(m.group(0))], song) words = np.array([word.lower() for word in song.split(' ')]) unique, counts = np.unique(words, return_counts=True) freq_df = pd.DataFrame({"word": unique, "count": counts}) freq_df.sort_values('count', ascending=False).head(10)1. Logistic Regressionlm = LogisticRegression() lm.fit(X_train, y_train) X_test = scaler.transform(df_test[features]) y_test = df_test['target'] preds = lm.predict(X_test) # Threshold = 0.5 pd.DataFrame(confusion_matrix(df_test['target'], preds),\ columns=['Predict-not changing (0)','Predict-change (1)'],\ index=['Not changing (0)','Changing (1)']) roc_auc_score(y_test,preds) # Cross-validation from sklearn.model_selection import cross_val_score cv_scores = cross_val_score(lm,X_test, y_test, cv=5, scoring='roc_auc') print('Scores:', cv_scores) print('Mean:', np.mean(cv_scores))Scores: [0.74089266 0.7017615 0.69201734 0.69766532 0.69147265] Mean: 0.7047618917177652Explore threshold valuesdef make_confusion_matrix(model, threshold=0.5): # Predict class 1 if probability of being in class 1 is greater than threshold # (model.predict(X_test) does this automatically with a threshold of 0.5) y_predict = (model.predict_proba(X_test)[:, 1] >= threshold) fraud_confusion = confusion_matrix(y_test, y_predict) plt.figure(dpi=80) sns.heatmap(fraud_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d', xticklabels=['Not Looking', 'Looking'], yticklabels=['Not Looking', 'Looking']); plt.xlabel('prediction') plt.ylabel('actual') # Let's see how our confusion matrix changes with changes to the cutoff! 
from ipywidgets import interactive, FloatSlider interactive(lambda threshold: make_confusion_matrix(lm, threshold), threshold=(0.0,1.0,0.02)) df_test['proba_change'] = lm.predict_proba(X_test)[:, 1] fpr, tpr, thresholds = roc_curve(df_test['target'], df_test['proba_change']) def plot_roc(true, probas): auc = roc_auc_score(true, probas) plt.plot(fpr, tpr, marker='o') plt.xlabel('1 - Specificity (FPR)') plt.ylabel('Sensitivity (TPR)'); plt.title(f"Area Under the ROC Curve: {round(auc, 3)}"); plot_roc(df_test['target'], lm.predict_proba(X_test)[:, 1]) mask = tpr > 0.95 thresholds[mask].max() plot_roc(df_test['target'], lm.predict_proba(X_test)[:, 1] >= 0.113) preds_low_th = (lm.predict_proba(X_test)[:, 1] >= 0.2) # Threshold = 0.146 pd.DataFrame(confusion_matrix(df_test['target'], preds_low_th),\ columns=['Predict-not changing (0)','Predict-change (1)'],\ index=['Not changing (0)','Changing (1)']) print(classification_report(df_test['target'],np.array(preds_low_th))) # Precision, recall curve precision, recall, thresholds = precision_recall_curve(y_test, lm.predict_proba(X_test)[:,1]) plt.plot(thresholds, precision[1:],label='Precision') plt.plot(thresholds, recall[1:],label='Recall'); # Plot random guess precision, recall currve no_skill = len(y_test[y_test==1]) / len(y_test) plt.plot([0,1], [no_skill,no_skill], linestyle='--', label='Random') plt.legend(loc='lower left') plt.xlabel('Threshold (above this probability, label as Looking for change)'); plt.title('Precision and Recall Curves'); plt.plot(recall, precision, marker='.', label='Logistic') # Plot random guess precision, recall currve no_skill = len(y_test[y_test==1]) / len(y_test) plt.plot([0,1], [no_skill,no_skill], linestyle='--', label='Random') plt.xlabel('Recall') plt.ylabel('Precision') plt.legend(); # ROC AUC score score_default = roc_auc_score(y_test, lm.predict(X_test)) # Threshold = 0.5 score_low_th = roc_auc_score(y_test, lm.predict_proba(X_test)[:, 1] > 0.113) # Threshold = 0.113 print('ROC AUC score (threshold = 0.5):', score_default) print('ROC AUC score (threshold = 0.113):', score_low_th) # F1 score score_default = f1_score(y_test, lm.predict(X_test)) # Threshold = 0.5 score_low_th = f1_score(y_test, lm.predict_proba(X_test)[:, 1] > 0.113) # Threshold = 0.113 print('F1 score (threshold = 0.5):', score_default) print('F1 score (threshold = 0.113):', score_low_th) # Log loss score score_default = log_loss(y_test, lm.predict(X_test)) # Threshold = 0.5 score_low_th = log_loss(y_test, lm.predict_proba(X_test)[:, 1] > 0.113) # Threshold = 0.113 print('Log loss (threshold = 0.5):', score_default) print('Log loss (threshold = 0.113):', score_low_th)Log loss (threshold = 0.5): 8.014204741948493 Log loss (threshold = 0.113): 23.134529345396922. 
KNNfrom sklearn import preprocessing le = preprocessing.LabelEncoder() from sklearn.neighbors import KNeighborsClassifier knn_3 = KNeighborsClassifier(n_neighbors=3) knn_3.fit(X_train,y_train) pred = knn_3.predict(X_test) print('Accuracy score:',accuracy_score(y_test,pred),'\n') print('Classification report:\n\n',classification_report(y_test,pred)) knn_5 = KNeighborsClassifier(n_neighbors=5) knn_5.fit(X_train,y_train) pred = knn_5.predict(X_test) print('Accuracy score:',accuracy_score(y_test,pred),'\n') print('Classification report:\n\n',classification_report(y_test,pred)) knn_7 = KNeighborsClassifier(n_neighbors=7) knn_7.fit(X_train,y_train) pred = knn_7.predict(X_test) print('Accuracy score:',accuracy_score(y_test,pred),'\n') print('Classification report (kNN):\n\n',classification_report(y_test,pred)) # knn (n=3) y_scores = knn_3.predict_proba(X_test) fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1]) roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, 'b', label = 'AUC_knn3 = %0.2f' % roc_auc) # knn (n=5) y_scores = knn_5.predict_proba(X_test) fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1]) roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, 'g', label = 'AUC_knn5 = %0.2f' % roc_auc) # knn (n=7) y_scores = knn_7.predict_proba(X_test) fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1]) roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, 'k', label = 'AUC_knn7 = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.title('Receiver Operating Characteristic') plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.title('ROC Curve of kNN') plt.show()3. Naive Bayesfrom sklearn.naive_bayes import GaussianNB nb = GaussianNB() nb.fit(X_train,y_train) pred = nb.predict(X_test) print('Accuracy score:',accuracy_score(y_test,pred),'\n') print('Classification report (Naive Bayes):\n\n',classification_report(y_test,pred)) # Naive Bayes y_scores = nb.predict_proba(X_test) fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1]) roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, 'b', label = 'AUC_nb = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.title('Receiver Operating Characteristic') plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.title('ROC Curve of Naive Bayes') plt.show()INF 8215 - Intelligence artif.: méthodes et algorithmes Automne 2018 - TP3 - Machine Learning Data processing tutorial with pandas and scikit-learn Déroulement du pré-traitement des donnéesLe but de la suite de ce TP est de vous faire une version simplifiée d'un projet complet de machine learning:1. Nettoyage des données, traitement des valeurs manquantes2. Mise en forme des données pour pouvoir les utiliser dans les algorithmes de machine learning3. Feature engineering transformation ou combinaisons de features entre elles4. Comparaison des performances des différents choix effectués lors du traitement des données Scikit-learnhttp://scikit-learn.org/stable/Il s'agit d'une bibliothèque de machine learning et data mining, elle propose des outils pour l'analyse et le traitement des données, des algorithmes classiques de machine learning comme les réseaux de neuronnes, la régression logistique, les SVM ou autre, enfin des outils permettant de comparer les modèles entre eux comme la cross validation. 
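À titre d'illustration (exemple minimal, qui ne fait pas partie du TP original), l'entraînement d'un modèle et sa comparaison par validation croisée avec scikit-learn ressemblent à ceci :

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# A small example dataset bundled with scikit-learn
X, y = load_iris(return_X_y=True)

# A classic classification model
model = LogisticRegression(max_iter=1000)

# 5-fold cross-validation to estimate the model's performance
scores = cross_val_score(model, X, y, cv=5)
print(scores.mean())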
PandasUne bibliothèque permettant de stocker des données et de les manipuler facilementLes deux éléments de base de pandas sont le dataframe et la serie.https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DataFrame.html Quelques commandes utilesSoit **df** un DataFrame* **pd.read_csv(filename)** permet de charger les données depuis un csv* **df.head()** permet d'afficher les 5 premières lignes d'un dataframe* **df.info()** permet d'afficher des informations par colonne du dataframe* **df = pd.DataFrame(data, columns = [name1,...,namen])** permet de créer un DataFrame en spécifiant le nom des colonnes* ** df = df.drop(columns = [name1,...,namen])** permet de supprimer les colonnes dont le nom est mentionné dans le tableau* ** df = pd.concat([df1,df2], axis = 1)** permet de concatener deux dataset dans le sens des colonnes* **df = df.astype(float)** permet de transformer le type des données du dataframe en float* ** df.corr()** permet d'obtenir la matrice de corrélation d'un dataframe colonne par colonne Data leakageComme mentionné dans la partie **train/test set**, quand on transforme les données, il faut apprendre les transformations sur l'ensemble d'entraînement et pas sur l'ensemble de test.Par exemple, admettons que l'on veuille appliquer du **one-hot-encoding** sur une **feature** de type **catégorique** (ensemble de valeurs discrètes).Si quand on applique l'algorithme sur l'ensemble de test, on tombe sur une nouvelle catégorie qui n'était pas dans l'ensemble d'entraînement, il ne faut pas créer une nouvelle catégorie dans l'algorithme de **one-hot-encoding**. Sinon le **pipeline** d'entraînement dispose de données sur l'ensemble de test, c'est la **fuite de données** ou **data leakage**Une solution est d'avoir au préalable ajouté une classe "inconnue" lors de l'entraînement. Types de features et transformations usuelles associées avec sklearn Pour présenter les transformations usuelles avec sklearn, nous allons utiliser un exemple basique. 
Créons un DataFrame, contenant trois features(colonnes):* Une catégorique textuelle (un ensemble discret de valeurs discrètes sous forme de texte)* Une catégorique numérique ( un ensemble discret de valeurs entières)* Deux numériques continues ( n'importe quel nombre réel)import numpy as np import pandas as pd toy_data = [["certainly yes",1,np.nan,2153.5], ["maybe yes",1,0.5,5361.2], ["certainly no",3,6.3,6945.3], [np.nan,2,2.1,3215.2], ["certainly yes",np.nan,0.12,9856.1], ["maybe yes",1,6.9,4561.2], ["maybe no",np.nan,13.0,9453.7], ["certainly yes",np.nan,0.23,3125.0], ["maybe yes",3,5.2,2547.4,], ["maybe no",np.nan,2.0,np.nan], ] column_names = ["textual","categorical","numerical_small","numerical_high"] toy_dataframe = pd.DataFrame(toy_data, columns = column_names)Séparation en ensemble d'entraînement et de testfrom sklearn.model_selection import train_test_split toy_train, toy_test= train_test_split(toy_dataframe, test_size=0.2, random_state=42) toy_train,toy_test = toy_train.reset_index(drop = True), toy_test.reset_index(drop = True) toy_train.head(10) toy_train.info() RangeIndex: 8 entries, 0 to 7 Data columns (total 4 columns): textual 7 non-null object categorical 4 non-null float64 numerical_small 7 non-null float64 numerical_high 7 non-null float64 dtypes: float64(3), object(1) memory usage: 328.0+ bytesComme vous pouvez l'observer, ce dataframe n'est pas utilisable tel quel avec des algorithmes classiques de machine learning.Les principaux problèmes sont les suivants:* Les colonnes contiennent des valeurs nulles (NaN)* La première colonne est sous forme de texte* Les deux dernières colonnes sont non normalisées. * La deuxième colonne n'est pas encodée sous le format one-hot Importance de la normalisationLa normalisation permet d'exprimer la colonne sous-forme d'une distribution normale centrée en 0 et de variance 1, c'est-à-dire des valeurs comprises entre -1 et 1. L'intérêt de réduire ainsi les features est de les mettre toutes **à la même échelle**. En effet, certains algorithmes de classification auront tendance à considérer la colonne **numerical_small** comme négligeable par rapport à la colonne **numerical_high**, et ainsi à l'**ignorer lors de l'apprentissage**. Importance du one-hot-encodingUne feature catégorique peut prendre un nombre restreint de valeurs entières. Par exemple la colonne **categorical** contient des valeurs dans l'ensemble [1,2,3].La sémantique derrière cette représentation est la suivante: à chaque exemple(ligne) du dataset, on associe une catégorie parmi les catégories possibles ([1,2,3]).Néanmoins, certains algorithmes de machine learning auront tendance à prendre en compte l'ordre numérique de ces catégories.C'est-à-dire prendre en compte le fait que 2 est après 1, alors que cette relation ne fait pas de sens dans ce cas là.Pour pallier à ce problème, on utilise le one-hot-encoding vu dans la première partie.Ainsi, on remplacera une feature catégorique pouvant prendre 3 valeurs différentes, par 3 features catégoriques pouvant prendre les valeurs 0 ou 1. 
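Par exemple (exemple hypothétique, indépendant des données du TP), le one-hot-encoding transforme une colonne catégorique en plusieurs colonnes binaires :

import numpy as np
from sklearn.preprocessing import OneHotEncoder

# A categorical column taking its values in [1, 2, 3]
categories = np.array([[1], [3], [2], [1]])

encoder = OneHotEncoder(categories='auto', sparse=False)
print(encoder.fit_transform(categories))
# Each row becomes a vector of 0s with a single 1 marking its category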
Valeurs manquantesPour les valeurs manquantes, on peut utiliser l'objet SimpleImputer de sklearn.SimpleImputer est un objet de type **transformer**, il est munis de deux méthodes:* fit: permet d'apprendre la transformation à effectuer* transform: permet d'appliquer la transformation à effectuer* fit_transform: applique les deux méthodes précédentes à la suite**Remarque: pour éviter la fuite de donnée, on utilise fit_transfom sur l'ensemble d'entrainement et transform sur l'ensemble de test (cf l'exemple suivant **from sklearn.impute import SimpleImputer # Imputers initialisation textual_imputer = SimpleImputer(strategy = 'constant', fill_value = 'maybe yes') categorical_imputer = SimpleImputer(strategy = 'constant', fill_value = 1.0) small_imputer = SimpleImputer(strategy = 'mean') high_imputer = SimpleImputer(strategy = 'mean') #Train set toy_train["textual"] = textual_imputer.fit_transform(toy_train["textual"].values.reshape(-1,1)) toy_train["categorical"] = categorical_imputer.fit_transform(toy_train["categorical"].values.reshape(-1,1)) toy_train["numerical_small"] = small_imputer.fit_transform(toy_train["numerical_small"].values.reshape(-1,1)) toy_train["numerical_high"] = high_imputer.fit_transform(toy_train["numerical_high"].values.reshape(-1,1)) #Test set toy_test["textual"] = textual_imputer.transform(toy_test["textual"].values.reshape(-1,1)) toy_test["categorical"] = categorical_imputer.transform(toy_test["categorical"].values.reshape(-1,1)) toy_test["numerical_small"] = small_imputer.transform(toy_test["numerical_small"].values.reshape(-1,1)) toy_test["numerical_high"] = high_imputer.transform(toy_test["numerical_high"].values.reshape(-1,1)) toy_train.head()Transformation des features Textual Cette commande pandas permet d'observer les différentes valeurs possibles pour une feature, ainsi que leur proportion(toy_train["textual"].value_counts()/len(toy_train))[:10]En observant la distribution des valeurs prises par cette colonne, plusieurs décisions peuvent être prises:* Exprimer cette variable comme une variable catégorique pouvant prendre les valeurs [0,1,2,3].* Séparer la colonne en deux colonnes, **certain** et **positif** chacune pouvant prendre les valeurs [0,1].Dans le cadre de la démonstration, nous choisirons la deuxième option.Il est possible de définir nous-même des fonctions à appliquer sur une colonne. 
On utilise ensuite la méthode **apply** du dataframe combinée à l'utilisation de **lambda expression**.#Custom functions def parse_certain(text): certain, _ = text.split(" ") return certain def parse_positif(text): _, positif = text.split(" ") return positif # Train set certain_train = toy_train.apply(lambda row: pd.Series( parse_certain(row["textual"]) ), axis = 1 ) positif_train = toy_train.apply(lambda row: pd.Series( parse_positif(row["textual"]) ), axis = 1 ) certain_train.columns = ["certain"] positif_train.columns = ["positif"] new_columns = pd.concat([certain_train,positif_train], axis = 1) toy_train = toy_train.drop(columns = ["textual"]) toy_train = pd.concat([toy_train,pd.DataFrame(new_columns)], axis = 1) # Test set certain_test = toy_test.apply(lambda row: pd.Series( parse_certain(row["textual"]) ), axis = 1 ) positif_test = toy_test.apply(lambda row: pd.Series( parse_positif(row["textual"]) ), axis = 1 ) certain_test.columns = ["certain"] positif_test.columns = ["positif"] new_columns = pd.concat([certain_test,positif_test], axis = 1) toy_test = toy_test.drop(columns = ["textual"]) toy_test = pd.concat([toy_test,pd.DataFrame(new_columns)], axis = 1) toy_train.head()Label encoderLa transformation a fonctionné, cependant les deux nouvelles colonnes ne sont toujours pas exploitables dans un algorithme de machine learning. Le **LabelEncoder** de sklearn permet de convertir ces colonnes en colonnes catégoriques numériques.from sklearn.preprocessing import LabelEncoder # encoder initialisation certain_label = LabelEncoder() positif_label = LabelEncoder() #Train set toy_train["certain"] = certain_label.fit_transform(toy_train["certain"].values) toy_train["positif"] = positif_label.fit_transform(toy_train["positif"].values) #Test set toy_test["certain"] = certain_label.transform(toy_test["certain"].values.reshape(-1,1)) toy_test["positif"] = positif_label.transform(toy_test["positif"].values.reshape(-1,1)) toy_train.head()CategoricalPour cette colonne, nous allons uniquement appliquer le one-hot-encoding.from sklearn.preprocessing import OneHotEncoder # encoder initialisation categorical_encoder = OneHotEncoder(categories = 'auto', sparse = False) #Train set new_columns_train = categorical_encoder.fit_transform(toy_train["categorical"].values.reshape(-1,1)) toy_train = toy_train.drop(columns=["categorical"]) toy_train = pd.concat([toy_train,pd.DataFrame(new_columns_train)], axis = 1) #Test set new_columns_test = categorical_encoder.transform(toy_test["categorical"].values.reshape(-1,1)) toy_test = toy_test.drop(columns=["categorical"]) toy_test = pd.concat([toy_test,pd.DataFrame(new_columns_test)], axis = 1)Numerical small et Numerical highPour ces colonnes nous allons nous contenter de les normaliser.from sklearn.preprocessing import StandardScaler # Scalers initialisation small_scaler = StandardScaler() high_scaler = StandardScaler() #Train set toy_train["numerical_small"] = small_scaler.fit_transform(toy_train["numerical_small"].values.reshape(-1,1)) toy_train["numerical_high"] = high_scaler.fit_transform(toy_train["numerical_high"].values.reshape(-1,1)) #Test set toy_test["numerical_small"] = small_scaler.transform(toy_test["numerical_small"].values.reshape(-1,1)) toy_test["numerical_high"] = high_scaler.transform(toy_test["numerical_high"].values.reshape(-1,1)) toy_train.head()PipelineLe dataset résultant est utilisable pour l'apprentissage, et comme nous avons appliqué les transformations sur l'ensemble de test nous pouvons évaluer les performances de l'Algorithme sur ce 
dernier.Cependant, l'implémentation actuelle est fastidieuse et redondante. En effet, elle nécessite beaucoup de copier-coller. D'autre part, si on voulait effectuer une modification dans le processus de traitement des données et tester son impact sur la performance, il faudrait tout relancer manuellement sans rien oublier.Une solution existe dans **sklearn** pour pallier à ce problème, le **pipeline**.Il permet de lister la liste des transformations par colonne et de les appliquer en une seule fois.Ci-dessous, le même prétraitement des données mais cette fois en utilisant le pipeline.from preprocessing import TransformationWrapper from preprocessing import LabelEncoderP from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.compose import ColumnTransformer #textual pipeline_certain = Pipeline([ ('certain', TransformationWrapper(transformation = parse_certain)), ("encode",LabelEncoderP()), ]) pipeline_positif = Pipeline([ ('positif', TransformationWrapper(transformation = parse_positif)), ("encode",LabelEncoderP()), ]) pipeline_textual_u = Pipeline([ ("textual", SimpleImputer(strategy = 'constant', fill_value = 'maybe yes')), ('feats', FeatureUnion([ ('certain', pipeline_certain), ('positif', pipeline_positif) ])), ]) # categorical pipeline_categorical = Pipeline([ ("fillna", SimpleImputer(strategy = 'constant', fill_value = 1.0) ), ("encode",OneHotEncoder(categories = 'auto', sparse = False)) ]) # numerical pipeline_numerical = Pipeline([ ("fillna", SimpleImputer(strategy = 'mean') ), ("scaler",StandardScaler()) ]) full_pipeline = ColumnTransformer([ ("textual", pipeline_textual_u, ["textual"]), ("categorical", pipeline_categorical, ["categorical"]), ("numerical small", pipeline_numerical, ["numerical_small"]), ("numerical high", pipeline_numerical, ["numerical_high"]), ]) toy_test toy_train, toy_test= train_test_split(toy_dataframe, test_size=0.2, random_state=42) toy_train,toy_test = toy_train.reset_index(drop = True), toy_test.reset_index(drop = True) columns = ["certain","positif","cat0", "cat1","cat2","numerical small","numerical high"] toy_train = pd.DataFrame(full_pipeline.fit_transform(toy_train),columns= columns) toy_test = pd.DataFrame(full_pipeline.transform(toy_test),columns= columns) toy_train.head()Comme vous pouvez le remarquer, le code utilisant le pipeline est beaucoup plus compact et facile à modifier.De plus il s'exécute en une ligne, en utilisant **pipeline.fit_transform()** et les transformations peuvent être appliquées en une lignesur l'ensemble de test sans fuite de donnée avec **pipeline.transform()**. Explication du code du pipeline PipelineIl s'agit d'une suite d'opérations. Chaque opération est représenté par un tuple de la forme:**(transformation name, transformer(parameters))** ColumnTransformerPermet de sélectionner les colonnes sur lesquelles appliquer une transformation. La transformation peut être un transformer simple ou un pipeline. 
Looking at the data First n rows of a DataFrametoy_dataframe.head(5)Data types and missing values per columntoy_dataframe.info() RangeIndex: 10 entries, 0 to 9 Data columns (total 4 columns): textual 9 non-null object categorical 6 non-null float64 numerical_small 9 non-null float64 numerical_high 9 non-null float64 dtypes: float64(3), object(1) memory usage: 392.0+ bytesNumber of null values per columntoy_dataframe.isnull().sum()Distinct values and their counts for a column(toy_dataframe["categorical"].value_counts())Number of distinct values for a columntoy_dataframe["categorical"].nunique()Plot the distribution of the datatoy_dataframe.hist(figsize=(15,5))Installing and importing modules and libraries Importing built-in modulesWhen you install Python, you get a ton of functionality right out of the box, without importing anything.You also get access to other code modules that come bundled in what's called the "[standard library](https://docs.python.org/3/library/)" -- but to use these bits of functionality, you need to _import_ them into your script.Let's import the `time` module from the standard library, then use its `sleep()` function to pause the script for however many seconds we specify.import time time.sleep(2)Importing external librariesYou can also _install_ external Python libraries -- software written by people around the world to help Python developers accomplish different tasks. In this session, for instance, we're using [Jupyter notebooks](https://jupyter.org/) and the [`pandas`](https://pandas.pydata.org) data analysis library.To manage these dependencies, we're using a tool called [pipenv](https://docs.pipenv.org/basics/). 
This tool also manages our project's [_virtual environment_](https://realpython.com/python-virtual-environments-a-primer/) -- a way to isolate our project and its dependencies (`jupyter` and `pandas`) from other projects and their dependencies.Once we installed pipenv, we went to our computer's *terminal*, moved into the project directory that contains these files, and ran this command: `pipenv install jupyter pandas`.👉For more details on the *terminal* and starting a Python project from scratch, [see this notebook](Starting%20a%20new%20Python%20project%20from%20scratch.ipynb).Let's import pandas. When we import it, let's use the [`as`](https://docs.python.org/3/reference/simple_stmts.htmlthe-import-statement) keyword to refer to the library as `pd`, a convention that makes it quicker to type.import pandas as pdImporting local codeLet's pretend that you have a local Python file, `myfile.py`, that contains some things you'd like to import into this script.Surprise, you don't have to pretend! There *is* a file called `myfile.py` in this folder that contains some things we'd like to import into *this* script. Specifically, we'd like to import a dictionary called `codys_dog` that has some details about my dog Charlie.(This is Charlie:![charlie](../img/charlie.jpg "charlie"))from myfile import codys_dog codys_dogPendahuluan Karakter Optik ![Robot membaca koran](./images/ocr.jpg) Tantangan visi komputer yang umum adalah mendeteksi dan menafsirkan teks pada gambar. Pemrosesan seperti ini ini sering disebut sebagai *Pendahuluan Karakter Optik (OCR)*. Menggunakan Layanan Visi Komputer untuk Membaca Teks dalam Gambar Layanan kognitif **Visi Komputer** menyediakan dukungan untuk tugas-tugas OCR, termasuk: - API **OCR** dapat Anda gunakan untuk membaca teks dalam beberapa bahasa. API ini dapat digunakan secara sinkron, dan berfungsi dengan baik saat Anda perlu mendeteksi dan membaca sejumlah kecil teks dalam sebuah gambar. - API **Baca** yang dioptimalkan untuk dokumen yang lebih besar. API ini digunakan secara asinkron, dan dapat digunakan untuk teks cetak dan tulisan tangan. Anda dapat menggunakan layanan ini dengan membuat sumber daya **Visi Komputer** atau sumber daya **Cognitive Services**. Jika Anda belum melakukannya, buat sumber daya **Cognitive Services** di langganan Azure Anda. > **Catatan**: Jika Anda sudah memiliki sumber daya Cognitive Services, cukup buka halaman Mulai cepat di portal Microsoft Azure dan salin kunci dan titik akhirnya ke sel di bawah. Atau, ikuti langkah-langkah di bawah untuk membuatnya. 1. Di tab browser lain, buka portal Microsoft Azure di https://portal.azure.com, masuk menggunakan akun Microsoft Anda. 2. Klik tombol **&65291;Buat sumber daya**, cari *Cognitive Services*, dan buat sumber daya **Cognitive Services** dengan pengaturan berikut: - **Langganan**: *Langganan Azure Anda*. - **Grup sumber daya**: *Pilih atau buat grup sumber daya dengan nama unik*. - **Wilayah**: *Pilih wilayah yang tersedia*: - **Nama**: *Masukkan nama yang unik*. - **Tingkat Harga**: S0 - **Saya mengonfirmasi bahwa saya telah membaca dan memahami pemberitahuan tersebut**: Dipilih. 3. Tunggu penyebaran hingga selesai. Lalu, buka sumber daya layanan kognitif, dan di halaman **Ringkasan**, klik tautan untuk mengelola kunci layanan. Anda akan memerlukan titik akhir dan kunci untuk terhubung ke sumber daya layanan kognitif Anda dari aplikasi klien. 
Mendapatkan Kunci dan Titik Akhir untuk sumber daya Cognitive Services Anda Untuk menggunakan sumber daya layanan kognitif, aplikasi klien memerlukan titik akhir dan kunci autentikasi: 1. Di portal Azure, di halaman **Kunci dan Titik Akhir** untuk sumber daya layanan kognitif Anda, salin **Kunci1** untuk sumber daya dan tempel pada kode di bawah, menggantikan **YOUR_COG_KEY**. 2. Salin **titik akhir** untuk sumber daya dan tempel pada kode di bawah, menggantikan **YOUR_COG_ENDPOINT**. 3. Jalankan kode pada sel di bawah dengan mengeklik tombol **Jalankan sel** (&9655;) (ke sebelah kiri sel).cog_key = 'YOUR_COG_KEY' cog_endpoint = 'YOUR_COG_ENDPOINT' print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key))Setelah menyiapkan kunci dan titik akhir, Anda dapat menggunakan sumber daya layanan visi komputer untuk mengekstrak teks gambar. Mari kita mulai dengan API **OCR**, yang memungkinkan Anda menganalisis gambar secara sinkron dan membaca isi teksnya. Dalam hal ini, Anda memiliki gambar iklan untuk perusahaan ritel Northwind Traders fiktif yang menyertakan beberapa teks. Jalankan sel di bawah untuk membacanya.from azure.cognitiveservices.vision.computervision import ComputerVisionClient from msrest.authentication import CognitiveServicesCredentials import matplotlib.pyplot as plt from PIL import Image, ImageDraw import os %matplotlib inline # Get a client for the computer vision service computervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key)) # Read the image file image_path = os.path.join('data', 'ocr', 'advert.jpg') image_stream = open(image_path, "rb") # Use the Computer Vision service to find text in the image read_results = computervision_client.recognize_printed_text_in_stream(image_stream) # Process the text line by line for region in read_results.regions: for line in region.lines: # Read the words in the line of text line_text = '' for word in line.words: line_text += word.text + ' ' print(line_text.rstrip()) # Open image to display it. fig = plt.figure(figsize=(7, 7)) img = Image.open(image_path) draw = ImageDraw.Draw(img) plt.axis('off') plt.imshow(img)Teks yang ditemukan dalam gambar diatur ke dalam struktur hierarki wilayah, garis, dan kata, dan kode membacanya untuk mengambil hasilnya. Di hasil, lihat teks yang dibaca di atas gambar. Menampilkan kotak pembatas Hasil tersebut juga mencakup koordinat *kotak pembatas* untuk baris teks dan masing-masing kata yang ditemukan dalam gambar. Jalankan sel di bawah ini untuk melihat kotak pembatas untuk baris teks pada gambar iklan yang Anda ambil di atas.# Open image to display it. fig = plt.figure(figsize=(7, 7)) img = Image.open(image_path) draw = ImageDraw.Draw(img) # Process the text line by line for region in read_results.regions: for line in region.lines: # Show the position of the line of text l,t,w,h = list(map(int, line.bounding_box.split(','))) draw.rectangle(((l,t), (l+w, t+h)), outline='magenta', width=5) # Read the words in the line of text line_text = '' for word in line.words: line_text += word.text + ' ' print(line_text.rstrip()) # Show the image with the text locations highlighted plt.axis('off') plt.imshow(img)Hasilnya, kotak pembatas untuk setiap baris teks ditampilkan sebagai persegi panjang pada gambar. Menggunakan API Baca API OCR yang Anda gunakan sebelumnya berfungsi dengan baik untuk gambar dengan sedikit teks. Saat Anda harus membaca lebih banyak teks, seperti dokumen yang dipindai, Anda dapat menggunakan API **Baca**. 
Hal ini memerlukan proses beberapa langkah: 1. Kirim gambar ke layanan Visi Komputer untuk dibaca dan dianalisis secara asinkron. 2. Tunggu hingga operasi analisis selesai. 3. Ambil hasil analisis tersebut. Jalankan sel berikut untuk menggunakan proses ini untuk membaca teks dalam surat yang dipindai ke pengelola toko Northwind Traders.from azure.cognitiveservices.vision.computervision import ComputerVisionClient from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes from msrest.authentication import CognitiveServicesCredentials import matplotlib.pyplot as plt from PIL import Image import time import os %matplotlib inline # Read the image file image_path = os.path.join('data', 'ocr', 'letter.jpg') image_stream = open(image_path, "rb") # Get a client for the computer vision service computervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key)) # Submit a request to read printed text in the image and get the operation ID read_operation = computervision_client.read_in_stream(image_stream, raw=True) operation_location = read_operation.headers["Operation-Location"] operation_id = operation_location.split("/")[-1] # Wait for the asynchronous operation to complete while True: read_results = computervision_client.get_read_result(operation_id) if read_results.status not in [OperationStatusCodes.running]: break time.sleep(1) # If the operation was successfuly, process the text line by line if read_results.status == OperationStatusCodes.succeeded: for result in read_results.analyze_result.read_results: for line in result.lines: print(line.text) # Open image and display it. print('\n') fig = plt.figure(figsize=(12,12)) img = Image.open(image_path) plt.axis('off') plt.imshow(img)Tinjau hasilnya. Ada transkripsi lengkap surat tersebut, yang sebagian besar berisi teks cetak dengan tanda tangan tulisan tangan. Gambar asli surat ditampilkan di bawah hasil OCR (Anda harus menggulir ke bawah untuk melihatnya). Membaca teks tulisan tangan Pada contoh sebelumnya, permintaan untuk menganalisis gambar menentukan mode pengenalan teks yang mengoptimalkan operasi untuk teks yang *dicetak*. Perhatikan bahwa meskipun demikian, tanda tangan tulisan tangan ini bisa terbaca. Kemampuan untuk membaca teks tulisan tangan ini sangat bermanfaat. Misalnya, Anda menulis catatan yang berisi daftar belanja, dan ingin menggunakan aplikasi di ponsel untuk membaca catatan tersebut dan mentranskripsikan teks di dalamnya. 
Jalankan sel di bawah untuk melihat contoh operasi baca untuk daftar belanja tulisan tangan.from azure.cognitiveservices.vision.computervision import ComputerVisionClient from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes from msrest.authentication import CognitiveServicesCredentials import matplotlib.pyplot as plt from PIL import Image import time import os %matplotlib inline # Read the image file image_path = os.path.join('data', 'ocr', 'note.jpg') image_stream = open(image_path, "rb") # Get a client for the computer vision service computervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key)) # Submit a request to read printed text in the image and get the operation ID read_operation = computervision_client.read_in_stream(image_stream, raw=True) operation_location = read_operation.headers["Operation-Location"] operation_id = operation_location.split("/")[-1] # Wait for the asynchronous operation to complete while True: read_results = computervision_client.get_read_result(operation_id) if read_results.status not in [OperationStatusCodes.running]: break time.sleep(1) # If the operation was successfuly, process the text line by line if read_results.status == OperationStatusCodes.succeeded: for result in read_results.analyze_result.read_results: for line in result.lines: print(line.text) # Open image and display it. print('\n') fig = plt.figure(figsize=(12,12)) img = Image.open(image_path) plt.axis('off') plt.imshow(img)import os import numpy as np import pandas as pd from getpass import getpass def access_kaggle(): """ Access Kaggle from Google Colab. If the /root/.kaggle does not exist then prompt for the username and for the Kaggle API key. Creates the kaggle.json access file in the /root/.kaggle/ folder. """ KAGGLE_ROOT = os.path.join('/root', '.kaggle') KAGGLE_PATH = os.path.join(KAGGLE_ROOT, 'kaggle.json') if '.kaggle' not in os.listdir(path='/root'): user = getpass(prompt='Kaggle username: ') key = getpass(prompt='Kaggle API key: ') !mkdir $KAGGLE_ROOT !touch $KAGGLE_PATH !chmod 666 $KAGGLE_PATH with open(KAGGLE_PATH, mode='w') as f: f.write('{"username":"%s", "key":"%s"}' %(user, key)) f.close() !chmod 600 $KAGGLE_PATH del user del key success_msg = "Kaggle is successfully set up. Good to go." print(f'{success_msg}') access_kaggle() !kaggle datasets download zynicide/wine-reviews --unzip -p datasets !ls -lh datasets df = pd.read_csv('datasets/winemag-data-130k-v2.csv', sep=",", index_col=0) df.shape df.head(3)IntroductionIn these exercises we'll apply groupwise analysis to our dataset.Run the code cell below to load the data before running the exercises. Exercises 1.Who are the most common wine reviewers in the dataset? Create a `Series` whose index is the `taster_twitter_handle` category from the dataset, and whose values count how many reviews each person wrote.df.columns df.groupby(by=['taster_twitter_handle']).size()2.What is the best wine I can buy for a given amount of money? Create a `Series` whose index is wine prices and whose values is the maximum number of points a wine costing that much was given in a review. Sort the values by price, ascending (so that `4.0` dollars is at the top and `3300.0` dollars is at the bottom).df.groupby(by=['price']).points.max().sort_index(ascending=True)3.What are the minimum and maximum prices for each `variety` of wine? 
Create a `DataFrame` whose index is the `variety` category from the dataset and whose values are the `min` and `max` values thereof.min_max_variety_df = df.groupby(by=['variety']).price.agg(['min', 'max']) min_max_variety_df4.What are the most expensive wine varieties? Create a variable `sorted_varieties` containing a copy of the dataframe from the previous question where varieties are sorted in descending order based on minimum price, then on maximum price (to break ties).sorted_varieties = min_max_variety_df.sort_values(by=['min', 'max'], ascending=[False, False]) sorted_varieties5.Create a `Series` whose index is reviewers and whose values is the average review score given out by that reviewer. Hint: you will need the `taster_name` and `points` columns.reviewer_mean_ratings = df.groupby(by=['taster_name']).points.mean() reviewer_mean_ratingsAre there significant differences in the average scores assigned by the various reviewers? Run the cell below to use the `describe()` method to see a summary of the range of values.reviewer_mean_ratings.describe()6.What combination of countries and varieties are most common? Create a `Series` whose index is a `MultiIndex`of `{country, variety}` pairs. For example, a pinot noir produced in the US should map to `{"US", "Pinot Noir"}`. Sort the values in the `Series` in descending order based on wine count.df.groupby(by=['country', 'variety']).size() # country_variety_counts = ____NMT-Keras tutorial 2. Creating and training a Neural Translation Model Now, we'll create and train a Neural Machine Translation (NMT) model. Since there is a significant number of hyperparameters, we'll use the default ones, specified in the `config.py` file. Note that almost every hardcoded parameter is automatically set from config if we run `main.py `.We'll create the so-called `'GroundHogModel'`. It is defined in the `model_zoo.py` file. See the `neural_machine_translation.pdf` for an overview of such system.If you followed the notebook `1_dataset_tutorial.ipynb`, you should have a dataset instance. Otherwise, you should follow that notebook first. First, we'll make some imports, load the default parameters and load the dataset.from config import load_parameters from nmt_keras.model_zoo import TranslationModel from keras_wrapper.cnn_model import loadModel from keras_wrapper.dataset import loadDataset from keras_wrapper.extra.callbacks import PrintPerformanceMetricOnEpochEndOrEachNUpdates params = load_parameters() dataset = loadDataset('datasets/Dataset_tutorial_dataset.pkl')[26/04/2017 13:51:24] <<< Loading Dataset instance from datasets/Dataset_tutorial_dataset.pkl ... 
>>>Since the number of words in the dataset may be unknown beforehand, we must update the params information according to the dataset instance:params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len['source_text'] params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len['target_text']Now, we create a TranslationModel instance:nmt_model = TranslationModel(params, model_type='GroundHogModel', model_name='tutorial_model', vocabularies=dataset.vocabulary, store_path='trained_models/tutorial_model/', verbose=True)[26/04/2017 13:50:11] <<< Building GroundHogModel Translation_Model >>>Now, we must define the inputs and outputs mapping from our Dataset instance to our modelinputMapping = dict() for i, id_in in enumerate(params['INPUTS_IDS_DATASET']): pos_source = dataset.ids_inputs.index(id_in) id_dest = nmt_model.ids_inputs[i] inputMapping[id_dest] = pos_source nmt_model.setInputsMapping(inputMapping) outputMapping = dict() for i, id_out in enumerate(params['OUTPUTS_IDS_DATASET']): pos_target = dataset.ids_outputs.index(id_out) id_dest = nmt_model.ids_outputs[i] outputMapping[id_dest] = pos_target nmt_model.setOutputsMapping(outputMapping)We can add some callbacks for controlling the training (e.g. Sampling each N updates, early stop, learning rate annealing...). For instance, let's build an Early-Stop callback. After each 2 epochs, it will compute the 'coco' scores on the development set. If the metric 'Bleu_4' doesn't improve during more than 5 checkings, it will stop. We need to pass some variables to the callback (in the extra_vars dictionary):extra_vars = {'language': 'en', 'n_parallel_loaders': 8, 'tokenize_f': eval('dataset.' + 'tokenize_none'), 'beam_size': 12, 'maxlen': 50, 'model_inputs': ['source_text', 'state_below'], 'model_outputs': ['target_text'], 'dataset_inputs': ['source_text', 'state_below'], 'dataset_outputs': ['target_text'], 'normalize': True, 'alpha_factor': 0.6, 'val': {'references': dataset.extra_variables['val']['target_text']} } vocab = dataset.vocabulary['target_text']['idx2words'] callbacks = [] callbacks.append(PrintPerformanceMetricOnEpochEndOrEachNUpdates(nmt_model, dataset, gt_id='target_text', metric_name=['coco'], set_name=['val'], batch_size=50, each_n_epochs=2, extra_vars=extra_vars, reload_epoch=0, is_text=True, index2word_y=vocab, sampling_type='max_likelihood', beam_search=True, save_path=nmt_model.model_path, start_eval_on_epoch=0, write_samples=True, write_type='list', verbose=True))Now we are almost ready to train. We set up some training parameters...training_params = {'n_epochs': 100, 'batch_size': 40, 'maxlen': 30, 'epochs_for_save': 1, 'verbose': 0, 'eval_on_sets': [], 'n_parallel_loaders': 8, 'extra_callbacks': callbacks, 'reload_epoch': 0, 'epoch_offset': 0}And train!nmt_model.trainNet(dataset, training_params)Size ScalesScales control how a plot maps data values to the visual values of anaesthetic.import pandas as pd from lets_plot import * LetsPlot.setup_html() df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv') p = ggplot(df, aes('cty', 'hwy')) + geom_point(aes(size='cyl'), shape=21, alpha=.2) p1 = p + ggtitle('Default') p2 = p + scale_size(range=[3, 6]) + ggtitle('With Scale') w, h = 400, 300 bunch = GGBunch() bunch.add_plot(p1, 0, 0, w, h) bunch.add_plot(p2, w, 0, w, h) bunch p1 = p + ggtitle('Default') p2 = p + scale_size_area() + ggtitle('With Scale') w, h = 400, 300 bunch = GGBunch() bunch.add_plot(p1, 0, 0, w, h) bunch.add_plot(p2, w, 0, w, h) bunch0. 
Module importsimport os import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # visualization library %matplotlib inline1. Data LoadingTRAIN_PATH = os.path.join("..", "input", "train.csv") TEST_PATH = os.path.join("..", "input", "test.csv") train = pd.read_csv(TRAIN_PATH) test = pd.read_csv(TEST_PATH) train.head()Show the columns in the dataset 2. Data explorationtrain.describe()It seems that there are outliers in the "trip_duration"train.isnull().sum() plt.subplots(figsize=(11,9)) plt.title("destribution of trip_duration values") train["trip_duration"].plot.box() train.info() Index: 1458644 entries, id2875421 to id1209952 Data columns (total 10 columns): vendor_id 1458644 non-null int64 pickup_datetime 1458644 non-null object dropoff_datetime 1458644 non-null object passenger_count 1458644 non-null int64 pickup_longitude 1458644 non-null float64 pickup_latitude 1458644 non-null float64 dropoff_longitude 1458644 non-null float64 dropoff_latitude 1458644 non-null float64 store_and_fwd_flag 1458644 non-null object trip_duration 1458644 non-null int64 dtypes: float64(4), int64(3), object(3) memory usage: 122.4+ MB3. Data preprocessing I want to ignore some values that I consider to be outliers and that can bias my prediction.And remove the case we have 0 passengertrain = train[train['passenger_count']>= 1]we don't take 0 passengertrain = train[train['trip_duration']>= 90 ] train = train[train['trip_duration']<= 10800 ]Let's take only : the duration that are less than 3hours, 3*3600 = 10800sand more than 1min30, 90swe will ignore 2112 lines + 15799 lines = 17911 lineswe will ignore 1.23% of the total of our data set we remove outliers from altitude and longetudeplt.subplots(figsize=(11,9)) plt.title("destribution of pickup_longitude values") train["pickup_longitude"].plot.box() train = train.loc[train['pickup_longitude']> -80] plt.subplots(figsize=(11,9)) plt.title("destribution of pickup_latitude values") train["pickup_latitude"].plot.box() train = train.loc[train['pickup_latitude']< 44] plt.subplots(figsize=(11,9)) plt.title("destribution of dropoff_longitude values") train["dropoff_longitude"].plot.box() train = train.loc[train['dropoff_longitude']> -90] plt.subplots(figsize=(11,9)) plt.title("destribution of dropoff_latitude values") train["dropoff_latitude"].plot.box() train = train.loc[train['dropoff_latitude']> 34]4. Features engineering We convert string to datetrain['pickup_datetime'] = pd.to_datetime(train['pickup_datetime'], format='%Y-%m-%d %H:%M:%S') test['pickup_datetime'] = pd.to_datetime(test['pickup_datetime'], format='%Y-%m-%d %H:%M:%S')Creation of a new column hourtrain['hour'] = train.loc[:,'pickup_datetime'].dt.hour; test['hour'] = test.loc[:,'pickup_datetime'].dt.hour;Features engineeringtrain['dist'] = (abs(train['pickup_latitude']-train['dropoff_latitude']) + abs(train['pickup_longitude']-train['dropoff_longitude'])) test['dist'] = (abs(test['pickup_latitude']-test['dropoff_latitude']) + abs(test['pickup_longitude']-test['dropoff_longitude'])) y_train = train["trip_duration"] X_train = train[["vendor_id", "passenger_count", "pickup_longitude", "pickup_latitude", "dropoff_longitude","dropoff_latitude", "dist", "hour" ]]5. Choice of a validation methodfrom sklearn.model_selection import train_test_split X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, random_state=42)6. 
Selection of modelsfrom sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import SGDRegressor from sklearn.linear_model import LinearRegression from sklearn.model_selection import ShuffleSplit sgd = SGDRegressor() sgd.fit(X_train, y_train) rfr = RandomForestRegressor(n_estimators=10,min_samples_leaf=100, min_samples_split=150) rfr.fit(X_train, y_train) cv = ShuffleSplit(n_splits=4, test_size=0.8, random_state=42) cv_scores = cross_val_score(knn, X_train, y_train, cv=cv, scoring= 'neg_mean_squared_log_error') cv_scores7. Training of the modelm = RandomForestRegressor(n_estimators=10,min_samples_leaf=100, min_samples_split=150) m.fit(X_train, y_train)8. PredictionX_test = test[["vendor_id", "passenger_count","pickup_longitude", "pickup_latitude","dropoff_longitude","dropoff_latitude","dist", "hour"]] prediction = m.predict(X_test) prediction test.head()9. Prediction submissionsmy_submission = pd.DataFrame({'id': test.id, 'trip_duration': prediction}) my_submission.head() my_submission.to_csv('submission.csv', index=False)Information maximiser Using neural networks, sufficient statistics can be obtained from data by maximising the Fisher information.The neural network takes some data ${\bf d}$ and maps it to a compressed summary $\mathscr{f}:{\bf d}\to{\bf x}$ where ${\bf x}$ can have the same size as the dimensionality of the parameter space, rather than the data space.To train the neural network a batch of simulations ${\bf d}_{\sf sim}^{\sf fid}$ created at a fiducial parameter value $\boldsymbol{\theta}^{\rm fid}$ are compressed by the neural network to obtain ${\bf x}_{\sf sim}^{\sf fid}$. From this we can calculate the covariance ${\bf C_\mathscr{f}}$ of the compressed summaries. We also use simulations ${\bf d}_{\sf sim}^{\sf fid+}$ created above the fiducial parameter value $\boldsymbol{\theta}^{\sf fid+}$ and simulations ${\bf d}_{\sf sim}^{\sf fid-}$ created below the fiducial parameter value $\boldsymbol{\theta}^{\sf fid-}$. These are compressed using the network and used to find mean of the summaries $\partial\boldsymbol{\mu}_\mathscr{f}/\partial\theta_\alpha\equiv\boldsymbol{\mu}_\mathscr{f},_\alpha$ where the numerical derivative is $({\bf x}_{\sf sim}^{\sf fid+}-{\bf x}_{\sf sim}^{\sf fid-})/(\boldsymbol{\theta}^{\sf fid+}-\boldsymbol{\theta}^{\sf fid-})$. We then use ${\bf C}_\mathscr{f}$ and $\boldsymbol{\mu}_\mathscr{f},_\alpha$ to calculate the Fisher information$${\bf F}_{\alpha\beta} = \boldsymbol{\mu}_\mathscr{f},^T_{\alpha}{\bf C}^{-1}_\mathscr{f}\boldsymbol{\mu}_\mathscr{f},_{\beta}.$$We want to maximise the Fisher information so to train the network we use the loss function$$\Lambda = -\frac{1}{2}|{\bf F}_{\alpha\beta}|.$$ When using this code please cite arXiv:1802.03537.The code in the paper can be downloaded as v1 or v1.1 of the code kept on zenodo:[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1175196.svg)](https://doi.org/10.5281/zenodo.1175196)The code presented below is version two (and is much more powerful) and is under constant development. This code is run using>`python-3.6.6`>`jupyter-4.4.0`>`tensorflow-1.10.1`>`numpy-1.14.5`>`tqdm==4.25.0`>`sys (native)`Although these precise versions may not be necessary, I have put them here to avoid possible conflicts. 
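To make the Fisher-information loss above concrete, here is a small, self-contained numpy sketch, independent of the IMNN code, that computes ${\bf F}_{\alpha\beta} = \boldsymbol{\mu},^T_\alpha{\bf C}^{-1}\boldsymbol{\mu},_\beta$ and $\Lambda = -\frac{1}{2}|{\bf F}|$ for a single parameter, using hand-crafted summaries (the sample variance) in place of network outputs. All names are illustrative and not part of the IMNN API; the shared noise array mimics the seed-matching trick used later to suppress sample variance in the derivatives.

```python
import numpy as np

rng = np.random.default_rng(0)
n_s, n_summaries = 1000, 1
theta_fid, delta = 1.0, 0.1

# Shared noise realisations so the upper/lower simulations differ only through theta.
z = rng.normal(0.0, 1.0, (n_s, 10))
x_fid   = (np.sqrt(theta_fid)         * z).var(axis=1, keepdims=True)  # "summaries" at the fiducial value
x_minus = (np.sqrt(theta_fid - delta) * z).var(axis=1, keepdims=True)  # summaries just below
x_plus  = (np.sqrt(theta_fid + delta) * z).var(axis=1, keepdims=True)  # summaries just above

# Covariance of the fiducial summaries and numerical derivative of their mean.
C = np.cov(x_fid, rowvar=False).reshape(n_summaries, n_summaries)
dmu_dtheta = ((x_plus.mean(axis=0) - x_minus.mean(axis=0)) / (2.0 * delta)).reshape(n_summaries, 1)

# Fisher information and the loss that the network maximises / minimises.
F = dmu_dtheta.T @ np.linalg.inv(C) @ dmu_dtheta
loss = -0.5 * np.linalg.det(F)
print(F, loss)
```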
Load modules%matplotlib inline import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import IMNN/Users/charnock/.pyenv/versions/3.6.6/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6 return f(*args, **kwds)Generate dataIn this example we are going to use $n_{\bf d}=10$ data points of a 1D field of Gaussian noise with zero mean and unknown variance to see if the network can learn to summarise this variance.We start by defining a function to generate the data with the correct shape. The shape must be```data_shape = None + input shape```input_shape = [10]It is useful to define the generating function so that it only takes in the value of the parameter as its input since the function can then be used for ABC later.The data needs to be generated at a fiducial parameter value and at perturbed values just below and above the fiducial parameter for the numerical derivative.θ_fid = 1. Δθpm = 0.1The data at the perturbed values should have the shape```perturbed_data_shape = None + number of parameters + input shape```The generating function is defined so that the fiducial parameter is passed as a list so that many simulations can be made at once. This is very useful for the ABC function later.def generate_data(θ, train = False): if train: return np.moveaxis(np.random.normal(0., np.sqrt(θ), [1] + input_shape + [len(θ)]), -1, 0) else: return np.moveaxis(np.random.normal(0., np.sqrt(θ), input_shape + [len(θ)]), -1, 0)Training dataEnough data needs to be made to approximate the covariance matrix of the output summaries. The number of simulations needed to approximate the covariance is `n_s`. If the data is particularly large then it might not be possible to pass all the data into active memory at once and so several the simulations can be split into batches.For example if we wanted to make 2000 simulations, but estimate the covariance using 1000 simulations at a timewe would set```pythonn_s = 1000n_train = 2```We're going to use 1000 simulations to approximate the covariance and use only 1 combinationn_s = 1000 n_train = 1The training data can now be madet = generate_data(θ = [θ_fid for i in range(n_train * n_s)], train = False)By suppressing the sample variance between the simulations created at the lower and upper parameter values for the numerical derivative, far fewer simulations are needed. We choose to use 5% of the total number of simulations.derivative_fraction = 0.05 n_p = int(n_s * derivative_fraction)The sample variance can be supressed by choosing the same initial seed when creating the upper and lower simulations.seed = np.random.randint(1e6) np.random.seed(seed) t_m = generate_data(θ = [θ_fid - Δθpm for i in range(n_train * n_p)], train = True) np.random.seed(seed) t_p = generate_data(θ = [θ_fid + Δθpm for i in range(n_train * n_p)], train = True) np.random.seed()We also need to get the denominator of the derivative which is given by the difference between the perturbed parameter values $$\frac{\partial}{\partial\theta} = \frac{1}{2\Delta\theta_{\pm}}.$$This needs to be done for every parameter and kept in a numpy array of shape `[number of parameters]`.derivative_denominator = 1. / (2. 
* Δθpm) der_den = np.array([derivative_denominator])The fiducial simulations and simulations for the derivative must be collected in a dictionary to be stored on the GPU or passed to the training function.data = {"x_central": t, "x_m": t_m, "x_p":t_p}Test dataWe should also make some test data, but here we will use only one combination `n_train = 1`. This needs adding to the dictionarytt = generate_data([θ_fid for i in range(n_s)]) seed = np.random.randint(1e6) np.random.seed(seed) tt_m = generate_data([θ_fid - Δθpm for i in range(n_p)], train = True) np.random.seed(seed) tt_p = generate_data([θ_fid + Δθpm for i in range(n_p)], train = True) np.random.seed() data["x_central_test"] = tt data["x_m_test"] = tt_m data["x_p_test"] = tt_pData visualisationWe can plot the data to see what it looks like.fig, ax = plt.subplots(1, 1, figsize = (10, 6)) ax.plot(data['x_central'][np.random.randint(n_train * n_s)], label = "training data") ax.plot(data['x_central_test'][np.random.randint(n_s)], label = "test data") ax.legend(frameon = False) ax.set_xlim([0, 9]) ax.set_xticks([]) ax.set_ylabel("Data amplitude");It is also very useful to plot the upper and lower derivatives to check that the sample variance is actually supressed since the network learns extremely slowly if this isn't done properly.fig, ax = plt.subplots(2, 1, figsize = (10, 10)) plt.subplots_adjust(hspace = 0) training_index = np.random.randint(n_train * n_p) ax[0].plot(data['x_m'][training_index, 0], label = "lower training data", color = 'C0') ax[0].plot(data['x_p'][training_index, 0], label = "upper training data", color = 'C0', linestyle = 'dashed') test_index = np.random.randint(n_p) ax[0].plot(data['x_m_test'][test_index, 0], label = "lower test data", color = 'C1') ax[0].plot(data['x_p_test'][test_index, 0], label = "upper test data", color = 'C1') ax[0].legend(frameon = False) ax[0].set_xlim([0, 9]) ax[0].set_xticks([]) ax[0].set_ylabel("Data amplitude") ax[1].axhline(xmin = 0., xmax = 1., y = 0., linestyle = 'dashed', color = 'black') ax[1].plot(data['x_p'][training_index, 0] - data['x_m'][training_index, 0], color = 'C0') ax[1].plot(data['x_p_test'][test_index, 0] - data['x_m_test'][test_index, 0], color = 'C1') ax[1].set_xlim([0, 9]) ax[1].set_xticks([]) ax[1].set_ylabel("Difference between derivative data amplitudes");Initiliase the neural network Define network parametersThe network works with a base set of parameters which are> `'verbose'` - `bool` - whether to print out diagnostics> `'number of simulations'` - `int` - the number of simulations to use in any one combination> `'differentiation fraction'` - `float` - a fraction of the simulations to use for the numerical derivative> `'fiducial θ'` - `array` - fiducial parameters in an array> `'derivative denominator'` - `array` - denominator of the numerical derivative for each parameter> `'number of summaries'` - `int` - number of summaries the network makes from the data> `'input shape'` - `int` or `list` - the number of inputs or the shape of the input if image-like input> `'preload data'` - `dict` or `None` - the training (and test) data to preload as a TensorFlow constant in a dictionary, no preloading is done if `None`> `'calculate MLE'` - `bool` - whether to calculate the maximum likelihood estimate> `'prebuild'` - `bool` - whether to get the network to build a network or to provided your own> `'save file'` - `string` - a file name to save the graph (not saved if wrong type or not given)```pythonparameters = { 'verbose': True, 'number of simulations': n_s, 
'differentiation fraction': derivative_fraction, 'fiducial θ': np.array([θ_fid]), 'derivative denominator': der_den, 'number of summaries': 1, 'input shape': input_shape, 'preload data': data, 'calculate MLE': True, 'prebuild': False,}```The module can also build simple convolutional or dense networks (or a mixture of the two), which can be trigger by setting `'prebuild': True`. Several parameters are required to allow the network to build the network. These are> `'wv'` - `float` - the variance with which to initialise the weights. If this is 0 or less, the network will determine the weight variance according to He initialisation> `'bb'` - `float` - the constant value with which to initialise the biases> `'activation'` - `TensorFlow function` - a native tensorflow activation function> `'α'` - `float` or `int` - an additional parameter, if needed, for the tensorflow activation function> `'hidden layers'` - `list` - the architecture of the network. each element of the list is a hidden layer. A dense layer can be made using an integer where thet value indicates the number of neurons. A convolutional layer can be built by using a list where the first element is an integer where the number describes the number of filters, the second element is a list of the kernel size in the x and y directions, the third elemnet is a list of the strides in the x and y directions and the final element is string of 'SAME' or 'VALID' which describes the padding prescription.Here is an example of the IMNN which uses 1000 simulations per combination and 50 upper and 50 lower simulations per derivative for a model with one parameter where we require one summary which are preloaded as a TensorFlow constant where we want to have access to the precomputed maximum likelihood estimate. The module will build the network which takes in an input array of shape `[10]` and allows the network to decide the weight initialisation, initialises the biases at `bb = 0.1` and uses `tf.nn.leaky_relu` activation with a negative gradient parameter of `α = 0.01`. The network architecture has two fully connected hidden layers with 128 neurons in each layer. The graph is saved into a file in the `data` folder called `model.meta`.parameters = { 'verbose': True, 'number of simulations': n_s, 'fiducial θ': np.array([θ_fid]), 'derivative denominator': der_den, 'differentiation fraction': derivative_fraction, 'number of summaries': 1, 'calculate MLE': True, 'prebuild': True, 'input shape': input_shape, 'preload data': data, 'save file': "data/model", 'wv': 0., 'bb': 0.1, 'activation': tf.nn.leaky_relu, 'α': 0.01, 'hidden layers': [128, 128] }Self-defined networkA self defined network can be used instead of letting the module build the network for you. This function needs to take in two input tensors, the first is the shape of the input with `None` in the first axis and the second tensor is a tensorflow float (which will be the dropout). Since the weights need to be shared between several corresponding networks each set of trainable variables must be defined in its own scope. 
An example of the above network defined outside of the module is```pythondef network(input_tensor, dropout): with tf.variable_scope('layer_1'): weights = tf.get_variable("weights", [10, 128], initializer = tf.random_normal_initializer(0., 1.)) biases = tf.get_variable("biases", [128], initializer = tf.constant_initializer(0.1)) x = tf.matmul(input_tensor, weights) x = tf.add(x, biases) x = tf.nn.leaky_relu(x, 0.01) x = tf.nn.dropout(x, dropout) with tf.variable_scope('layer_2'): weights = tf.get_variable("weights", [128, 128], initializer = tf.random_normal_initializer(0., 1.)) biases = tf.get_variable("biases", [128], initializer = tf.constant_initializer(0.1)) x = tf.matmul(x, weights) x = tf.add(x, biases) x = tf.nn.leaky_relu(x, 0.01) x = tf.nn.dropout(x, dropout) x = tf.reshape(x, (-1, 300)) with tf.variable_scope('layer_3'): weights = tf.get_variable("weights", [128, 1], initializer = tf.random_normal_initializer(0., np.sqrt(2. / 300))) biases = tf.get_variable("biases", [1], initializer = tf.constant_initializer(0.1)) x = tf.matmul(x, weights) x = tf.add(x, biases) x = tf.nn.leaky_relu(x, 0.01) x = tf.nn.dropout(x, dropout) return x``` Initialise the networkn = IMNN.IMNN(parameters = parameters)saving model as data/model.meta network architecture is [[10], 128, 128, 1].Build the networkTo build the network a learning rate, η, must be defined.η = 1e-3The `setup(η)` function initialises the input tensors, builds the network and defines the optimisation scheme. If a self-defined network function (`network(a, b)`) has been constructed this can be passed to the setup function```pythonn.setup(η = η, network = network)```n.setup(η = η)Tensor("x:0", shape=(?, 10), dtype=float32) Tensor("IMNN/layer_1/dense_1/mul:0", shape=(?, 128), dtype=float32) Tensor("IMNN/layer_2/dense_2/mul:0", shape=(?, 128), dtype=float32) Tensor("IMNN/layer_3/LeakyRelu:0", shape=(?, 1), dtype=float32) Tensor("output:0", shape=(?, 1), dtype=float32) Tensor("GatherNd:0", shape=(1000, 10), dtype=float32) Tensor("IMNN_1/layer_1/dense_1/mul:0", shape=(1000, 128), dtype=float32) Tensor("IMNN_1/layer_2/dense_2/mul:0", shape=(1000, 128), dtype=float32) Tensor("IMNN_1/layer_3/LeakyRelu:0", shape=(1000, 1), dtype=float32) Tensor("Reshape:0", shape=(50, 10), dtype=float32) Tensor("IMNN_1/layer_1_1/dense_1/mul:0", shape=(50, 128), dtype=float32) Tensor("IMNN_1/layer_2_1/dense_2/mul:0", shape=(50, 128), dtype=float32) Tensor("IMNN_1/layer_3_1/LeakyRelu:0", shape=(50, 1), dtype=float32) Tensor("Reshape_1:0", shape=(50, 10), dtype=float32) Tensor("IMNN_1/layer_1_2/dense_1/mul:0", shape=(50, 128), dtype=float32) Tensor("IMNN_1/layer_2_2/dense_2/mul:0", shape=[...]Changing minimisation schemeBy default the optimation scheme is```pythonn.backpropagate = tf.train.GradientDescentOptimizer(η).minimize(n.Λ)```To use any other training scheme, such as the `Adam` optimiser, it is sufficient to run```pythonn.backpropagate = tf.train.AdamOptimizer(η, β1, β2, ε).minimize(n.Λ)``` after `setup(η)` to override the default minimisation routine. Note that testing with Adam optimiser has found it to be incredibly unstable. If you want to continue to use the default minimisation routine but want to change the learning rate without reinitialising you can run ```pythonn.training_scheme(η = new_η)``` Train the networkWith the data we can now train the network. 
The function simply takes the number of epochs, `num_epochs`, the fraction of neurons kept when using dropout `keep_rate`, and the denominator for the derivative calculated earlier, `der_den`.num_epochs = 500 keep_rate = 0.8If the data has not been preloaded as a TensorFlow constant then it can be passed to the train function```pythontrain_F, test_F = n.train(num_epochs = num_epochs, n_train = n_train, keep_rate = keep_rate, data = data)```We can runn.train(num_epochs = num_epochs, n_train = n_train, keep_rate = keep_rate, data = data, history = True)100%|██████████| 500/500 [00:13<00:00, 36.29it/s, detF=5.34, detF_test=4.2]The train function automatically collects a dictionary of history elements when `history = True`. When `history = False` the dictionary only contains> `'F'` - the Fisher information at each epoch> `'det(F)'` - the determinant of the Fisher informationWhen `history = True` then the dictionary also contains> `'Λ'` - the loss function of the training data> `'μ'` - the mean of the fiducial simulations> `'C'` - the covariance of the fiducial simulations> `'det(C)'` - the determinant of the fiducial simulations> `'dμdθ'` - the mean of the numerical derivative of the simulationsTest version of each of these quantities is also calculated if there is provided test data> `'test F'` - the Fisher information of the test data> `'det(test F)'` - the determinant of the Fisher information from the test data> `'Λ'` - the loss function of the test data> `'μ'` - the mean of the fiducial test simulations> `'C'` - the covariance of the fiducial test simulations> `'det(C)'` - the determinant of the fiducial test simulations> `'dμdθ'` - the mean of the numerical derivative of the test simulationsfig, ax = plt.subplots(5, 1, sharex = True, figsize = (8, 14)) plt.subplots_adjust(hspace = 0) end = len(n.history["det(F)"]) epochs = np.arange(end) a, = ax[0].plot(epochs, n.history["det(F)"], label = 'Training data') b, = ax[0].plot(epochs, n.history["det(test F)"], label = 'Test data') ax[0].legend(frameon = False) ax[0].set_ylabel(r'$|{\bf F}_{\alpha\beta}|$') ax[1].plot(epochs, n.history["Λ"]) ax[1].plot(epochs, n.history["test Λ"]) ax[1].set_xlabel('Number of epochs') ax[1].set_ylabel(r'$\Lambda$') ax[1].set_xlim([0, len(epochs)]); ax[2].plot(epochs, n.history["det(C)"]) ax[2].plot(epochs, n.history["det(test C)"]) ax[2].set_xlabel('Number of epochs') ax[2].set_ylabel(r'$|{\bf C}|$') ax[2].set_xlim([0, len(epochs)]); ax[3].plot(epochs, np.array(n.history["dμdθ"]).reshape((np.prod(np.array(n.history["dμdθ"]).shape)))) ax[3].plot(epochs, np.array(n.history["test dμdθ"]).reshape((np.prod(np.array(n.history["test dμdθ"]).shape)))) ax[3].set_ylabel(r'$\partial\mu/\partial\theta$') ax[3].set_xlabel('Number of epochs') ax[3].set_xlim([0, len(epochs)]) ax[4].plot(epochs, np.array(n.history["μ"]).reshape((np.prod(np.array(n.history["μ"]).shape)))) ax[4].plot(epochs, np.array(n.history["test μ"]).reshape((np.prod(np.array(n.history["test μ"]).shape)))) ax[4].set_ylabel('μ') ax[4].set_xlabel('Number of epochs') ax[4].set_xlim([0, len(epochs)]) print()We can see that the test loss deviates from the training loss. This is to be expected because there are will be a lot of correlation within a small training set which isn't in the test set. As long as the test loss doesn't start increasing then it is likely that the network is still working, with the maximum Fisher available from the network is the value obtained from the test set. 
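Assuming the history keys documented above (`'det(F)'`, and `'det(test F)'` when test data is provided), the final Fisher information reached on the training and test sets can be read straight from the history dictionary; a minimal sketch:

```python
# Compare the final determinant of the Fisher information on training vs test data.
final_detF = n.history["det(F)"][-1]
final_detF_test = n.history["det(test F)"][-1]
print("final |F| (train) = {:.3f}, (test) = {:.3f}".format(final_detF, final_detF_test))
```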
Resetting the networkIf you need to reset the weights and biases for any reason then you can call```pythonn.reinitialise_session()``` Saving the networkIf you don't initialise the network with a save name you can save the network as a `TensorFlow` `meta` graph. For example saving the model in the directory `/.data` called `saved_model.meta` can be done using the function```pythonn.save_network(file_name = "data/saved_model", first_time = True)```If `save file` is passed with a correct file name when initialising the module then the initialised network will be saved by```pythonn.begin_session()```and then saved at the end of training. Loading the networkYou can load the network from a `TensorFlow` `meta` graph (from `/.data/saved_model.meta`) using the same parameter dictionary as used when originally training the network and then running```pythonn = IMNN.IMNN(parameters = parameters)n.restore_network()```Training can be continued after restoring the model - although the Adam optimiser might need to reacquaint itself. Approximate Bayesian computationWe can now do ABC (or PMC-ABC) with our calculated summary. First we generate some simulated real data:real_data = generate_data(θ = [1.], train = False)We can plot this real data to see what it looks like.fig, ax = plt.subplots(1, 1, figsize = (10, 6)) ax.plot(real_data[0], label = "real data") ax.legend(frameon = False) ax.set_xlim([0, 9]) ax.set_xticks([]) ax.set_ylabel("Data amplitude");ABCWe now perform ABC by drawing 100000 random samples from the prior. We define the upper and lower bounds of a uniform prior to be 0 and 10. Only a uniform prior is implemented at the moment. From the samples we create simulations at each parameter value and feed each simulation through the network to get summaries. The summaries are compared to the summary of the real data to find the distances which can be used to accept or reject points.Because the simulations are created within the ABC function then the generation function must be passed. This is why the generator should be of the form defined above, which takes only a list of parameter values and returns a simulation at each parameter.If the data is not preloaded as a TensorFlow constant then the data can be passed to the function as```pythonθ, summary, s, ρ, F = n.ABC(real_data = real_data, prior = [0, 10], draws = 100000, generate_simulation = generate_data, at_once = True, data = data)```Here we can useθ, summary, s, ρ, F = n.ABC(real_data = real_data, prior = [0, 10], draws = 100000, generate_simulation = generate_data, at_once = True, data = data)If the simulations are going to be too large to make all at once the `at_once` option can be set to false which will create one simulation at a time.```pythonθ, summary, s, ρ, F = n.ABC(real_data = real_data, der_den = der_den, prior = [0, 10], draws = 100000, generate_simulation = generate_data, at_once = False)``` Accept or rejectIn ABC draws are accepted if the distance between the simulation summary and the simulation of the real data are "close", i.e. smaller than some ϵ value, which is chosen somewhat arbitrarily.ϵ = 1 accept_indices = np.argwhere(ρ < ϵ)[:, 0] reject_indices = np.argwhere(ρ >= ϵ)[:, 0]Plot samplesWe can plot the output samples and the histogram of the accepted samples, which should peak around `θ = 1` (where we generated the real data). 
The monotonic function of all the output samples shows that the network has learned how to summarise the data.fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10)) plt.subplots_adjust(hspace = 0) ax[0].scatter(θ[accept_indices] , s[accept_indices, 0], s = 1) ax[0].scatter(θ[reject_indices], s[reject_indices, 0], s = 1, alpha = 0.1) ax[0].plot([0, 10], [summary[0], summary[0]], color = 'black', linestyle = 'dashed') ax[0].set_ylabel('Network output', labelpad = 0) ax[0].set_xlim([0, 10]) ax[1].hist(θ[accept_indices], np.linspace(0, 10, 100), histtype = u'step', density = True, linewidth = 1.5, color = '#9467bd'); ax[1].set_xlabel('$\\theta$') ax[1].set_ylabel('$\\mathcal{P}(\\theta|{\\bf d})$') ax[1].set_yticks([]);There can be a lot of $\theta$ draws which are unconstrained by the network because no similar structures were seen in the data which is indicative of using too small of a small training set. PMC-ABCPopulation Monte Carlo ABC is a way of reducing the number of draws by first sampling from a prior, accepting the closest 75% of the samples and weighting all the rest of the samples to create a new proposal distribution. The furthest 25% of the original samples are redrawn from the new proposal distribution. The furthest 25% of the simulation summaries are continually rejected and the proposal distribution updated until the number of draws needed accept all the 25% of the samples is much greater than this number of samples. This ratio is called the criterion. The inputs work in a very similar way to the `ABC` function above. If we want 1000 samples from the approximate distribution at the end of the PMC we need to set `num_keep = 1000`. The initial random draw (as in ABC above) initialises with `num_draws`, the larger this is the better proposal distribution will be on the first iteration.If the data is not preloaded as a TensorFlow constant then the data can be passed to the function as```pythonθ_, summary_, ρ_, s_, W, total_draws, F = n.PMC(real_data = real_data, prior = [0, 10], num_draws = 1000, num_keep = 1000, generate_simulation = generate_data, criterion = 0.1, data = data, at_once = True, samples = None)```Here we can useθ_, summary_, ρ_, s_, W, total_draws, F = n.PMC(real_data = real_data, prior = [0, 10], num_draws = 1000, num_keep = 1000, generate_simulation = generate_data, criterion = 0.1, at_once = True, samples = None, data = data)iteration = 14, current criterion = 0.09885330170027679, total draws = 36132, ϵ = 2.1938307881355286.If we want the PMC to continue for longer we can provide the output of PMC as an input as```pythonθ_, summary_, ρ_, s_, W, total_draws, F = n.PMC(real_data = real_data, der_den = der_den, prior = [0, 10], num_draws = 1000, num_keep = 1000, generate_simulation = generate_data, criterion = 0.01, data = data, at_once = True, samples = [θ_, summary_, ρ_, s_, W, total_draws, F])```Finally we can plot the accepted samples and plot their histogram.fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10)) plt.subplots_adjust(hspace = 0) ax[0].scatter(θ_ , s_, s = 1) ax[0].plot([0, 10], [summary[0], summary[0]], color = 'black', linestyle = 'dashed') ax[0].set_ylabel('Network output', labelpad = 0) ax[0].set_xlim([0, 10]) ax[0].set_ylim([np.min(s_), np.max(s_)]) ax[1].hist(θ_, np.linspace(0, 10, 100), histtype = u'step', density = True, linewidth = 1.5, color = '#9467bd'); ax[1].set_xlabel('θ') ax[1].set_ylabel('$\\mathcal{P}(\\theta|{\\bf d})$') ax[1].set_yticks([]);Maximum likelihood estimateWe can also calculate the first-order 
Gaussian approximation of the posterior on the parameter and find a maximum likelihood estimate.If the data is not preloaded as a TensorFlow constant then it can be passed using```pythonasymptotic_likelihood = n.asymptotic_likelihood(real_data = real_data, prior = np.linspace(0, 10, 1000).reshape((1, 1, 1000)), data = data)MLE = n.θ_MLE(real_data = real_data, data = data)```Here we will useasymptotic_likelihood = n.asymptotic_likelihood(real_data = real_data, prior = np.linspace(0, 10, 1000).reshape((1, 1, 1000)), data = data) MLE = n.θ_MLE(real_data = real_data, data = data) fig, ax = plt.subplots(1, 1, figsize = (10, 6)) ax.plot(np.linspace(0, 10, 1000), asymptotic_likelihood[0, 0], linewidth = 1.5) ax.axvline(x = MLE[0, 0], ymin = 0., ymax = 1., linestyle = 'dashed', color = 'black') ax.set_xlabel("θ") ax.set_xlim([0, 10]) ax.set_ylabel('$\\mathcal{P}(\\theta|{\\bf d})$') ax.set_yticks([]);Analytic posterior calculationWe know what the analytic posterior is for this model$$\mathcal{P}(\boldsymbol{\theta}|{\bf d}) = \frac{\displaystyle{\rm exp}\left[-\frac{1}{2\boldsymbol{\theta}}\sum_{i = 1}^{n_{\bf d}}d_i\right]}{(2\pi\boldsymbol{\theta})^{n_{\bf d}/2}}.$$We can there for plot this as a comparison.θ_grid = np.linspace(0.001, 10, 1000) analytic_posterior = np.exp(-0.5 * np.sum(real_data**2.) / θ_grid) / np.sqrt(2. * np.pi * θ_grid)**10. analytic_posterior = analytic_posterior / np.sum(analytic_posterior * (θ_grid[1] - θ_grid[0])) fig, ax = plt.subplots(1, 1, figsize = (10, 6)) ax.plot(θ_grid, analytic_posterior, linewidth = 1.5, color = 'C1', label = "Analytic posterior") ax.hist(θ_, np.linspace(0, 10, 100), histtype = u'step', density = True, linewidth = 1.5, color = '#9467bd', label = "PMC posterior"); ax.hist(θ[accept_indices], np.linspace(0, 10, 100), histtype = u'step', density = True, linewidth = 1.5, color = 'C2', label = "ABC posterior") ax.plot(np.linspace(0, 10, 1000), asymptotic_likelihood[0, 0], color = 'C0', linewidth = 1.5, label = "Asymptotic Gaussian likelihood") ax.axvline(x = MLE[0, 0], ymin = 0., ymax = 1., linestyle = 'dashed', color = 'black', label = "Maximum likelihood estimate") ax.legend(frameon = False) ax.set_xlim([0, 10]) ax.set_xlabel('θ') ax.set_ylabel('$\\mathcal{P}(\\theta|{\\bf d})$') ax.set_yticks([]);Introducción a Google Colaboratory Ejecución y edición de celdas  Celdas de código1 + 2Celdas de texto  Texto enriquericido  Imágenes o gifs ![link text](https://media.giphy.com/media/6aGXQxkohixag/giphy.gif) VariablesLínea de comandosInstalación de libreríasCiencia de datos Subir archivosLibrerías para Ciencia de Datos- 📊 **matplotlib**: Generación de gráficos a partir de listas o arrays.- 🧑‍💻 **numpy**: Cómputo científico para la manipulación de vectores.- 🧑‍💻 **pandas**: Manipulación y análisis de datos de tablas y series temporales.- 🧑‍💻 **scipy**: Herramientas y algoritmos matemáticos.- 📊 **seaborn**: Visualización de datos estadísticos.import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy as sc import seaborn as snsGráficas# load an example dataset from vega_datasets import data cars = data.cars() # plot the dataset, referencing dataframe column names import altair as alt alt.Chart(cars).mark_bar().encode( x=alt.X('Miles_per_Gallon', bin=True), y='count()', )Work with the Survey Manager Try it live View source on GitHub This notebook uses the ArcGIS API for Python. For more information, see the ArcGIS API for Python documentation and guides. 
As a survey owner, you may want to work with your ArcGIS Survey123 surveys via the ArcGIS API for Python to automate certain tasks, modify your surveys, or work with your surveys' data. This notebook demonstrates the different workflows for connecting to your surveys using the ArcGIS API for Python.The first way uses the form item's ID which is more precise and allows you to quickly get up and running with a specific survey. The second method shown performs a search against your content to identify form items. This method would be beneficial if you are looking to work with multiple surveys as a list. You can modify the search parameters to refine the list and only work with surveys that meet a specific criteria.import arcgis from arcgis.gis import GIS gis = GIS("home")Define a Survey ManagerNext, a Survey Manager is defined, a survey in the Survey Manager is a single instance of a survey project that contains the item information and properties and provides access to the underlying survey dataset. For more information on Survey Manager, see the API Reference for the ArcGIS API for Python.The code block below extracts all surveys in the Survey Manager as a list. The final statement prints the title for all surveys in the Survey Manager. The `'title'` property can be updated with any of the properties outlined in the Get survey by item section, below.survey_manager = arcgis.apps.survey123.SurveyManager(gis) surveys = survey_manager.surveys p = [print(s.properties['title']) for s in surveys]NIIT Reverse Geocode NIIT Pre Enumeration Survey schools Pre-Enumeration Survey Water Leak ReportGet a surveyThere are two ways you can retrieve a survey:1. Use the Survey Manager to get a survey using its item ID.2. Use the GIS Content Manager to search for a survey. Get survey by item IDIf one survey is of interest, you can use the `get` method to define a specific survey using the form's item ID. Using the `properties` method you can extract all, or specific properties associated with the form item.survey_by_id = survey_manager.get("9f01838a15594cfdbb2eb69fafb60d75") print(survey_by_id) print(survey_by_id.properties['title']) Water Quality InspectionGet survey by content searchThe code below uses the GIS Content Manager to search for a survey by owner. Returning the `forms` variable lists all the items identified (default is 10).forms = gis.content.search('type:form owner:NinjaGreen') formsUsing the `get` method you can extract a specific survey of interest using the index, and continue working with that object. The ninth result, index 8, is then extracted from the results and the properties are returned.survey_by_item = survey_manager.get(forms[8]) print(survey_by_item.properties){'id': '580747564c474cda835a918d0f97fa9a', 'owner': 'NinjaGreen', 'created': 1617153016000, 'isOrgItem': True, 'modified': 1617153016000, 'guid': None, 'name': 'Transmission_Tower.zip', 'title': 'Transmission Tower', 'type': 'Form', 'typeKeywords': ['Form', 'Survey123', 'Survey123 Connect', 'xForm'], 'description': 'Document transmission tower defects observed during routine inspections. 
This survey uses text, photos, sketches and annotation to capture details about defects.', 'tags': [], 'snippet': 'Record transmission tower defects', 'thumbnail': 'thumbnail/Transmission_Tower_Inspection.png', 'documentation': None, 'extent': [], 'categories': [], 'spatialReference': None, 'accessInformation': None, 'licenseInfo': None, 'culture': 'en-us', 'properties': None, 'url': None, 'proxyFilter': None, 'access': 'private', 'size': 103519, 'subInfo': 0, 'appCategories': [], 'industries': [], 'languages': [], 'largeThumbnail': None, 'banner': None, 'screenshots': [], 'listed': False, 'ownerFolder[...]Now that a survey has been identified from the Survey Manager let's do something with it. The code below creates a web map, specifies a basemap, and sets a default map scale. Using the survey identified previously, the survey's form item is obtained using the `get` method and its item ID. Next, the associated feature service is obtained and added to the web map. Finally, the web map with the survey data is displayed directly within the notebook.survey_webmap = gis.map('Maryland') survey_webmap.basemap = 'dark-gray' survey_webmap.zoom = 8 survey_service = gis.content.get(survey_by_item.properties['id']) survey_webmap.add_layer(survey_service.related_items('Survey2Service','forward')[0]) survey_webmap**Breast Cancer Prediction**\This project predicts if the tumor is malignant or benign based on the risk factors.import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np from google.colab import files uploaded = files.upload() #importing our cancer dataset dataset = pd.read_csv('cancer.csv',header=0) dataset.head() print("Cancer data set dimensions : {}".format(dataset.shape)) dataset.isnull().sum() dataset.isna().sum() dataset.drop("Unnamed: 32",axis=1,inplace=True) dataset.drop("id",axis=1,inplace=True) prediction_var = ['texture_mean','perimeter_mean','smoothness_mean','compactness_mean','symmetry_mean'] features_mean= list(dataset.columns[1:11]) features_se= list(dataset.columns[11:20]) features_worst=list(dataset.columns[21:31]) print(features_mean) print("-----------------------------------") print(features_se) print("------------------------------------") print(features_worst) #Encoding categorical data values dataset['diagnosis']=dataset['diagnosis'].map({'M':1,'B':0}) dataset.describe() sns.countplot(x=dataset['diagnosis'],label="Count") corr = dataset[features_mean].corr() # .corr is used for find corelation plt.figure(figsize=(14,14)) sns.heatmap(corr, cbar = True, square = True, annot=True, fmt= '.2f',annot_kws={'size': 15}, xticklabels= features_mean, yticklabels= features_mean, cmap= 'coolwarm') # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split train, test = train_test_split(dataset, test_size = 0.25, random_state = 1) X_train = train[prediction_var]# taking the training data input Y_train=train.diagnosis# This is output of our training data # same we have to do for test X_test= test[prediction_var] # taking test data inputs Y_test =test.diagnosis #output value of test dat #Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) #Using Logistic Regression Algorithm to the Training Set from sklearn.metrics import confusion_matrix from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 0) classifier.fit(X_train, Y_train) Y_pred = 
classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("Logistic Regression\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])) #Using KNeighborsClassifier Method of neighbors class to use Nearest Neighbor algorithm from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("\nK Neighbors Classifier\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])) #Using SVC method of svm class to use Support Vector Machine Algorithm from sklearn.svm import SVC classifier = SVC(kernel = 'linear', random_state = 0) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("\nSupport Vector Machine (linear)\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])) #Using SVC method of svm class to use Kernel SVM Algorithm from sklearn.svm import SVC classifier = SVC(kernel = 'rbf', random_state = 0) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("\nSupport Vector Machine (rbf)\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])) #Using GaussianNB method of naive_bayes class to use Naive Bayes Algorithm from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("\nGaussian Naive Bayesian\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])) #Using DecisionTreeClassifier of tree class to use Decision Tree Algorithm from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("\nDecision Tree Classifier\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])) #Using RandomForestClassifier method of ensemble class to use Random Forest Classification algorithm from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0) classifier.fit(X_train, Y_train) Y_pred = classifier.predict(X_test) cm = confusion_matrix(Y_test, Y_pred) print("\nRandom Forest Classifier\nConfusion matrix:") print(cm) print("Accuracy: ") print((cm[0,0]+cm[1,1])/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1]))Logistic Regression Confusion matrix: [[83 5] [ 8 47]] Accuracy: 0.7142857142857143 K Neighbors Classifier Confusion matrix: [[83 5] [ 8 47]] Accuracy: 0.7142857142857143 Support Vector Machine (linear) Confusion matrix: [[83 5] [ 7 48]] Accuracy: 0.7119565217391305 Support Vector Machine (rbf) Confusion matrix: [[84 4] [10 45]] Accuracy: 0.7247191011235955 Gaussian Naive Bayesian Confusion matrix: [[85 3] [ 8 47]] Accuracy: 0.7252747252747253 Decision Tree Classifier Confusion matrix: [[81 7] [10 45]] Accuracy: 0.7078651685393258 Random Forest Classifier Confusion matrix: [[84 4] [10 45]] Accuracy: 0.7247191011235955find pdb reference:refs = pdb_refs[pdb_refs.index.str.contains(fam.name)] refs.head() n_refs = refs.shape[0] 
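# Note on the loop below (comments added for clarity): pdb_list and pdb_parser are
# assumed to be Biopython PDBList and PDBParser objects created earlier in the
# notebook. For each PDB reference of this Pfam family, the loop takes the aligned
# sequence msa[:, ref.seq+1], drops its gap characters, downloads the PDB entry,
# slices the chain's atom coordinates to the ref.pdb_start..ref.pdb_end range, and
# prints the difference between the number of non-gap positions and the number of
# coordinates kept, as a consistency check (0 in the output means they agree).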
print(n_refs) for i in range(n_refs): ref = refs.iloc[i] #print(ref) # pdb sequence #seq = msa[:,ref.seq] seq = msa[:,ref.seq+1] # change j-1 --> j #print(seq) gap_pos = seq == '-' seq_non_gap = seq[~gap_pos] #print(seq_non_gap.shape) #print(seq_non_gap) pdb_file = pdb_list.retrieve_pdb_file(ref.pdb_id, pdir=fam_dir, file_format='pdb') chain = pdb_parser.get_structure(ref.pdb_id, pdb_file)[0][ref.chain] coords = np.array([a.get_coord() for a in chain.get_atoms()]) #print(coords.shape) #print(coords) coords_cut = coords[ref.pdb_start-1:ref.pdb_end] #print(coords_cut.shape) print(seq_non_gap.shape[0]-coords_cut.shape[0])Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb1zdr.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb1zdr.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3jwk.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3s9u.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3fl9.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3fl8.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb4elf.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb4elf.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb4elh.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb4elf.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb4elg.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3fl8.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3s9u.ent' 0 Structure exists: '../protein_pfam/Pfam-A.full/PF00186/pdb3jw3.ent' 0 Struct[...]DatabaseRDMS(Relationnal database Manageament system) **open source**- MySQL(web and php)- PostgreSQL(entreprise level)- SQLITE(android app/ desktop app)**Proprietary**- MSSQL- Oracleimport sqlite3 #driver is being imported #psycopg2 for protsgeSQl #pymysql for mySQL conn= sqlite3.connect('example.sqlite3') #if the give nname of file exits it load s the file else it creates the file cur= conn.cursor() #like pointer cur.execute('CREATE TABLE countries(id integer, name text,iso3 text)') #creates a table with columns id name and iso3 cur.execute('SELECT * FROM countries') cur.fetchall() #data not entered in our table cur.execute('INSERT INTO countries(id,name,iso3)VALUES(1,"Nepal","NEP")') cur.execute('SELECT * FROM countries') cur.fetchall() sql='''INSERT INTO countries (id,name,iso3) VALUES (?,?,?)''' cur.executemany(sql,[(2,'India','INA'), (3,'Bhutan','BHU'), (4,'Afghanistan','AFG')]) cur.execute('SELECT * FROM countries') cur.fetchall() sql='''INSERT INTO countries (id,name,iso3) VALUES (4,'Pakistan','PAK')''' cur.execute(sql) cur.execute('SELECT * FROM countries') cur.fetchall() sql='UPDATE countries SET id=5 WHERE iso3= "PAK"' cur.execute(sql) cur.execute('SELECT * FROM countries') cur.fetchall() conn.commit() #to write in the database cur.execute('SELECT * FROM countries WHERE id=4') cur.fetchall() cur.execute('SELECT * FROM countries WHERE id>3') cur.fetchall() cur.execute('SELECT * FROM countries WHERE name LIKE "%an"') cur.fetchall() cur.execute('SELECT * FROM countries WHERE name LIKE "%an%"') cur.fetchall() # last ma ra 1st ma j bhaye ni huncha cur.execute('SELECT * FROM countries WHERE name LIKE "an%"') cur.fetchall() # must start with an cur.execute('DELETE FROM countries') cur.fetchall() import csv sql='INSERT INTO countries (id,name ,iso3) VALUES (?,?,?)' _id= 1 with open('netdata.txt','r') as datafile: csvfile=csv.DictReader(datafile) for row in csvfile: if row['Common Name'] and row['ISO 
3166-1 3 Letter Code']: cur.execute(sql, (_id, row['Common Name'], row['ISO 3166-1 3 Letter Code'])) _id+=1 conn.commit() cur.execute('SELECT * FROM countries') cur.fetchall() cur.execute('DELETE FROM country_list') cur.fetchall() sql= '''CREATE TABLE country_list (id integer primary key autoincrement, country_name text not null, iso3 text not null unique)''' cur.execute(sql) sql='INSERT INTO country_list (country_name ,iso3) VALUES (?,?)' with open('netdata.txt','r') as datafile: csvfile=csv.DictReader(datafile) for row in csvfile: if row['Common Name'] and row['Formal Name']: cur.execute(sql, (row['Common Name'], row['Formal Name'])) conn.commit() cur.execute('SELECT * FROM country_list') cur.fetchall() sql='''INSERT INTO country_list (id,country_name,iso3) VALUES (595, 'Reunion', 'Overseas Region of Reunion')''' cur.execute(sql)IntroductionIn this example we create a classifier to recognize handwritten digits from the famous mnist dataset, using a convolutional neural network. Typically the training data is referred to as **X**, the training lables as **y**, and the test data and labesl as **X_test** and **y_test** respectively. Let's load the train/test data, and show one of the images with the help of the excellent matplotlib library:import keras.datasets as datasets (X, y), (X_test, y_test) = datasets.mnist.load_data() digit = X[2] plt.imshow(digit, cmap=plt.cm.binary)Using TensorFlow backend.Let's now define the network using convolutional layers. In this instance we used 3 convolutional layers with max pooling layers in between:from keras import models from keras import layers net = models.Sequential() net.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) net.add(layers.MaxPool2D(2, 2)) net.add(layers.Conv2D(64, (3, 3), activation='relu')) net.add(layers.MaxPool2D(2, 2)) net.add(layers.Conv2D(64, (3, 3), activation='relu'))In order to predict the number each image represents we use a couple of dense layers, but before we need to flatten the matrix coming from the last convolutional layer into a vector:net.add(layers.Flatten()) net.add(layers.Dense(64, activation='relu')) net.add(layers.Dense(10, activation='softmax'))Finally let's compile the network with an appropriate optimizer, loss and performance metric:net.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])And we are ready to train a model, but before we need to reshape and normalize the images, and apply **one-hot-encoding** to the labels.from keras.utils import to_categorical # Reshape and normalize the images X = X.reshape((len(X), 28, 28, 1)).astype('float32') / 255 X_test = X_test.reshape((len(X_test), 28, 28, 1)).astype('float32') / 255 # on-hot-encode the labels y = to_categorical(y) y_test = to_categorical(y_test) net.fit(X, y, epochs=5, batch_size=64)Epoch 1/5 60000/60000 [==============================] - 29s 484us/step - loss: 0.1769 - acc: 0.9445 Epoch 2/5 60000/60000 [==============================] - 29s 483us/step - loss: 0.0497 - acc: 0.9847 Epoch 3/5 60000/60000 [==============================] - 29s 481us/step - loss: 0.0337 - acc: 0.9894 Epoch 4/5 60000/60000 [==============================] - 29s 483us/step - loss: 0.0260 - acc: 0.9919 Epoch 5/5 60000/60000 [==============================] - 29s 484us/step - loss: 0.0210 - acc: 0.9934Let's see how our model performs on the test dataset.net.evaluate(X_test, y_test)10000/10000 [==============================] - 2s 173us/stepEDA on covid-19## importing all the libraries import pandas as pd import numpy 
as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline ##READING THE TOP 5 DATA FROM THE DATASET (df) df=pd.read_csv("covid_19_india.csv") df.head()Information about the datasetdf.info() df.describe() ###checking is there any null values df.isnull() ##Showing is there any null values using graph sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap="YlGnBu")Seaborn is a very powerful data visualizing librarysns.pairplot(df)Gives the no. of death cases in Indiasns.countplot(x="Deaths",data=df)Gives the no. of confirmed cases in India with respect to datasns.boxplot("Confirmed","Date",data=df) sns.violinplot("Confirmed","Date",data=df) ##GIVES THE RELATIONSHIP B/W CURED AND DEATH CASES IN INDIA sns.jointplot("Cured","Deaths",data=df)It shows that the no. of cured people is much higher than the death one.df_age=pd.read_csv("AgeGroupDetails.csv") df_age.head()Showing no. of total cases of different age groupsns.countplot(x="TotalCases",hue="AgeGroup",data=df_age) plt.bar(df_age["TotalCases"],height=2)Starbucks Capstone Challenge IntroductionThis data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set.Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ExampleTo give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. 
There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. CleaningThis makes data cleaning especially important and tricky.You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. Final AdviceBecause this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A). Data SetsThe data is contained in three files:* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)* profile.json - demographic data for each customer* transcript.json - records for transactions, offers received, offers viewed, and offers completedHere is the schema and explanation of each variable in the files:**portfolio.json*** id (string) - offer id* offer_type (string) - type of offer ie BOGO, discount, informational* difficulty (int) - minimum required spend to complete an offer* reward (int) - reward given for completing an offer* duration (int) - time for offer to be open, in days* channels (list of strings)**profile.json*** age (int) - age of the customer * became_member_on (int) - date when customer created an app account* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)* id (str) - customer id* income (float) - customer's income**transcript.json*** event (str) - record description (ie transaction, offer received, offer viewed, etc.)* person (str) - customer id* time (int) - time in hours since start of test. The data begins at time t=0* value - (dict of strings) - either an offer id or transaction amount depending on the record**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the termnal from the orange icon in the top left of this notebook. You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal:Then you will want to run the above command:Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors. Problem Statement In this project I will determine how likely is a customer to complete an offer. The end goal is:1. To determine, does sending more offers lead to a higher completion rate.2. 
Customers with lower completion rate should be sent offers or not Exploratory Data Analysis Read Data Files#######Run This import pandas as pd import numpy as np import math import json import os %matplotlib inline portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True)Save The Data Filesif not os.path.isdir('explore'): os.makedirs('explore') def data_info(data, filename): path = os.path.join('explore', filename) if not os.path.isfile(path): pd.DataFrame(data).to_csv(path) print(data.shape) data_info(portfolio, 'portfolio.csv') data_info(profile, 'profile.csv') data_info(transcript, 'transcript.csv')(10, 6) (17000, 5) (306534, 4)Clean PortfolioBy looking at the portfolio file we can see that the channels column in grouped, so we'll use sklearn's MultiLabelBinarizer to unpack the channel column and then remove it from the DataFramefrom sklearn.preprocessing import MultiLabelBinarizer cleaned_portfolio = portfolio.copy() cleaned_portfolio.rename(columns={'id':'offer_id'}, inplace=True) s = cleaned_portfolio['channels'] mlb = MultiLabelBinarizer() channels = pd.DataFrame(mlb.fit_transform(s),columns=mlb.classes_, index=cleaned_portfolio.index) cleaned_portfolio = cleaned_portfolio.join(channels) cleaned_portfolio.drop(['channels'], axis=1, inplace=True) cleaned_portfolioClean ProfileBy looking at the profile data we can see that there are missing age values, we also observe that the people with missing age values have missing income values. Therefore for now we'll remove all the rows with NaN values(~2000), deduce inference and then later combine with missing values and compare results#profile['became_member_on'] = pd.to_datetime(profile['became_member_on'], format='%Y%m%d') profile.rename(columns={"id":"person"}, inplace=True) undefined_group = None cleaned_profile = None #cleaning profile and dividing it into cleaned_profile and undefined_group undefined_group = profile.copy() undefined_group['gender'] = undefined_group['gender'].fillna('U') undefined_group = undefined_group.loc[undefined_group['gender'] == 'U'].reset_index(drop=True) cleaned_profile = profile.dropna().reset_index(drop=True) cleaned_profileClean TranscriptFrom the transcript we can see that the column values has 2 values i.e it has an offer id or amount spent for that transaction, we'll split the value in 2 columns offer_id, amount and then drop the value columncleaned_transcript = transcript.copy() value = cleaned_transcript['value'] cleaned_transcript['amount'] = [int(i['amount']) if i.get('amount') else 0 for i in value] cleaned_transcript['offer_id'] = [i['offer_id'] if i.get('offer_id') else (i['offer id'] if i.get('offer id') else '0') for i in value] cleaned_transcript.drop(['value'], axis=1, inplace=True) #drop the profile which have no gender or income cleaned_transcript = cleaned_transcript[~cleaned_transcript.person.isin(undefined_group.person)] sort_df = cleaned_transcript.sort_values(by=['person', 'time']) sort_dfGet Datamethod: get_valid_dataparams: df {df is the set of all the events for a person, lets say offer received, viewed or completed}---- The idea is to parse a set of transaction entries for a person and then divide it into offer received, offer viewed and offer completed---- Then create a new column ['g'] which stores the cumalative count for every entry lets say offer id 'a' offered twice then the corresponding g column will 
store the count something like this: offer_id g a 0 a 1The idea behind g is that it will help us merge elements on [person, offer_id] and will prevent duplicatesdef get_valid_data(df): offer_received = df.loc[df['event'] == 'offer received'].reset_index(drop=True) offer_viewed = df.loc[df['event'] == 'offer viewed'].reset_index(drop=True) offer_completed = df.loc[df['event'] == 'offer completed'].reset_index(drop=True) offer_received['g'] = offer_received.groupby('offer_id').cumcount() offer_viewed['g'] = offer_viewed.groupby('offer_id').cumcount() offer_completed['g'] = offer_completed.groupby('offer_id').cumcount() res = pd.merge(offer_received, offer_viewed, on=['person', 'offer_id', 'g'], how='outer') res = pd.merge(res, offer_completed, on=['person', 'offer_id', 'g'], how='outer') return res offers_completed = sort_df.groupby('person').apply(lambda x: get_valid_data(x)) offers_completed = offers_completed.dropna() offers_completed = offers_completed.reset_index(drop=True) offers_completedCombine Portfolio with the offers completed for every entry method: valid_offer_completedparameter: df {offers completed}, cleaned_portfolio {cleaned_portfolio- information about every customer like age, income} Functions1. Drop columns like amount_x, amount_y since they only have value 0 and theh drop event like offer received etc2. Merge cleaned_portfolio[offer_type, duration] to df on offer_id 3. Drop the columns where a user have completed and offer before and viewed it later i.e keep only those where time_y <= timedef valid_offer_completed(df, cleaned_portfolio): df = df.rename(columns={"offer_id_x":"offer_id"}) offers = cleaned_portfolio[['offer_id', 'offer_type', 'duration']] df = df.merge(offers,how='left', on='offer_id') df = df.drop(['amount_x', 'amount_y', 'amount', 'event_x', 'event_y', 'event', 'g'], axis=1).reset_index(drop=True) df = df[['person','offer_id','time_x','time_y', 'time', 'offer_type', 'duration']] df = df[(df.time_x <= df.time_y) & (df.time_y <= df.time)] return df valid = valid_offer_completed(offers_completed, cleaned_portfolio) valid = valid.reset_index(drop=True) validFind Informational OffersInformational offers do not have any offer completed record so we need to find the offer_completed time because we need to combine then with the valid dataframe later onso we'll caluate the offfer completed based on the duration of the information offerdef info_offer(df): offer_received = df.loc[df['event'] == 'offer received'].reset_index(drop=True) offer_viewed = df.loc[df['event'] == 'offer viewed'].reset_index(drop=True) offer_received['g'] = offer_received.groupby('offer_id').cumcount() offer_viewed['g'] = offer_viewed.groupby('offer_id').cumcount() res = pd.merge(offer_received, offer_viewed, on=['person', 'offer_id', 'g'], how='outer') offers = cleaned_portfolio[['offer_id', 'offer_type', 'duration']] res = res.merge(offers,how='left', on='offer_id') res['time'] = res['time_x'] + res['duration'] * 24 res = res.dropna() res = res[res.time_x <= res.time_y] res['response'] = np.where(res.time_y > res.time , 0, 1) res = res.loc[res.response == 1] res = res.drop(['response', 'amount_x', 'amount_y', 'event_x', 'event_y', 'g'], axis=1).reset_index(drop=True) res = res[['person','offer_id','time_x','time_y', 'time', 'offer_type', 'duration']] return res info_df = sort_df[sort_df['offer_id'].isin(['3f207df678b143eea3cee63160fa8bed', '5a8bc65990b245e5a138643cd4eb9837'])] info_data = info_df.groupby('person').apply(lambda x: info_offer(x)) info_data =info_data.reset_index(drop=True) 
info_dataCombine the valid and information dataframescomplete = pd.concat([valid, info_data], ignore_index=True, sort=False) completeFill Profile method: fill_profileparams: gd {Grouped data is the grouped data which includes all the transction record per person}, df {df is the customer portfolio}1. Find the number of valid offers completed2. Append the total offers completed for every person in th customer portfoliodf = None def fill_profile(gd, df): grouped_data = gd.groupby(['person']) invalid = [] for index, row in df.iterrows(): if row['person'] in grouped_data.groups.keys(): offers = grouped_data.get_group(row['person'])['offer_type'].value_counts() df.at[index, 'offers completed'] = offers.sum() for offer, count in offers.items(): df.at[index, offer] = count else: invalid.append(row['person']) print(len(invalid)) df = df.fillna(0) return df df = fill_profile(complete, cleaned_profile) df2133Find Datamethod: find_dataparameters: gd {gd is the grouped data which includes all the transction record per person}, df {df is the customer portfolio}1. Find the total number of offers received for every customer from the original transcript not the updated one2. Calculate the completion rate3. Append the new details in the customer portfiolio dataframe for each userdef find_data(gd, df): gd = gd[(gd.event == 'offer received')].reset_index(drop=True) grouped_data = gd.groupby(['person']) for index, row in df.iterrows(): if row['person'] in grouped_data.groups.keys(): events = grouped_data.get_group(row['person'])['event'].count() df.at[index, 'offers received'] = events df.at[index, 'completion rate'] = row['offers completed'] * 100 / events return df df = find_data(sort_df, df) dfFind amount1. Find the total amount spent by each userdef find_amount(df): amount = pd.DataFrame() values = df.groupby(['person']).sum() amount['person'] = values.index amount['total amount'] = values.amount.to_numpy() return amount total_amount = find_amount(cleaned_transcript) df = df.merge(total_amount, on='person') df = df.reset_index(drop=True) ########### Convert gender to M-0/F-1/O-2 df['gender'] = df['gender'].map({'M': 0, 'F': 1, 'O': 2}) df = df.fillna(0) df data_info(df, 'complete_profile_with_missing_values.csv') data_info(complete, 'transcript_with_missing_values.csv')(14825, 12) (30956, 7)Data Visualization Visualising the Data in 1D Spaceimport matplotlib.pyplot as plt import matplotlib complete.hist(bins=15, color='steelblue', edgecolor='black', linewidth=1.0, xlabelsize=8, ylabelsize=8, grid=False) plt.tight_layout(rect=(0, 0, 1.5, 1.5)) df.hist(bins=15, color='steelblue', edgecolor='black', linewidth=2.0, xlabelsize=20, ylabelsize=20, grid=False) plt.tight_layout(rect=(0, 0, 5.8, 10)) import seaborn as sns f, ax = plt.subplots(figsize=(12, 8)) corr = df.corr() hm = sns.heatmap(round(corr,2), annot=True, ax=ax, cmap="coolwarm",fmt='.2f', linewidths=.05) f.subplots_adjust(top=0.93) t= f.suptitle('Profile Attributes Correlation Heatmap', fontsize=14) plt.figure(figsize=(15,4)) plt.plot(df['completion rate'].value_counts().sort_index())Unsupervised LearningWe will use 2 unsuoervised learning algorithms to check if our data is actually seprable in approximately 5 clustersThe reason being if we get good number of clusters(4 or 5) then we can go and label all the data points according to our logic for likeliness (will be discussed later)df.index = df['person'] df = df.drop(['person'], axis = 1)Normalizing the data Using sklearn's Min Max Scaler we will normalize the data in the range of 0 to 1 so that it 
becomes easier to work with supervised or unsupervised algorithms.from sklearn.preprocessing import MinMaxScaler def normalize_data(df): scaler = MinMaxScaler() df_scaled = pd.DataFrame(scaler.fit_transform(df.astype(float))) df_scaled.columns = df.columns df_scaled.index = df.index return df_scaled df_scaled = normalize_data(df) df_scaledAgglomerative Clustering 1. Plot the dendrogram 2. Based on the dendrogram determine the distance threshold 3. Use the distance threshold to find the number of clusters 4. Check the distribution of clusters import matplotlib.pyplot as plt from hcluster import pdist, linkage, dendrogram X = df_scaled.T.values #Transpose values Y = pdist(X) Z = linkage(Y) plt.subplots(figsize=(18,5)) dendrogram(Z, labels = df_scaled.columns)From the dendrogram we can determine the distance threshold, i.e. the line where we can cut the graph, which is at about 40 on the y axis.from sklearn.cluster import AgglomerativeClustering cluster = AgglomerativeClustering(n_clusters=None, affinity='euclidean', linkage='ward', distance_threshold=40) agg_clusters = np.array(cluster.fit_predict(df_scaled)) unique, counts = np.unique(agg_clusters, return_counts=True) dict(zip(unique, counts))K-means Clustering 1. Apply k-means and find the optimal number of clusters using the elbow method 2. Analyse the number of clusters formed and select the one where the clusters are equally distributed from sklearn.cluster import KMeans from sklearn import metrics from scipy.spatial.distance import cdist import numpy as np import matplotlib.pyplot as plt # create new plot and data plt.plot() X = df_scaled # k means determine k distortions = [] for k in range(1,10): km = KMeans(n_clusters=k, n_init=30) km.fit(X) wcss = km.inertia_ km_clusters = km.predict(X) unique, counts = np.unique(km_clusters, return_counts=True) print("Cluster ", k, dict(zip(unique, counts))) distortions.append(wcss) # Plot the elbow plt.plot(range(1,10), distortions, 'bx-') plt.xlabel('k') plt.ylabel('Distortion') plt.title('The Elbow Method showing the optimal k') plt.show()Cluster 1 {0: 14825} Cluster 2 {0: 7533, 1: 7292} Cluster 3 {0: 3638, 1: 5816, 2: 5371} Cluster 4 {0: 3948, 1: 3327, 2: 4534, 3: 3016} Cluster 5 {0: 3125, 1: 3102, 2: 1708, 3: 2957, 4: 3933} Cluster 6 {0: 2666, 1: 1921, 2: 2832, 3: 3749, 4: 1633, 5: 2024} Cluster 7 {0: 1967, 1: 2151, 2: 2936, 3: 1338, 4: 1903, 5: 1855, 6: 2675} Cluster 8 {0: 1661, 1: 2134, 2: 1250, 3: 1389, 4: 1871, 5: 2934, 6: 1731, 7: 1855} Cluster 9 {0: 1569, 1: 1740, 2: 2703, 3: 1220, 4: 1171, 5: 1605, 6: 1388, 7: 1785, 8: 1644}From the k-means clustering we can see that the clusters are fairly evenly distributed for k = 4 and k = 5, and the elbow graph also suggests that around 5 clusters are suitable.from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=5) kmeans.fit(df_scaled) labels = kmeans.predict(df_scaled) centroids = kmeans.cluster_centers_ unique, counts = np.unique(labels, return_counts=True) print("Cluster 5",dict(zip(unique, counts)))Cluster 5 {0: 1711, 1: 3937, 2: 3105, 3: 3117, 4: 2955}Unsupervised Learning Algorithm ResultsAgglomerative clustering gives really bad results because of the variability in our dataset, whereas k-means gives an average result. We can also see that the elbow graph is not well formed, but we do get an idea of the separability in our dataset. 
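As an additional, hedged check on that separability (not part of the original notebook), a silhouette score can be computed for a few candidate values of k using the `df_scaled` frame built above; higher values indicate better-separated clusters.

```python
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Illustrative sketch: compare silhouette scores for a few cluster counts.
# df_scaled is assumed to be the MinMax-normalized profile data from above.
for k in range(2, 7):
    km = KMeans(n_clusters=k, n_init=10, random_state=0)
    labels = km.fit_predict(df_scaled)
    print(k, round(silhouette_score(df_scaled, labels), 3))
```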
Thereforw we will now use Supervised Learning Algorithms to properly label our data Supervised Learning for Multi-Label Classification Determining the likelinesslogic of determining the likeliness is column*weight,I am determining the likeliness using offer completion rate, and the the offer types i.e. bogo, informational and discount. We assign the completion rate a weightage of 3 and offer type a weightage of 1. We noramlize the dataframe so that all the values in the 3 offer type columns are in the range and therefore the logic begind weightage can be applied. total weights = 3(completion rate) + 1(offer types) = 4score = { (bogo + informational + discount)/3 + completion_rate*3} / total_weightLabel 4 - Very Likely (score>= 80)Label 3 - Likely (score>= 60)Label 2 - Neutral(50% chance) (score>= 40)Label 1 - Unlikely(score>= 20)Label 0 - Very Unlikely (score < 20)def calculate_likeliness(rate): if rate >= 80 and rate <= 100: return 4 elif rate >= 60 and rate < 80: return 3 elif rate >= 40 and rate < 60: return 2 elif rate >= 20 and rate < 40: return 1 else: return 0 def likelihood(row): completion_rate= row[9] discount = row[7] informational = row[6] bogo = row[5] rate = ((discount + informational + bogo)/3 + completion_rate*3)/4 return calculate_likeliness(rate * 100) df_scaled.apply(lambda x: likelihood(x), axis=1) df_scaled['likeliness'] = df_scaled.apply(lambda x: likelihood(x), axis=1) df_scaled ## we can see that the data is well distributed df_scaled.likeliness.value_counts() # drop the columns used for determing the likeliness, so that our supervised learning model is not able to cheat df_scaled = df_scaled.drop(['bogo', 'informational', 'discount', 'completion rate'], axis=1) df_scaled from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score mat = df_scaled.values mat = df_scaled.values X = mat[:,0:7] Y = mat[:,7] seed = 7 test_size = 0.33 X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed) def find_accuracy(y_test, y_pred): accuracy = accuracy_score(y_test, y_pred) print("Accuracy: %.2f%%" % (accuracy * 100.0)) from sklearn.svm import LinearSVC clf = LinearSVC(random_state=0, tol=1e-5) clf.fit(X_train, y_train) # print(clf.coef_) # print(clf.intercept_) y_pred = clf.predict(X_test) find_accuracy(y_test, y_pred) from sklearn.svm import SVC svclassifier = SVC(kernel='rbf') svclassifier.fit(X_train, y_train) y_pred = svclassifier.predict(X_test) find_accuracy(y_test, y_pred) from sklearn.svm import SVC svclassifier = SVC(kernel='poly') svclassifier.fit(X_train, y_train) y_pred = svclassifier.predict(X_test) find_accuracy(y_test, y_pred) from sklearn.svm import SVC svclassifier = SVC(kernel='linear') svclassifier.fit(X_train, y_train) y_pred = svclassifier.predict(X_test) find_accuracy(y_test, y_pred) from sklearn.ensemble import RandomForestClassifier rf_classifier = RandomForestClassifier() rf_classifier.fit(X_train, y_train) rf_pred = rf_classifier.predict(X_test) find_accuracy(y_test, rf_pred)Accuracy: 98.75%Benchmark ModelXgboost Algorithm is our benchmark model because it performs tasks like multilabel classifcation with ease and with very High accuracyfrom xgboost import XGBClassifier model = XGBClassifier() model.fit(X_train, y_train) y_pred = model.predict(X_test) predictions = [round(value) for value in y_pred] find_accuracy(y_test, y_pred)Accuracy: 100.00%Evaluating the Model Now we will evaluate our random forest modelfrom sklearn import model_selection from sklearn.metrics import 
mean_absolute_error, mean_squared_error from math import sqrt from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, rf_pred)) print(classification_report(y_test, rf_pred)) kfold = model_selection.KFold(n_splits=10, random_state=seed) scoring = 'accuracy' results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring) print('Accuracy -val set: %.2f%% (%.2f)' % (results.mean()*100, results.std())) print("MAE test score:", mean_absolute_error(y_test, rf_pred)) print("RMSE test score:", sqrt(mean_squared_error(y_test, rf_pred)))[[1164 0 0 0 0] [ 0 1213 1 0 0] [ 0 0 1304 1 0] [ 0 4 25 713 2] [ 0 0 3 25 438]] precision recall f1-score support 0.0 1.00 1.00 1.00 1164 1.0 1.00 1.00 1.00 1214 2.0 0.98 1.00 0.99 1305 3.0 0.96 0.96 0.96 744 4.0 1.00 0.94 0.97 466 accuracy 0.99 4893 macro avg 0.99 0.98 0.98 4893 weighted avg 0.99 0.99 0.99 4893 Accuracy -val set: 100.00% (0.00) MAE test score: 0.01389740445534437 RMSE test score: 0.12945514583973036We can observe that our model performs really well and it has accuracy nearly as our benchmark model with really low MAE and RMSE Scores Now solving the same problem by filling the missing valuesundefined = None income_max = cleaned_profile.income.describe().max() undefined_group = undefined_group.fillna(income_max) # undefined = pd.concat([undefined_group, cleaned_profile], ignore_index=True) # undefined complete_transcript = transcript.copy() value = complete_transcript['value'] complete_transcript['amount'] = [int(i['amount']) if i.get('amount') else 0 for i in value] complete_transcript['offer_id'] = [i['offer_id'] if i.get('offer_id') else (i['offer id'] if i.get('offer id') else '0') for i in value] complete_transcript.drop(['value'], axis=1, inplace=True) sort_df = complete_transcript.sort_values(by=['person', 'time']) sort_df users = sort_df.groupby('person').apply(lambda x: get_valid_data(x)) users = users.dropna() users = users.reset_index(drop=True) users valid_df = valid_offer_completed(users, cleaned_portfolio) valid_df complete_info = sort_df[sort_df['offer_id'].isin(['3f207df678b143eea3cee63160fa8bed', '5a8bc65990b245e5a138643cd4eb9837'])] complete_info_data = complete_info.groupby('person').apply(lambda x: info_offer(x)) complete_info_data =complete_info_data.reset_index(drop=True) complete_info_data complete_df = pd.concat([valid_df, complete_info_data], ignore_index=True) complete_df full_profile = profile.copy() full_profile['gender'] = full_profile['gender'].fillna('U') full_profile['income'] = full_profile['income'].fillna(income_max) full_profile df2 = fill_profile(complete_df, full_profile) df2 = find_data(sort_df, df2) df2 = df2.fillna(0) df2 total_amount = find_amount(complete_transcript) df2 = df2.merge(total_amount, on='person') df2 = df2.reset_index(drop=True) df2.index = df2['person'] df2 = df2.drop(['person'], axis = 1) ########### Convert gender to 0/1/2 df2['gender'] = df2['gender'].map({'M': 0, 'F': 1, 'O': 2, 'U':3}) df2_scaled = normalize_data(df2) df2_scaled data_info(df2, 'complete_profile.csv') data_info(complete_df, 'complete_transcript.csv') df2_scaled.apply(lambda x: likelihood(x), axis=1) df2_scaled['likeliness'] = df2_scaled.apply(lambda x: likelihood(x), axis=1) df2_scaled df2_scaled = df2_scaled.drop(['bogo', 'informational', 'discount', 'completion rate'], axis=1) df2_scaled mat2 = df2_scaled.values X2 = mat2[:,0:7] Y2 = mat2[:,7] seed = 7 test_size = 0.20 X2_train, X2_test, y2_train, y2_test = train_test_split(X2, Y2, test_size=test_size, 
random_state=seed) ##Random Forest rf2_classifier = RandomForestClassifier() rf2_classifier.fit(X2_train, y2_train) rf2_pred = rf2_classifier.predict(X2_test) find_accuracy(y2_test, rf2_pred) ## XGBoost Algorithm model2 = XGBClassifier() model2.fit(X2_train, y2_train) y2_pred = model2.predict(X2_test) predictions2 = [round(value) for value in y2_pred] find_accuracy(y2_test, predictions2)Accuracy: 100.00% Conclusion In this project I have tried to determine how likely a user is to complete an offer. I used some data visualization to explain relationships in the data. Then I used unsupervised learning techniques to determine how separable the data is and whether we can actually divide it into roughly 5 clusters. Next I determined the likeliness of every data point and removed the columns used to calculate it, so the supervised learning model cannot deduce the likeliness directly from those columns. I then split the data into training and test sets and passed it to several SVM models with different kernels. One thing I observed is that tree models like Random Forest or XGBoost perform really well for multi-label tasks like this. Hence we will choose Gradient Boost as the algorithm of our choice and XGBoost as a benchmark model. For evaluating our model we look at the confusion matrix, which shows high precision and high recall, meaning that our results have been labeled correctly. Although our models achieve very good accuracy, that does not mean they are perfect; it simply means that we have relatively little data and it is not very high dimensional, so classifying it is an easy task. Multi-label classification is easy for low-dimensional data. Missing Values vs Non-Missing Values When we removed missing values from our data we found a good balance between the classes, but when we added those missing values back and performed inference on that data we noticed that our class imbalance increased (a class-weighting sketch that might mitigate this is shown below). 
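Given that imbalance, one hedged option (not explored in the original notebook) is to re-weight the classes when fitting the random forest; `X2_train`, `y2_train`, `X2_test`, `y2_test` and `find_accuracy` are the objects defined in the cells above.

```python
from sklearn.ensemble import RandomForestClassifier

# Illustrative sketch only: class_weight='balanced' weights samples inversely
# to class frequency, which can help when some likeliness labels are rare.
rf_balanced = RandomForestClassifier(class_weight='balanced', random_state=7)
rf_balanced.fit(X2_train, y2_train)
find_accuracy(y2_test, rf_balanced.predict(X2_test))
```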
Otherwise, the performance is the samedf2['offers received'].describe() df2['total amount'].describe() df['likeliness'] = df_scaled['likeliness'] def find_info(df2, class_label): label = df2[df2['likeliness'] == class_label] print("Likeliness ==", class_label) print("bogo", label.bogo.sum()) print("discount", label.discount.sum()) print("informational", label.informational.sum()) print("offers received", label['offers received'].sum()) print() find_info(df, 4) find_info(df, 3) find_info(df, 2) find_info(df, 1) find_info(df, 0)Likeliness == 4 bogo 2244.0 discount 2331.0 informational 1433.0 offers received 6008.0 Likeliness == 3 bogo 2855.0 discount 2871.0 informational 1931.0 offers received 9628.0 Likeliness == 2 bogo 3674.0 discount 3998.0 informational 2625.0 offers received 17959.0 Likeliness == 1 bogo 1675.0 discount 2101.0 informational 1749.0 offers received 16269.0 Likeliness == 0 bogo 344.0 discount 511.0 informational 614.0 offers received 16637.0Unsupervised Analysis of Days of WeekTreating crossings each day as features to learn about the relationships between various days%matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn-pastel') import pandas as pd import numpy as np from sklearn.decomposition import PCA from sklearn.mixture import GaussianMixtureGet Datafrom jupyterworkflow.data import get_fremont_data data = get_fremont_data() pivoted = data.pivot_table('Total', index=data.index.time, columns=data.index.date) pivoted.plot(legend=False, alpha = 0.01);Principal Component AnalysisX = pivoted.fillna(0).T.values X.shape X2 = PCA(2, svd_solver='full').fit_transform(X) X2.shape plt.scatter(X2[:,0], X2[:, 1]);Unsupervised Clusteringgmm = GaussianMixture(2).fit(X) labels = gmm.predict(X) plt.scatter(X2[:,0], X2[:, 1], c = labels, cmap = 'rainbow') plt.colorbar(); fig, ax = plt.subplots(1,2, figsize=(14,6)) pivoted.T[labels == 0].T.plot(legend=False, alpha = 0.1, ax = ax[0]); pivoted.T[labels == 1].T.plot(legend=False, alpha = 0.1, ax = ax[1]); ax[0].set_title('Red Cluster'); ax[1].set_title('Purple Cluster');Comparing with Day of Weekdayofweek=pd.DatetimeIndex(pivoted.columns).dayofweek plt.scatter(X2[:,0], X2[:, 1], c = dayofweek, cmap = 'rainbow') plt.colorbar();Analyzing OutliersThe following points are weekdays with holiday-like patterndates = pd.DatetimeIndex(pivoted.columns) dates[(labels ==0) & (dayofweek < 5)]used primarily if i am importing a module from another source and just want to run a specific function# print (__name__) # print ('first module : {}'. format (__name__))first module : __main__[link](https://www.youtube.com/watch?v=sugvnHA7ElY)def main(): print ('this is my first module') if __name__ == '__main__' : main()this is my first module02 | Decision Tree. A Supervised Classification Model - Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄 Discipline to Search Solutions in Google > Apply the following steps when **looking for solutions in Google**:>> 1. **Necesity**: How to load an Excel in Python?> 2. **Search in Google**: by keywords> - `load excel python`> - ~~how to load excel in python~~> 3. 
**Solution**: What's the `function()` that loads an Excel in Python?> - A Function to Programming is what the Atom to Phisics.> - Every time you want to do something in programming> - **You will need a `function()`** to make it> - Theferore, you must **detect parenthesis `()`**> - Out of all the words that you see in a website> - Because they indicate the presence of a `function()`. Load the Data > Load the Titanic dataset with the below commands> - This dataset **people** (rows) aboard the Titanic> - And their **sociological characteristics** (columns)> - The aim of this dataset is to predict the probability to `survive`> - Based on the social demographic characteristics.import seaborn as sns df = sns.load_dataset(name='titanic').iloc[:, :4] df.head()`DecisionTreeClassifier()` Model in Python Build the Model > 1. **Necesity**: Build Model> 2. **Google**: How do you search for the solution?> 3. **Solution**: Find the `function()` that makes it happen Code Thinking> Which function computes the Model?> - `fit()`>> How could can you **import the function in Python**?fit() model.fit()`model = ?`from sklearn.tree import DecisionTreeClassifier model = DecisionTreeClassifier() model.__dict__ model.fit()Separate Variables for the Model> Regarding their role:> 1. **Target Variable `y`**>> - [ ] What would you like **to predict**?>> 2. **Explanatory Variable `X`**>> - [ ] Which variable will you use **to explain** the target?explanatory = df.drop(columns='survived') target = df.survivedFinally `fit()` the Modelmodel.__dict__ model.fit(X=explanatory, y=target) import pandas as pd pd.get_dummies(data=df) df = pd.get_dummies(data=df, drop_first=True) df explanatory = df.drop(columns='survived') target = df.survived model.fit(X=explanatory, y=target) df df.isna().sum() df.fillna('hola') df.dropna(inplace=True) # df = df.dropna() df df.dropna(inplace=True) # df = df.dropna() df explanatory = df.drop(columns='survived') target = df.survived model.fit(X=explanatory, y=target)Calculate a Prediction with the Model > - `model.predict_proba()`model.predict_proba()Model Visualization > - `tree.plot_tree()` Model Interpretation > Why `sex` is the most important column? What has to do with **EDA** (Exploratory Data Analysis)?%%HTML Prediction vs Reality > How good is our model?dfsel = df[['survived']].copy() dfsel['pred'] = model.predict(X=explanatory) dfsel.sample(10) comp = dfsel.survived == dfsel.pred comp.sum() comp.sum()/714 comp.mean()Precision > - `model.score()`model.score(X=explanatory, y=target)Fairseq in Amazon SageMaker: Pre-trained English to French translation modelIn this notebook, we will show you how to serve an English to French translation model using pre-trained model provided by the [Fairseq toolkit](https://github.com/pytorch/fairseq) PermissionsRunning this notebook requires permissions in addition to the regular SageMakerFullAccess permissions. This is because it creates new repositories in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy AmazonEC2ContainerRegistryFullAccess to the role that you used to start your notebook instance. There's no need to restart your notebook instance when you do this, the new permissions will be available immediately. Download pre-trained modelFairseq maintains their pre-trained models [here](https://github.com/pytorch/fairseq/blob/master/examples/translation/README.md). We will use the model that was pre-trained on the [WMT14 English-French](http://statmt.org/wmt14/translation-task.htmlDownload) dataset. 
As the models are archived in .bz2 format, we need to convert them to .tar.gz as this is the format supported by Amazon SageMaker. Convert archive%%sh wget https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2 tar xvjf wmt14.v2.en-fr.fconv-py.tar.bz2 > /dev/null cd wmt14.en-fr.fconv-py mv model.pt checkpoint_best.pt tar czvf wmt14.en-fr.fconv-py.tar.gz checkpoint_best.pt dict.en.txt dict.fr.txt bpecodes README.md > /dev/nullThe pre-trained model has been downloaded and converted. The next step is upload the data to Amazon S3 in order to make it available for running the inference. Upload data to Amazon S3import sagemaker sagemaker_session = sagemaker.Session() region = sagemaker_session.boto_session.region_name account = sagemaker_session.boto_session.client("sts").get_caller_identity().get("Account") bucket = sagemaker_session.default_bucket() prefix = "sagemaker/DEMO-pytorch-fairseq/pre-trained-models" role = sagemaker.get_execution_role() trained_model_location = sagemaker_session.upload_data( path="wmt14.en-fr.fconv-py/wmt14.en-fr.fconv-py.tar.gz", bucket=bucket, key_prefix=prefix )Build Fairseq serving containerNext we need to register a Docker image in Amazon SageMaker that will contain the Fairseq code and that will be pulled at inference time to perform the of the precitions from the pre-trained model we downloaded.%%sh chmod +x create_container.sh ./create_container.sh pytorch-fairseq-serveThe Fairseq serving image has been pushed into Amazon ECR, the registry from which Amazon SageMaker will be able to pull that image and launch both training and prediction. Hosting the pre-trained model for inferenceWe first needs to define a base JSONPredictor class that will help us with sending predictions to the model once it's hosted on the Amazon SageMaker endpoint.from sagemaker.predictor import RealTimePredictor, json_serializer, json_deserializer class JSONPredictor(RealTimePredictor): def __init__(self, endpoint_name, sagemaker_session): super(JSONPredictor, self).__init__( endpoint_name, sagemaker_session, json_serializer, json_deserializer )We can now use the Model class to deploy the model artificats (the pre-trained model), and deploy it on a CPU instance. Let's use a `ml.m5.xlarge`.from sagemaker import Model algorithm_name = "pytorch-fairseq-serve" image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, algorithm_name) model = Model( model_data=trained_model_location, role=role, image=image, predictor_cls=JSONPredictor, ) predictor = model.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge")Now it's your time to play. Input a sentence in English and get the translation in French by simply calling predict.import html result = predictor.predict("I love translation") # Some characters are escaped HTML-style requiring to unescape them before printing print(html.unescape(result))Once you're done with getting predictions, remember to shut down your endpoint as you no longer need it. 
Delete endpointmodel.sagemaker_session.delete_endpoint(predictor.endpoint)``` Make sure to change runtime type to hardware accelerator for the boost in performance``` **Figuring** out the transitions between Localization and Sub-ballistic behaviour of the Quantum Walks## load cython module to convert python code to C %load_ext CythonThe following code generates data and graph for Hierarchical Disorder in the Qunatum walks, the tuning parameter eps show the transition from sub-ballistic to localized behaviour of the walk%%cython import numpy as np from numpy import log2 import math from math import log,pi,ceil,floor import matplotlib.pyplot as plt import cmath from cmath import exp from numpy import ones, zeros, sin, cos, array, roll, sqrt import random from random import uniform import gc from scipy import stats from numba import jit,njit, prange,vectorize import joblib from joblib import Parallel, delayed gc.collect() def index(g, N): g=abs(g-N) if g % 2 == 1:return 0; if g % (2 ** 20) == 0:return 20; if g % (2 ** 19) == 0:return 19; if g % (2 ** 18) == 0:return 18; if g % (2 ** 17) == 0:return 17; if g % (2 ** 16) == 0:return 16; if g % (2 ** 15) == 0:return 15; if g % (2 ** 14) == 0:return 14; if g % (2 ** 13) == 0:return 13; if g % (2 ** 12) == 0:return 12; if g % (2 ** 11) == 0:return 11; if g % (2 ** 10) == 0:return 10; if g % (2 ** 9) == 0:return 9; if g % (2 ** 8) == 0:return 8; if g % (2 ** 7) == 0:return 7; if g % (2 ** 6) == 0:return 6; if g % (2 ** 5) == 0:return 5; if g % (2 ** 4) == 0:return 4; if g % (2 ** 3) == 0:return 3; if g % (2 ** 2) == 0:return 2; if g % (2) == 0:return 1; #Hierarchical Disorder rotation def rotation_1(N,eps, w): q = [pow(eps,index(g,N))*0.25*pi for g in range(2*N+1) ] hash_map = {} for g in range(21): hash_map[g] = exp(1j * uniform(-w, w) * pi) disorder =array([hash_map[index(g,N)] for g in range(2*N+1)]) SIN = array([a*b for a,b in zip(sin(q),disorder)]) SINM = array([-a*b for a,b in zip(sin(q),disorder)]) COS =array([a*b for a,b in zip(cos(q),disorder)]) return array([[SIN, COS], [COS, SINM]]) def qw_split_avg(eps, w): N = 10_000 a = 1 / sqrt(2.0) b = 1j / sqrt(2.0) avg_disorder = zeros(21) r1 = rotation_1(N,eps, w) psi = np.zeros((2, 2 * N + 1), dtype=complex) psi[0,N] = a psi[1, N] = b std_dev = np.zeros(N + 1,dtype=float) positions = np.arange(-N,N+1) #positions_sq = [i ** 2 for i in range(-N, N + 1)] pow_2 = [pow(2, i) for i in range(1, 21)] j = 0 for n in range(1, N + 1): psi[:,N-n:N+n+1] = np.einsum("ijk,jk->ik", r1[:,:,N-n:N+n+1], psi[:,N-n:N+n+1], optimize="optimal") # rotation theta1 psi[0] = roll(psi[0], 1) # shift up psi[1] = roll(psi[1], -1) # shift down if n == pow_2[j]: psi_sq = abs(psi[0, N - n : N + n + 1]) ** 2 + abs(psi[1, N - n : N + n + 1]) ** 2 sum_0 = np.sum([(a**2)* b for a, b in zip(positions[N - n : N + n + 1], psi_sq)]) sum_1 = np.sum([(a * b) ** 2 for a, b in zip(positions[N - n : N + n + 1], psi_sq)]) std_dev[n] = sqrt(sum_0 - sum_1) j += 1 #Make sure to change the denominator corresponding to number of averaging iterations avg_disorder[j] += std_dev[n]/50 return avg_disorder ##Parellizing the averaging using joblib library def qw_split(eps,w): with joblib.parallel_backend(backend="threading"): parallel = Parallel(verbose=5) standard_dev= np.sum(parallel([delayed(qw_split_avg)(eps, w) for k in range(50)]),axis=0) return standard_dev def main(): W = [pi,0.5*pi,0.1*pi, 0.05*pi] #change tuning parameter to observe different behaviors of the walks eps=0.8 with joblib.parallel_backend(backend="threading"): parallel = 
Parallel(verbose=5) standard_dev= (parallel([delayed(qw_split)(eps, j) for j in W])) print(standard_dev) for j in range(4): log_std=[] n=0 for i in standard_dev[j]: if i != 0: n+=1 log_std.append(log(i, 2)/n) if i==0: log_std.append(0) log_scale =[1/i for i in range(1,14)] log_scale =[0]+log_scale res = np.polyfit(log_scale[10:14] ,log_std[10:14],1) log_std[0] = res[1] print("Intercept for W = "+str(W[j])+" ="+str(res[1])) fig2 = plt.figure(2) ax2 = fig2.add_subplot(111) plot2 = plt.figure(2) ax2.plot(log_scale,log_std[0:14], "x", label="W= " + str(W[j])) ax2.legend(loc="lower right", frameon=False) plt.xlabel("Log-time steps") plt.ylabel("Log-Mean Squared Displacement") plt.title("STDEv for Hierarchical Disorder with eps= "+str(eps)) plt.xlim(0, 0.2) plt.savefig(f"{round(j,2)} plot.png", dpi=600) main() %%cython import numpy as np from numpy import log2 import math from math import log,pi,ceil,floor import matplotlib.pyplot as plt import cmath from cmath import exp from numpy import ones, zeros, sin, cos, array, roll, sqrt import random from random import uniform import gc from scipy import stats from numba import jit,njit, prange,vectorize import joblib from joblib import Parallel, delayed gc.collect() def index(g, N): g=abs(g-N) if g % 2 == 1:return 0; if g % (2 ** 20) == 0:return 20; if g % (2 ** 19) == 0:return 19; if g % (2 ** 18) == 0:return 18; if g % (2 ** 17) == 0:return 17; if g % (2 ** 16) == 0:return 16; if g % (2 ** 15) == 0:return 15; if g % (2 ** 14) == 0:return 14; if g % (2 ** 13) == 0:return 13; if g % (2 ** 12) == 0:return 12; if g % (2 ** 11) == 0:return 11; if g % (2 ** 10) == 0:return 10; if g % (2 ** 9) == 0:return 9; if g % (2 ** 8) == 0:return 8; if g % (2 ** 7) == 0:return 7; if g % (2 ** 6) == 0:return 6; if g % (2 ** 5) == 0:return 5; if g % (2 ** 4) == 0:return 4; if g % (2 ** 3) == 0:return 3; if g % (2 ** 2) == 0:return 2; if g % (2) == 0:return 1; def rotation_1(N,eps, w): q = [pow(eps,index(g,N))*0.25*pi for g in range(2*N+1) ] disorder =array([exp(1j * uniform(-w, w) * pi) for g in range(2*N+1)]) SIN = array([a*b for a,b in zip(sin(q),disorder)]) SINM = array([-a*b for a,b in zip(sin(q),disorder)]) COS =array([a*b for a,b in zip(cos(q),disorder)]) return array([[SIN, COS], [COS, SINM]]) def qw_split_avg(eps, w): N = 10_000 a = 1 / sqrt(2.0) b = 1j / sqrt(2.0) avg_disorder = zeros(21) r1 = rotation_1(N,eps, w) psi = np.zeros((2, 2 * N + 1), dtype=complex) psi[0,N] = a psi[1, N] = b std_dev = np.zeros(N + 1,dtype=float) positions = np.arange(-N,N+1) #positions_sq = [i ** 2 for i in range(-N, N + 1)] pow_2 = [pow(2, i) for i in range(1, 21)] j = 0 for n in range(1, N + 1): psi[:,N-n:N+n+1] = np.einsum("ijk,jk->ik", r1[:,:,N-n:N+n+1], psi[:,N-n:N+n+1], optimize="optimal") # rotation theta1 psi[0] = roll(psi[0], 1) # shift up psi[1] = roll(psi[1], -1) # shift down if n == pow_2[j]: psi_sq = abs(psi[0, N - n : N + n + 1]) ** 2 + abs(psi[1, N - n : N + n + 1]) ** 2 sum_0 = np.sum([(a**2)* b for a, b in zip(positions[N - n : N + n + 1], psi_sq)]) sum_1 = np.sum([(a * b) ** 2 for a, b in zip(positions[N - n : N + n + 1], psi_sq)]) std_dev[n] = sqrt(sum_0 - sum_1) j += 1 avg_disorder[j] += std_dev[n]/50 return avg_disorder def qw_split(eps,w): with joblib.parallel_backend(backend="threading"): parallel = Parallel(verbose=5) standard_dev= np.sum(parallel([delayed(qw_split_avg)(eps, w) for k in range(50)]),axis=0) return standard_dev def main(): W = [pi,0.5*pi,0.1*pi, 0.05*pi] eps=1 with joblib.parallel_backend(backend="threading"): parallel = Parallel(verbose=5) 
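# The call below dispatches one qw_split(eps, w) job per disorder strength w in W through
# joblib's threading backend; each qw_split in turn averages qw_split_avg over 50 disorder
# realizations, so standard_dev ends up holding one averaged curve per value of W.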
standard_dev= (parallel([delayed(qw_split)(eps, j) for j in W])) print(standard_dev) for j in range(4): log_std=[] n=0 for i in standard_dev[j]: if i != 0: n+=1 log_std.append(log(i, 2)/n) if i==0: log_std.append(0) log_scale =[1/i for i in range(1,14)] log_scale =[0]+log_scale res = np.polyfit(log_scale[10:14] ,log_std[10:14],1) log_std[0] = res[1] print("Intercept for W = "+str(W[j])+" ="+str(res[1])) fig2 = plt.figure(2) ax2 = fig2.add_subplot(111) plot2 = plt.figure(2) ax2.plot(log_scale,log_std[0:14], "x", label="W= " + str(W[j])) ax2.legend(loc="lower right", frameon=False) plt.xlabel("Log-time steps") plt.ylabel("Log-Mean Squared Displacement") plt.title("STDEv for Regular Disorder with eps= "+str(eps)) plt.xlim(0, 0.2) plt.savefig(f"{round(j,2)} plot.png", dpi=600) main() %load_ext CythonThe Cython extension is already loaded. To reload it, use: %reload_ext CythonThe following code would generate snapshots at any time-step. To observe disorder plots switch rotation function from the codes given above. The tuning parameter eps will provide further help in observing transitions.%%cython import numpy as np from numpy import log2 import math from math import log,pi,ceil,floor import matplotlib.pyplot as plt import cmath from cmath import exp from numpy import ones, zeros, sin, cos, array, roll, sqrt import random from random import uniform import gc from scipy import stats from numba import jit,njit, prange,vectorize import joblib from joblib import Parallel, delayed gc.collect() def index(g, N): g=abs(g-N) if g % 2 == 1:return 0; if g % (2 ** 20) == 0:return 20; if g % (2 ** 19) == 0:return 19; if g % (2 ** 18) == 0:return 18; if g % (2 ** 17) == 0:return 17; if g % (2 ** 16) == 0:return 16; if g % (2 ** 15) == 0:return 15; if g % (2 ** 14) == 0:return 14; if g % (2 ** 13) == 0:return 13; if g % (2 ** 12) == 0:return 12; if g % (2 ** 11) == 0:return 11; if g % (2 ** 10) == 0:return 10; if g % (2 ** 9) == 0:return 9; if g % (2 ** 8) == 0:return 8; if g % (2 ** 7) == 0:return 7; if g % (2 ** 6) == 0:return 6; if g % (2 ** 5) == 0:return 5; if g % (2 ** 4) == 0:return 4; if g % (2 ** 3) == 0:return 3; if g % (2 ** 2) == 0:return 2; if g % (2) == 0:return 1; def rotation_1(N,eps): q = [pow(eps,index(g,N))*0.25*pi for g in range(2*N+1) ] return array([[sin(q), cos(q)], [cos(q), -sin(q)]]) def qw_split(eps,N): a = 1 / sqrt(2.0) b = 1j / sqrt(2.0) avg_disorder = zeros(21) r1 = rotation_1(N,eps) psi = np.zeros((2, 2 * N + 1), dtype=complex) psi_t = zeros((2, 2*N + 1, N+1), dtype = complex) psi_t[:,:,0] = psi psi[0,N] = a psi[1, N] = b for n in range(1, N + 1): psi[:,N-n:N+n+1] = np.einsum("ijk,jk->ik", r1[:,:,N-n:N+n+1], psi[:,N-n:N+n+1], optimize="optimal") # rotation theta1 psi[0] = roll(psi[0], 1) # shift up psi[1] = roll(psi[1], -1) # shift down psi_t[:,:,n] = psi return psi_t def measure(psi): return abs(psi[0,:])**2 + abs(psi[1,:])**2 def main(): N=1000 P=2*N+1 eps=0.7 for n in range(0,N+1,100): psi_t=qw_split(eps,N) psi = psi_t[:,:,n] prob = measure(psi) fig1 = plt.figure(1) ax1 = fig1.add_subplot(111) plt.title("Espilon= "+str(eps)+" n= "+str(n)) plot1=plt.figure(1) ax1.plot(range(P), prob) ax1.plot(range(P), prob, 'o') loc = range (0, P, int(P / 10)) #Location of ticks plt.xticks(loc) plt.xlim(0, P) ax1.set_xticklabels(range (-N, N+1,int(P/10))) plt.show() main()Descargar Precios con API OANDA V20 ---import ta as ta import pandas as pd from oandapyV20 import API import oandapyV20.endpoints.pricing as pricing import oandapyV20.endpoints.instruments as instruments A1_OA_Da = 17 # Day 
Align A1_OA_Ta = "America/Mexico_City" # Time Align A1_OA_Ai = "101-004-2221697-001" # Id de cuenta A1_OA_At = "practice" # Tipo de cuenta A1_OA_In = "USD_MXN" # Instrumento A1_OA_Gn = "H1" # Granularidad de velas A1_OA_Ak = "a"+ "da4a61b0d5bc0e5939365e01450b614" + "-4121f84f01ad78942c46fc3ac777baa" + "6" F1 = "2017-01-01T00:00:00Z" F2 = "2017-02-01T00:00:00Z" api = API(access_token=A1_OA_Ak) params={"granularity": A1_OA_Gn, "price": "M", "dailyAlignment": A1_OA_Da, "alignmentTimezone": A1_OA_Ta, "from": F1, "to": F2} A1_Req1 = instruments.InstrumentsCandles(instrument=A1_OA_In, params=params) A1_Hist = api.request(A1_Req1) lista = [] for i in range(len(A1_Hist['candles'])-1): lista.append({'TimeStamp': A1_Hist['candles'][i]['time'], 'Open': A1_Hist['candles'][i]['mid']['o'], 'High': A1_Hist['candles'][i]['mid']['h'], 'Low': A1_Hist['candles'][i]['mid']['l'], 'Close': A1_Hist['candles'][i]['mid']['c']}) pd_hist = pd.DataFrame(lista) pd_hist = pd_hist[['TimeStamp', 'Open', 'High', 'Low', 'Close']] pd_hist['TimeStamp'] = pd.to_datetime(pd_hist['TimeStamp'])Nanodegree Engenheiro de Machine Learning Aprendizado Supervisionado Projeto: Encontrando doadores para a *CharityML* Seja bem-vindo ao segundo projeto do Nanodegree Engenheiro de Machine Learning! Neste notebook, você receberá alguns códigos de exemplo e será seu trabalho implementar as funcionalidades adicionais necessárias para a conclusão do projeto. As seções cujo cabeçalho começa com **'Implementação'** indicam que o bloco de código posterior requer funcionalidades adicionais que você deve desenvolver. Para cada parte do projeto serão fornecidas instruções e as diretrizes da implementação estarão marcadas no bloco de código com uma expressão `'TODO'`. Por favor, leia cuidadosamente as instruções!Além de implementações de código, você terá de responder questões relacionadas ao projeto e à sua implementação. Cada seção onde você responderá uma questão terá um cabeçalho com o termo **'Questão X'**. Leia com atenção as questões e forneça respostas completas nas caixas de texto que começam com o termo **'Resposta:'**. A submissão do seu projeto será avaliada baseada nas suas resostas para cada uma das questões além das implementações que você disponibilizar.>**Nota:** Por favor, especifique QUAL A VERSÃO DO PYTHON utilizada por você para a submissão deste notebook. As células "Code" e "Markdown" podem ser executadas utilizando o atalho do teclado **Shift + Enter**. Além disso, as células "Markdown" podem ser editadas clicando-se duas vezes na célula. IniciandoNeste projeto, você utilizará diversos algoritmos de aprendizado supervisionado para modelar com precisão a remuneração de indivíduos utilizando dados coletados no censo americano de 1994. Você escolherá o algoritmo mais adequado através dos resultados preliminares e irá otimizá-lo para modelagem dos dados. O seu objetivo com esta implementação é construir um modelo que pode predizer com precisão se um indivíduo possui uma remuneração superior a $50,000. Este tipo de tarefa pode surgir em organizações sem fins lucrativos que sobrevivem de doações. Entender a remuneração de um indivíduo pode ajudar a organização o montante mais adequado para uma solicitação de doação, ou ainda se eles realmente deveriam entrar em contato com a pessoa. Enquanto pode ser uma tarefa difícil determinar a faixa de renda de uma pesssoa de maneira direta, nós podemos inferir estes valores através de outros recursos disponíveis publicamente. 
O conjunto de dados para este projeto se origina do [Repositório de Machine Learning UCI](https://archive.ics.uci.edu/ml/datasets/Census+Income) e foi cedido por e , após a sua publicação no artigo _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_. Você pode encontrar o artigo de [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). Os dados que investigaremos aqui possuem algumas pequenas modificações se comparados com os dados originais, como por exemplo a remoção da funcionalidade `'fnlwgt'` e a remoção de registros inconsistentes. ---- Explorando os dadosExecute a célula de código abaixo para carregas as bibliotecas Python necessárias e carregas os dados do censo. Perceba que a última coluna deste conjunto de dados, `'income'`, será o rótulo do nosso alvo (se um indivíduo possui remuneração igual ou maior do que $50,000 anualmente). Todas as outras colunas são dados de cada indívduo na base de dados do censo.# Importe as bibliotecas necessárias para o projeto. import numpy as np import pandas as pd from time import time from IPython.display import display # Permite a utilização da função display() para DataFrames. import warnings warnings.filterwarnings('ignore') # Importação da biblioteca de visualização visuals.py import visuals as vs # Exibição amigável para notebooks %matplotlib inline # Carregando os dados do Censo data = pd.read_csv("census.csv") # Sucesso - Exibindo o primeiro registro display(data.head(n=1))Implementação: Explorando os DadosUma investigação superficial da massa de dados determinará quantos indivíduos se enquadram em cada grupo e nos dirá sobre o percentual destes indivúdos com remuneração anual superior à \$50,000. No código abaixo, você precisará calcular o seguinte:- O número total de registros, `'n_records'`- O número de indivíduos com remuneração anual superior à \$50,000, `'n_greater_50k'`.- O número de indivíduos com remuneração anual até \$50,000, `'n_at_most_50k'`.- O percentual de indivíduos com remuneração anual superior à \$50,000, `'greater_percent'`.** DICA: ** Você pode precisar olhar a tabela acima para entender como os registros da coluna `'income'` estão formatados.# TODO: Número total de registros. n_records = data.shape[0] # TODO: Número de registros com remuneração anual superior à $50,000 n_greater_50k = data['income'].value_counts()['>50K'] # TODO: O número de registros com remuneração anual até $50,000 n_at_most_50k = data['income'].value_counts()['<=50K'] # TODO: O percentual de indivíduos com remuneração anual superior à $50,000 greater_percent = n_greater_50k / n_records * 100 # Exibindo os resultados print("Total number of records: {}".format(n_records)) print("Individuals making more than $50,000: {}".format(n_greater_50k)) print("Individuals making at most $50,000: {}".format(n_at_most_50k)) print("Percentage of individuals making more than $50,000: {:.2f}%".format(greater_percent))Total number of records: 45222 Individuals making more than $50,000: 11208 Individuals making at most $50,000: 34014 Percentage of individuals making more than $50,000: 24.78%**Explorando as colunas*** **age**: contínuo. * **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked. * **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool. * **education-num**: contínuo. 
* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse. * **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces. * **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried. * **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other. * **sex**: Female, Male. * **capital-gain**: contínuo. * **capital-loss**: contínuo. * **hours-per-week**: contínuo. * **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands. ---- Preparando os dadosAntes de que os dados possam ser utilizados como input para algoritmos de machine learning, muitas vezes eles precisam ser tratados, formatados e reestruturados — este processo é conhecido como **pré-processamento**. Felizmente neste conjunto de dados não existem registros inconsistentes para tratamento, porém algumas colunas precisam ser ajustadas. Este pré-processamento pode ajudar muito com o resultado e poder de predição de quase todos os algoritmos de aprendizado. Transformando os principais desvios das colunas contínuasUm conjunto de dados pode conter ao menos uma coluna onde os valores tendem a se próximar para um único número, mas também podem conter registros com o mesmo atributo contendo um valor muito maior ou muito menor do que esta tendência. Algoritmos podem ser sensíveis para estes casos de distribuição de valores e este fator pode prejudicar sua performance se a distribuição não estiver normalizada de maneira adequada. Com o conjunto de dados do censo, dois atributos se encaixam nesta descrição: '`capital-gain'` e `'capital-loss'`.Execute o código da célula abaixo para plotar um histograma destes dois atributos. Repare na distribuição destes valores.# Dividindo os dados entre features e coluna alvo income_raw = data['income'] features_raw = data.drop('income', axis = 1) # Visualizando os principais desvios das colunas contínuas entre os dados vs.distribution(data)Para atributos com distribuição muito distorcida, tais como `'capital-gain'` e `'capital-loss'`, é uma prática comum aplicar uma transformação logarítmica nos dados para que os valores muito grandes e muito pequenos não afetem a performance do algoritmo de aprendizado. Usar a transformação logarítmica reduz significativamente os limites dos valores afetados pelos outliers (valores muito grandes ou muito pequenos). Deve-se tomar cuidado ao aplicar esta transformação, pois o logaritmo de `0` é indefinido, portanto temos que incrementar os valores em uma pequena quantia acima de `0` para aplicar o logaritmo adequadamente.Execute o código da célula abaixo para realizar a transformação nos dados e visualizar os resultados. De novo, note os valores limite e como os valores estão distribuídos.# Aplicando a transformação de log nos registros distorcidos. 
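# Note: np.log(x + 1) (i.e., log1p) keeps zero entries at zero while compressing the long right tails of 'capital-gain' and 'capital-loss'.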
skewed = ['capital-gain', 'capital-loss'] features_log_transformed = pd.DataFrame(data = features_raw) features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1)) # Visualizando as novas distribuições após a transformação. vs.distribution(features_log_transformed, transformed = True)Normalizando atributos numéricosAlém das transformações em atributos distorcidos, é uma boa prática comum realizar algum tipo de adaptação de escala nos atributos numéricos. Ajustar a escala nos dados não modifica o formato da distribuição de cada coluna (tais como `'capital-gain'` ou `'capital-loss'` acima); no entanto, a normalização garante que cada atributo será tratado com o mesmo peso durante a aplicação de aprendizado supervisionado. Note que uma vez aplicada a escala, a observação dos dados não terá o significado original, como exemplificado abaixo.Execute o código da célula abaixo para normalizar cada atributo numérico, nós usaremos para isso a [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html).# Importando sklearn.preprocessing.StandardScaler from sklearn.preprocessing import MinMaxScaler # Inicializando um aplicador de escala e aplicando em seguida aos atributos scaler = MinMaxScaler() # default=(0, 1) numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week'] features_log_minmax_transform = pd.DataFrame(data = features_log_transformed) features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical]) # Exibindo um exemplo de registro com a escala aplicada display(features_log_minmax_transform.head(n=5))Implementação: Pré-processamento dos dadosA partir da tabela em **Explorando os dados** acima, nós podemos observar que existem diversos atributos não-numéricos para cada registro. Usualmente, algoritmos de aprendizado esperam que os inputs sejam numéricos, o que requer que os atributos não numéricos (chamados de *variáveis de categoria*) sejam convertidos. Uma maneira popular de converter as variáveis de categoria é utilizar a estratégia **one-hot encoding**. Esta estratégia cria uma variável para cada categoria possível de cada atributo não numérico. Por exemplo, assuma que `algumAtributo` possuí três valores possíveis: `A`, `B`, ou `C`. Nós então transformamos este atributo em três novos atributos: `algumAtributo_A`, `algumAtributo_B` e `algumAtributo_C`.| | algumAtributo | | algumAtributo_A | algumAtributo_B | algumAtributo_C || :-: | :-: | | :-: | :-: | :-: || 0 | B | | 0 | 1 | 0 || 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 || 2 | A | | 1 | 0 | 0 |Além disso, assim como os atributos não-numéricos, precisaremos converter a coluna alvo não-numérica, `'income'`, para valores numéricos para que o algoritmo de aprendizado funcione. Uma vez que só existem duas categorias possíveis para esta coluna ("50K"), nós podemos evitar a utilização do one-hot encoding e simplesmente transformar estas duas categorias para `0` e `1`, respectivamente. No trecho de código abaixo, você precisará implementar o seguinte: - Utilizar [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummiespandas.get_dummies) para realizar o one-hot encoding nos dados da `'features_log_minmax_transform'`. - Converter a coluna alvo `'income_raw'` para re. 
- Transforme os registros com "50K" para `1`.# TODO: Utilize o one-hot encoding nos dados em 'features_log_minmax_transform' utilizando pandas.get_dummies() features_final = pd.get_dummies(features_log_minmax_transform) # TODO: Faça o encode da coluna 'income_raw' para valores numéricos income = income_raw.replace("<=50K", 0).replace(">50K", 1) print("{} income registers '<=50K' converted to 0.".format(income.value_counts()[0])) print("{} income registers '>50K' converted to 1.".format(income.value_counts()[1])) # Exiba o número de colunas depois do one-hot encoding encoded = list(features_final.columns) print("{} total features after one-hot encoding.".format(len(encoded))) # Descomente a linha abaixo para ver as colunas após o encode # print(encoded)34014 income registers '<=50K' converted to 0. 11208 income registers '>50K' converted to 1. 103 total features after one-hot encoding.Embaralhar e dividir os dadosAgora todas as _variáveis de categoria_ foram convertidas em atributos numéricos e todos os atributos numéricos foram normalizados. Como sempre, nós agora dividiremos os dados entre conjuntos de treinamento e de teste. 80% dos dados serão utilizados para treinamento e 20% para teste.Execute o código da célula abaixo para realizar divisão.# Importar train_test_split from sklearn.model_selection import train_test_split # Dividir os 'atributos' e 'income' entre conjuntos de treinamento e de testes. X_train, X_test, y_train, y_test = train_test_split(features_final, income, test_size = 0.2, random_state = 0) # Show the results of the split print("Training set has {} samples.".format(X_train.shape[0])) print("Testing set has {} samples.".format(X_test.shape[0]))Training set has 36177 samples. Testing set has 9045 samples.---- Avaliando a performance do modeloNesta seção nós investigaremos quatro algoritmos diferentes e determinaremos qual deles é melhor para a modelagem dos dados. Três destes algoritmos serão algoritmos de aprendizado supervisionado de sua escolha e o quarto algoritmo é conhecido como *naive predictor*. Métricas e o Naive predictor*CharityML*, equipada com sua pesquisa, sabe que os indivíduos que fazem mais do que \$50,000 possuem maior probabilidade de doar para a sua campanha de caridade. Por conta disto, a *CharityML* está particularmente interessada em predizer com acurácia quais indivíduos possuem remuneração acima de \$50,000. Parece que utilizar **acurácia (accuracy)** como uma métrica para avaliar a performance de um modelo é um parâmetro adequado. Além disso, identificar alguém que *não possui* remuneração acima de \$50,000 como alguém que recebe acima deste valor seria ruim para a *CharityML*, uma vez que eles estão procurando por indivíduos que desejam doar. Com isso, a habilidade do modelo em predizer com preisão aqueles que possuem a remuneração acima dos \$50,000 é *mais importante* do que a habilidade de realizar o **recall** destes indivíduos. Nós podemos utilizar a fórmula **F-beta score** como uma métrica que considera ambos: precision e recall.$$ F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\left( \beta^2 \cdot precision \right) + recall} $$Em particular, quando $\beta = 0.5$, maior ênfase é atribuída para a variável precision. Isso é chamado de **F$_{0.5}$ score** (ou F-score, simplificando).Analisando a distribuição de classes (aqueles que possuem remuneração até \$50,000 e aqueles que possuem remuneração superior), fica claro que a maioria dos indivíduos não possui remuneração acima de \$50,000. 
Isto pode ter grande impacto na **acurácia (accuracy)**, uma vez que nós poderíamos simplesmente dizer *"Esta pessoa não possui remuneração acima de \$50,000"* e estar certos em boa parte das vezes, sem ao menos olhar os dados! Fazer este tipo de afirmação seria chamado de **naive**, uma vez que não consideramos nenhuma informação para balisar este argumento. É sempre importante considerar a *naive prediction* para seu conjunto de dados, para ajudar a estabelecer um benchmark para análise da performance dos modelos. Com isso, sabemos que utilizar a naive prediction não traria resultado algum: Se a predição apontasse que todas as pessoas possuem remuneração inferior à \$50,000, a *CharityML* não identificaria ninguém como potencial doador. Nota: Revisando: accuracy, precision e recall**Accuracy** mede com que frequência o classificador faz a predição correta. É a proporção entre o número de predições corretas e o número total de predições (o número de registros testados).**Precision** informa qual a proporção de mensagens classificamos como spam eram realmente spam. Ou seja, é a proporção de verdadeiros positivos (mensagens classificadas como spam que eram realmente spam) sobre todos os positivos (todas as palavras classificadas como spam, independente se a classificação estava correta), em outras palavras, é a proporção`[Verdadeiros positivos/(Verdadeiros positivos + Falso positivos)]`**Recall (sensibilidade)** nos informa qual a proporção das mensagens que eram spam que foram corretamente classificadas como spam. É a proporção entre os verdadeiros positivos (classificados como spam, que realmente eram spam) sobre todas as palavras que realmente eram spam. Em outras palavras, é a proporção entre`[Verdadeiros positivos/(Verdadeiros positivos + Falso negativos)]`Para problemas de classificação distorcidos em suas distribuições, como no nosso caso, por exemplo, se tivéssemos 100 mensagems de texto e apenas 2 fossem spam e todas as outras não fossem, a "accuracy" por si só não seria uma métrica tão boa. Nós poderiamos classificar 90 mensagems como "não-spam" (incluindo as 2 que eram spam mas que teriam sido classificadas como não-spam e, por tanto, seriam falso negativas.) e 10 mensagems como spam (todas as 10 falso positivas) e ainda assim teriamos uma boa pontuação de accuracy. Para estess casos, precision e recall são muito úteis. Estas duas métricas podem ser combinadas para resgatar o F1 score, que é calculado através da média(harmônica) dos valores de precision e de recall. Este score pode variar entre 0 e 1, sendo 1 o melhor resultado possível para o F1 score (consideramos a média harmônica pois estamos lidando com proporções). Questão 1 - Performance do Naive PredictorSe escolhessemos um modelo que sempre prediz que um indivíduo possui remuneração acima de $50,000, qual seria a accuracy e o F-score considerando este conjunto de dados? Você deverá utilizar o código da célula abaixo e atribuir os seus resultados para as variáveis `'accuracy'` e `'fscore'` que serão usadas posteriormente.Por favor, note que o propósito ao gerar um naive predictor é simplesmente exibir como um modelo sem nenhuma inteligência se comportaria. No mundo real, idealmente o seu modelo de base será o resultado de um modelo anterior ou poderia ser baseado em um paper no qual você se basearia para melhorar. 
Quando não houver qualquer benchmark de modelo, utilizar um naive predictor será melhor do que uma escolha aleatória.**DICA:** * Quando temos um modelo que sempre prediz '1' (e.x o indivíduo possui remuneração superior à 50k) então nosso modelo não terá Verdadeiros Negativos ou Falso Negativos, pois nós não estaremos afirmando que qualquer dos valores é negativo (ou '0') durante a predição. Com isso, nossa accuracy neste caso se torna o mesmo valor da precision (Verdadeiros positivos/ (Verdadeiros positivos + Falso positivos)) pois cada predição que fizemos com o valor '1' que deveria ter o valor '0' se torna um falso positivo; nosso denominador neste caso é o número total de registros.* Nossa pontuação de Recall(Verdadeiros positivos/(Verdadeiros Positivos + Falsos negativos)) será 1 pois não teremos Falsos negativos.''' TP = np.sum(income) # Contando pois este é o caso "naive". Note que 'income' são os dados 'income_raw' convertidos para valores numéricos durante o passo de pré-processamento de dados. FP = income.count() - TP # Específico para o caso naive TN = 0 # Sem predições negativas para o caso naive FN = 0 # Sem predições negativas para o caso naive ''' TP = income.sum() FP = income.count() - TP TN = 0 FN = 0 # TODO: Calcular accuracy, precision e recall accuracy = (TP + TN) / (TP + TN + FP + FN) recall = TP / (TP + FN) precision = TP / (TP + FP) # TODO: Calcular o F-score utilizando a fórmula acima para o beta = 0.5 e os valores corretos de precision e recall. beta = 0.5 fscore = (1 + beta**2) * (precision * recall) / ((beta**2) * precision + recall) # Exibir os resultados print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore))Naive Predictor: [Accuracy score: 0.2478, F-score: 0.2917]Modelos de Aprendizado Supervisionado**Estes são alguns dos modelos de aprendizado supervisionado disponíveis em** [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html)- Gaussian Naive Bayes (GaussianNB)- Decision Trees (Árvores de decisão)- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)- K-Nearest Neighbors (KNeighbors)- Stochastic Gradient Descent Classifier (SGDC)- Support Vector Machines (SVM)- Logistic Regression Questão 2 - Aplicação do ModeloListe três dos modelos de aprendizado supervisionado acima que são apropriados para este problema que você irá testar nos dados do censo. Para cada modelo escolhido- Descreva uma situação do mundo real onde este modelo pode ser utilizado. - Quais são as vantagems da utilização deste modelo; quando ele performa bem?- Quais são as fraquezas do modelo; quando ele performa mal?- O que torna este modelo um bom candidato para o problema, considerando o que você sabe sobre o conjunto de dados?**DICA:**Estruture sua resposta no mesmo formato acima, com 4 partes para cada um dos modelos que você escolher. Por favor, inclua referências em cada uma das respostas. **Resposta:**A princípio, o nosso problema é um caso de classificação binária, em que a variável alvo (*income*) ou possui um valor superior a $50k ou um valor menor ou igual a este. Dentre as variáveis independentes, temos dados contínuos e categóricos. Para resolver este problema, vários algoritmos poderiam ser adotados, mas neste trabalho escolhemos os seguintes:- Regressão Logística- Árvore de Decisão- AdaBoostO índices entre colchetes indicam as fontes e referências bibliográficas utlizadas para obter ou complementar o conhecimento para construção do parágrafo que o acompanha. 
Todas as referências utilizadas estão no final deste documento.**Regressão Logística**É um algoritmo de classificação linear amplamente empregado em várias áreas, como na saúde predizendo o risco de desenvolver determinada doença a partir de várias características do paciente. [1]Dentre suas vantagens: além de predizer a qual classe pertence, ele pode estimar a probabilidade desse pertencimento; é um algoritmo baseado em funções matemáticas que cria uma linha, plano ou hiperplano de segmentação simples e flexível entre classes, podendo assumir qualquer direção ou orientação do espaço. [1][8]Por outro lado, entre as desvantagens: este algoritmo se aplica apenas a problemas de classificação binários ou dicotômicos; embora o plano de classificação seja simples, sua interpretação e entendimento pode ser mais complexa para aqueles que não tem uma boa base matemática; não se ajusta muito bem quando a relação entre as variáveis dependentes e independentes for complexa, com uma alta não linearidade. [1][8]É um método computacionalmente barato e muito poderoso, considerando que ele se restringe a problemas de classificação binárias, o que é o nosso caso, é um algoritmo que vale muito tentar antes de partir para outros mais complexos.**Árvore de Decisão**Este é outro algoritmo amplamente usado em várias áreas, mas tem um grande espaço conquistado nas áreas e processos de negócio que usam decisões analíticas por dados, principalmente por sua facilidade de compreensão. [2][8]Podemos citar como as vantagens: é um método poderoso e flexível, se enquadrando tanto em problemas de classificação quanto de regressão, atua com variáveis dependentes e independentes categóricas e contínuas, podendo ser utilizado em classificações binárias e não-binárias; como em regressão logística, além da classificação pode também estimar a probabilidade de pertencimento à classe; os limites de decisão criados para segmentar as classes são sempre perpendiculares aos eixos, podendo criar complexas regiões de classificação, o que torna um bom algoritmo para lidar com a não linearidade; como sua classificação não se dá por meio de funções matemáticas, mas por declarações lógicas, é um modelo muito mais simples e intuitivo de entender e explicar, principalmente para pessoas sem sólida fundamentação matemática. [2][4][8]Como desvantagens: por sua possibilidade de criar complexas regiões de classificação, é um método que pode facilmente sobreajustar o modelo, exigindo maior atenção na seleção dos hiperparâmetros; comparado com outros algoritmos, ele tende a produzir modelos com menor acurácia; é instável, significando que pequenas mudanças nos dados pode levar a grandes mudanças na estrutura de otimização do modelo. [2][4][8]É um modelo também computacionalmente barato, flexível e trabalha bem com problemas não lineares, fazendo um contraponto ao primeiro algoritmo escolhido, a regressão logística. Logo, vale muito a pena avaliar a performance deste algoritmo antes de partirmos para outros ainda mais complexos.**AdaBoost**Os chamados *ensemble methods* tem ganhado cada vez mais espaço, principalmente em competições de machine learning como o Kaggle. Dentre eles, o Adaboost é um dos mais famosos e tem sido utilizado em aplicações de visão computacional, de detecção e reconhecimento de objetos. 
[3][5]Seus pontos fortes: de modo geral, os ensemble methods utilizam vários estimadores fracos para alcançar um modelo forte e os métodos de boosting, o Adaboot é um deles, fazem isso de forma iterativa, penalizando com maior intensidade os dados que na etapa anterior foram classificados erroneamente. Com isso, a tendência é criar modelos com maior acurácia e redução significativa dos erros com o crescimento da quantidade de estimadores; realiza uma boa generalização do modelo, reduzindos os erros de viés e variância; tem a capacidade de realizar classificações não binárias. [3][6][7]E suas fraquezas: naturalmente, como precisa de vários estimadores fracos para compor um modelo forte, seu custo computacional é mais elevado; é um modelo menos intuitivo e intelegível para profissionais fora do universo de data science; é um método sensível a ruídos e outliers. [3][6][7]É um metodo mais complexo e menos intuitivo, contudo tende a apresentar excelente performance com o incremento de estimadores. Com os tratamentos e as transformações dos dados na etapa de preparação, mitigando o efeito dos outliers, avaliamos que esta é uma excelente opção e complementa bem o arcabouço de algoritmos escolhidos para este problema. Implementação - Criando um Pipeline de Treinamento e PrediçãoPara avaliar adequadamente a performance de cada um dos modelos que você escolheu é importante que você crie um pipeline de treinamento e predição que te permite de maneira rápida e eficiente treinar os modelos utilizando vários tamanhos de conjuntos de dados para treinamento, além de performar predições nos dados de teste. Sua implementação aqui será utilizada na próxima seção. No bloco de código abaixo, você precisará implementar o seguinte: - Importar `fbeta_score` e `accuracy_score` de [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.htmlsklearn-metrics-metrics). - Adapte o algoritmo para os dados de treinamento e registre o tempo de treinamento. - Realize predições nos dados de teste `X_test`, e também nos 300 primeiros pontos de treinamento `X_train[:300]`. - Registre o tempo total de predição. - Calcule a acurácia tanto para o conjundo de dados de treino quanto para o conjunto de testes. - Calcule o F-score para os dois conjuntos de dados: treino e testes. 
- Garanta que você configurou o parâmetro `beta`!# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score from sklearn.metrics import accuracy_score, fbeta_score def train_predict(learner, sample_size, X_train, y_train, X_test, y_test): ''' inputs: - learner: the learning algorithm to be trained and predicted on - sample_size: the size of samples (number) to be drawn from training set - X_train: features training set - y_train: income training set - X_test: features testing set - y_test: income testing set ''' results = {} # TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:]) start = time() # Get start time step = round(len(y_train) / sample_size) # Calcula o passo necessário para selecionar a quantidade de amostras definida por sample_size learner.fit(X_train[::step], y_train[::step]) # Aplica o passo na seleção das amostras para treinamento do modelo end = time() # Get end time # TODO: Calculate the training time results['train_time'] = end - start # TODO: Get the predictions on the test set(X_test), # then get predictions on the first 300 training samples(X_train) using .predict() start = time() # Get start time predictions_test = learner.predict(X_test) predictions_train = learner.predict(X_train[:300]) end = time() # Get end time # TODO: Calculate the total prediction time results['pred_time'] = end - start # TODO: Compute accuracy on the first 300 training samples which is y_train[:300] results['acc_train'] = accuracy_score(predictions_train, y_train[:300]) # TODO: Compute accuracy on test set using accuracy_score() results['acc_test'] = accuracy_score(predictions_test, y_test) # TODO: Compute F-score on the the first 300 training samples using fbeta_score() results['f_train'] = fbeta_score(predictions_train, y_train[:300], beta=0.5) # TODO: Compute F-score on the test set which is y_test results['f_test'] = fbeta_score(predictions_test, y_test, beta=0.5) # Success print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size)) # Return the results return resultsImplementação: Validação inicial do modeloNo código da célula, você precisará implementar o seguinte:- Importar os três modelos de aprendizado supervisionado que você escolheu na seção anterior - Inicializar os três modelos e armazená-los em `'clf_A'`, `'clf_B'`, e `'clf_C'`. - Utilize um `'random_state'` para cada modelo que você utilizar, caso seja fornecido. - **Nota:** Utilize as configurações padrão para cada modelo - você otimizará um modelo específico em uma seção posterior- Calcule o número de registros equivalentes à 1%, 10%, e 100% dos dados de treinamento. 
- Armazene estes valores em `'samples_1'`, `'samples_10'`, e `'samples_100'` respectivamente.**Nota:** Dependendo do algoritmo de sua escolha, a implementação abaixo pode demorar algum tempo para executar!# TODO: Importe os três modelos de aprendizado supervisionado da sklearn from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import AdaBoostClassifier # TODO: Inicialize os três modelos clf_A = LogisticRegression(random_state=42) clf_B = DecisionTreeClassifier(random_state=42) clf_C = AdaBoostClassifier(random_state=42) # TODO: Calcule o número de amostras para 1%, 10%, e 100% dos dados de treinamento # HINT: samples_100 é todo o conjunto de treinamento e.x.: len(y_train) # HINT: samples_10 é 10% de samples_100 # HINT: samples_1 é 1% de samples_100 samples_100 = len(y_train) samples_10 = round(0.1 * samples_100) samples_1 = round(0.01 * samples_100) # Colete os resultados dos algoritmos de aprendizado results = {} for clf in [clf_A, clf_B, clf_C]: clf_name = clf.__class__.__name__ results[clf_name] = {} for i, samples in enumerate([samples_1, samples_10, samples_100]): results[clf_name][i] = \ train_predict(clf, samples, X_train, y_train, X_test, y_test) # Run metrics visualization for the three supervised learning models chosen vs.evaluate(results, accuracy, fscore)LogisticRegression trained on 362 samples. LogisticRegression trained on 3618 samples. LogisticRegression trained on 36177 samples. DecisionTreeClassifier trained on 362 samples. DecisionTreeClassifier trained on 3618 samples. DecisionTreeClassifier trained on 36177 samples. AdaBoostClassifier trained on 362 samples. AdaBoostClassifier trained on 3618 samples. AdaBoostClassifier trained on 36177 samples.---- Melhorando os resultadosNesta seção final, você irá escolher o melhor entre os três modelos de aprendizado supervisionado para utilizar nos dados dos estudantes. Você irá então realizar uma busca grid para otimização em todo o conjunto de dados de treino (`X_train` e `y_train`) fazendo o tuning de pelo menos um parâmetro para melhorar o F-score anterior do modelo. Questão 3 - Escolhendo o melhor modeloBaseado na validação anterior, em um ou dois parágrafos explique para a *CharityML* qual dos três modelos você acredita ser o mais apropriado para a tarefa de identificar indivíduos com remuneração anual superior à \$50,000. **DICA:** Analise o gráfico do canto inferior esquerdo da célula acima(a visualização criada através do comando `vs.evaluate(results, accuracy, fscore)`) e verifique o F score para o conjunto de testes quando 100% do conjunto de treino é utilizado. Qual modelo possui o maior score? Sua resposta deve abranger os seguintes pontos:* métricas - F score no conjunto de testes quando 100% dos dados de treino são utilizados, * tempo de predição/treinamento * a adequação do algoritmo para este conjunto de dados. **Resposta:**Acreditamos que o algoritmo mais adequado para solução do nosso problema é o **Adaboost**. Utilizando os classificadores com os parâmetros padrões, a Árvore de Decisão tem um bom desempenho com os dados de treinamento, mas não generaliza bem como pode ser observado nos resultados com os dados de teste, indicando um sobreajuste do modelo. Observando apenas os resultados gráficos com os dados de teste, notamos uma acurácia e um f-score aproximado entre os 3 modelos, com um desempenho superior para o Adaboost. 
Por outro lado, embora tenha um desempenho melhor, ele precisa de mais tempo para treinar o modelo e realizar predições.O nosso caso de uso não é uma aplicação em tempo real, então não é necessário criar um streaming de dados que serão processados e utlizados para tomada de decisão e ação no momento que chegam. Como parte de uma estratégia de campanha de marketing, o nosso modelo tem o papel de identificar potenciais indíviduos com maiores chances de serem impactados por nossa campanha e realizarem doações para nossa ONG. Desta forma, o critério *tempo* pode ser secundarizado ou preterido diante um melhor desempenho de classificação dos indivíduos. Questão 4 - Descrevendo o modelo nos termos de Layman Em um ou dois parágrafos, explique para a *CharityML*, nos termos de layman, como o modelo final escolhido deveria funcionar. Garanta que você está descrevendo as principais vantagens do modelo, tais como o modo de treinar o modelo e como o modelo realiza a predição. Evite a utilização de jargões matemáticos avançados, como por exemplo a descrição de equações. **DICA:**Quando estiver explicando seu modelo, cite as fontes externas utilizadas, caso utilize alguma. **Resposta:** O Adaboost é o modelo que escolhemos para a seleção dos melhores indíviduos, ou seja, aqueles com o maior potencial de responderem às nossas campanhas e converterem em doações. Ele é um modelo muito simples de ser implementado e muito poderoso. Mas como funciona?Diferente de vários outros métodos que criam apenas um modelo de classificação, o Adaboost divide aleatoriamente o nosso conjunto de dados em vários conjuntos menores e a partir de cada conjunto reduzido é criado um modelo "fraco" de classificação. Por fraco, estamos dizendo que é um modelo com baixa precisão, que não acerta tanto quanto gostaríamos. Contudo, combinando de forma iterativa vários desses modelos fracos, é possível criar um modelo forte, com esteróides.Em uma analogia, seria como se pegássemos um grande projeto de um prédio e entregássemos para vários especialistas (arquiteto, paisagista, engenheiro civil, engenheiro eletricista, entre outros) para projetar nossa estrutura. Cada projeto individualmente seria fraco para construir o nosso prédio, mas pegando o melhor de cada é possível alcançarmos o melhor projeto. É isso que o Adaboost faz. Como é preciso treinar e combinar vários modelos em apenas um, normalmente o Adaboost leva mais tempo que outros algoritmos nas tarefas de treinamento e até mesmo de predição, mas ele apresenta bons resultados em termos de precisão da predição. Então, com certeza é um método que vale a pena investirmos. Implementação: Tuning do modeloRefine o modelo escolhido. Utilize uma busca grid (`GridSearchCV`) com pleo menos um parâmetro importante refinado com pelo menos 3 valores diferentes. Você precisará utilizar todo o conjunto de treinamento para isso. Na célula de código abaixo, você precisará implementar o seguinte:- Importar [`sklearn.model_selection.GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) e [`sklearn.metrics.make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html).- Inicializar o classificador escolhido por você e armazená-lo em `clf`. - Configurar um `random_state` se houver um disponível para o mesmo estado que você configurou anteriormente.- Criar um dicionário dos parâmetros que você quer otimizar para o modelo escolhido. - Exemplo: `parâmetro = {'parâmetro' : [lista de valores]}`. 
- **Nota:** Evite otimizar o parâmetro `max_features` se este parâmetro estiver disponível! - Utilize `make_scorer` para criar um objeto de pontuação `fbeta_score` (com $\beta = 0.5$).- Realize a busca gride no classificador `clf` utilizando o `'scorer'` e armazene-o na variável `grid_obj`. - Adeque o objeto da busca grid aos dados de treino (`X_train`, `y_train`) e armazene em `grid_fit`.**Nota:** Dependendo do algoritmo escolhido e da lista de parâmetros, a implementação a seguir pode levar algum tempo para executar!# TODO: Importar 'GridSearchCV', 'make_scorer', e qualquer biblioteca necessária from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV from sklearn.metrics import make_scorer from sklearn.metrics import fbeta_score # TODO: Inicializar o classificador clf = AdaBoostClassifier(random_state=42) # TODO: Criar a lista de parâmetros que você quer otimizar, utilizando um dicionário, caso necessário. # HINT: parameters = {'parameter_1': [value1, value2], 'parameter_2': [value1, value2]} parameters = {'n_estimators': [50, 100, 250, 500], 'learning_rate': [0.25, 0.5, 1, 1.5, 2]} # TODO: Criar um objeto fbeta_score utilizando make_scorer() scorer = make_scorer(fbeta_score, beta=0.5) # TODO: Realizar uma busca grid no classificador utilizando o 'scorer' como o método de score no GridSearchCV() grid_obj = GridSearchCV(estimator=clf, param_grid=parameters, scoring=scorer) # TODO: Adequar o objeto da busca grid como os dados para treinamento e encontrar os parâmetros ótimos utilizando fit() start = time() grid_fit = grid_obj.fit(X_train, y_train) end = time() train_time = end - start # Recuperar o estimador best_clf = grid_fit.best_estimator_ # Realizar predições utilizando o modelo não otimizado e modelar start = time() predictions = (clf.fit(X_train, y_train)).predict(X_test) best_predictions = best_clf.predict(X_test) end = time() pred_time = end - start # Reportar os scores de antes e de depois print("Train/Optimize and Predict Time\n------") print("Time to train: ", train_time) print("Time to predict: ", pred_time) print("\nUnoptimized model\n------") print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions))) print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5))) print("\nOptimized Model\n------") print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))) print("Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))Train/Optimize and Predict Time ------ Time to train: 658.7397973537445 Time to predict: 3.942863702774048 Unoptimized model ------ Accuracy score on testing data: 0.8576 F-score on testing data: 0.7246 Optimized Model ------ Final accuracy score on the testing data: 0.8677 Final F-score on the testing data: 0.7452Questão 5 - Validação final do modelo* Qual é a accuracy e o F-score do modelo otimizado utilizando os dados de testes?* Estes scores são melhores ou piores do que o modelo antes da otimização? 
* Como os resultados do modelo otimizado se comparam aos benchmarks do naive predictor que você encontrou na **Questão 1**?**Nota:** Preencha a tabela abaixo com seus resultados e então responda as questões no campo **Resposta** Resultados:| Metric | Naive Predictor | Unoptimized Model | Optimized Model ||---------------|-----------------|-------------------|-----------------| |Accuracy Score |0.2478 |0.8576 |0.8677|F-score |0.2917 |0.7246 |0.7452 **Resposta:**Observamos que os nossos modelos não otimizados e otimizados apresentam uma performance bem superior ao nosso naive predictor. Observamos também que o Adaboost sem qualquer otimização, utilizando seus hiperparâmetros padrões, já apresenta uma performance considerável, sendo aproximadamente 85,8% de acurácia e 72,5% de F-score. Com a otimização, tivemos um ganho em torno de 1% na acurácia e 2% no F-score, o que pode ser muito significativo, já que a nossa base tem mais de 45 mil pessoas e qualquer percentual pode significar um impacto em 450 pessoas. ---- Importância dos atributosUma tarefa importante quando realizamos aprendizado supervisionado em um conjunto de dados como os dados do censo que estudamos aqui é determinar quais atributos fornecem maior poder de predição. Focando no relacionamento entre alguns poucos atributos mais importantes e na label alvo nós simplificamos muito o nosso entendimento do fenômeno, que é a coisa mais importante a se fazer. No caso deste projeto, isso significa que nós queremos identificar um pequeno número de atributos que possuem maior chance de predizer se um indivíduo possui renda anual superior à \$50,000.Escolha um classificador da scikit-learn (e.x.: adaboost, random forests) que possua o atributo `feature_importance_`, que é uma função que calcula o ranking de importância dos atributos de acordo com o classificador escolhido. Na próxima célula python ajuste este classificador para o conjunto de treinamento e utilize este atributo para determinar os 5 atributos mais importantes do conjunto de dados do censo. Questão 6 - Observação da Relevância dos AtributosQuando **Exploramos os dados**, vimos que existem treze atributos disponíveis para cada registro nos dados do censo. Destes treze atributos, quais os 5 atributos que você acredita que são os mais importantes para predição e em que ordem você os ranquearia? Por quê? **Resposta:**Intuitivamente falando, acreditamos que os cinco atributos abaixo sejam os mais significativos para a determinação do nível de renda (income) dos indívíduos:- **capital-gain e capita-loss**: o montante de ganho e perda de capital, definitivamente, devem ser dois atribuitos importantes para determinação do nível de renda.- **education**: níveis maiores de educação normalmente implica em melhores empregos com melhores condições de salário e benefícios.- **workclass**: a estabilidade financeira também deve outro aspecto importante para indivíduos realizarem doações. Acreditamos que empregos/condições mais estáveis e com maiores direitos/benefícios, traz uma maior segurança e tranquilidade para indíviduos, podendo realizar gastos e investimentos para além da subsistência.- **age**: acreditamos que pessoas mais jovens e que estão iniciando suas carreiras e construções de patrimônios realizem menos doações do que pessoas mais velhas e com maior estabilidade. Implementação - Extraindo a importância do atributoEscolha um algoritmo de aprendizado supervisionado da `sciki-learn` que possui o atributo `feature_importance_` disponível. 
Este atributo é uma função que ranqueia a importância de cada atributo dos registros do conjunto de dados quando realizamos predições baseadas no algoritmo escolhido.Na célula de código abaixo, você precisará implementar o seguinte: - Importar um modelo de aprendizado supervisionado da sklearn se este for diferente dos três usados anteriormente. - Treinar o modelo supervisionado com todo o conjunto de treinamento. - Extrair a importância dos atributos utilizando `'.feature_importances_'`.# TODO: Importar um modelo de aprendizado supervisionado que tenha 'feature_importances_' from sklearn.ensemble import AdaBoostClassifier # TODO: Treinar o modelo utilizando o conjunto de treinamento com .fit(X_train, y_train) model = AdaBoostClassifier(n_estimators=500, learning_rate=1.5, random_state=42) model.fit(X_train, y_train) # TODO: Extrair a importância dos atributos utilizando .feature_importances_ importances = model.feature_importances_ # Plotar vs.feature_plot(importances, X_train, y_train)Questão 7 - Extraindo importância dos atributosObserve a visualização criada acima que exibe os cinco atributos mais relevantes para predizer se um indivíduo possui remuneração igual ou superior à \$50,000 por ano.* Como estes cinco atributos se comparam com os 5 atributos que você discutiu na **Questão 6**? * Se você estivesse próximo da mesma resposta, como esta visualização confirma o seu raciocínio? * Se você não estava próximo, por que você acha que estes atributos são mais relevantes? **Resposta:**A avaliação dos cinco atributos mais importantes mostra que nossa intuição estava parcialmente certa. De fato, as características *capital-gain* e *capital-loss* são determinantes para estimar a renda sendo, reespectivamente, o primeiro e segunto atributos mais importantes. A idade (*age*) também se mostrou um forte atributo, embora tenha um impacto superior ao esperado, sendo o terceiro atributo mais significativo. Nossa intuição apontava que o nível educacional também seria relevante no processo de classificação e isso se confirma em partes, embora seja importante, esta característica é melhor representada pelo atributo *education-num* ao invés de *education*, que foi a nossa suposição inicial. Por fim, um atributo inicialmente inesperado, sendo o quarto mais relevante, é o *hours-per-week* e faz muito sentido. Naturalmente, a tendência é de que quanto mais horas trabalhadas por dia, semana ou mês, maior será remuneração e consequentemente, mais fácil será atingir o patamar de $50k de renda, o que qualifica o nosso indíviduo com maior propensão para realizar doações. Selecionando atributosComo um modelo performa se nós só utilizamos um subconjunto de todos os atributos disponíveis nos dados? Com menos atributos necessários para treinar, a expectativa é que o treinamento e a predição sejam executados em um tempo muito menor — com o custo da redução nas métricas de performance. A partir da visualização acima, nós vemos que os cinco atributos mais importantes contribuem para mais de 50% da importância de **todos** os atributos presentes nos dados. Isto indica que nós podemos tentar *reduzir os atributos* e simplificar a informação necessária para o modelo aprender. 
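As a quick cross-check before retraining, the five most important one-hot encoded columns can be listed by name. This is a minimal sketch only, assuming the `importances` array and the `X_train` DataFrame from the cells above.
import numpy as np
# Indices of the five largest importances, in descending order
top5 = np.argsort(importances)[::-1][:5]
for name, weight in zip(X_train.columns.values[top5], importances[top5]):
    print("{}: {:.4f}".format(name, weight))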
O código abaixo utilizará o mesmo modelo otimizado que você encontrou anteriormente e treinará o modelo com o mesmo conjunto de dados de treinamento, porém apenas com *os cinco atributos mais importantes*.# Importar a funcionalidade para clonar um modelo from sklearn.base import clone # Reduzir a quantidade de atributos X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]] X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]] # Treinar o melhor modelo encontrado com a busca grid anterior clf = (clone(best_clf)).fit(X_train_reduced, y_train) # Fazer novas predições reduced_predictions = clf.predict(X_test_reduced) # Reportar os scores do modelo final utilizando as duas versões dos dados. print("Final Model trained on full data\n------") print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))) print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))) print("\nFinal Model trained on reduced data\n------") print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions))) print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5)))Final Model trained on full data ------ Accuracy on testing data: 0.8677 F-score on testing data: 0.7452 Final Model trained on reduced data ------ Accuracy on testing data: 0.8421 F-score on testing data: 0.7003Pay no attention to this part; here I was just running experiments with the Pearson distribution.from numpy import random as r from scipy.stats import multivariate_normal as mnorm import numpy as np import matplotlib.pyplot as plt n1 = r.randn(100) n2 = r.randn(100) max_tick = int(max(n1.max(), n2.max())) + 1 min_tick = int(min(n1.min(), n2.min())) - 1 max_tick = max(abs(min_tick), max_tick) ticks = np.linspace(-max_tick, max_tick, 2*max_tick + 1) fig, ax = plt.subplots(figsize=(10, 10)) ax.add_artist(plt.Circle((0.0, 0.0), 2.45, color='blue', alpha=0.3)) ax.scatter(n1, n2, color='r', s=5) ax.set_xticks(ticks) ax.set_yticks(ticks) plt.show()Running hyperparameter optimization - Part 2This notebook goes through part two of the steps and code for identifying the optimal hyperparameter settings for the Variational Autoencoder framework for integrating multi-omics and clinical data spanning both categorical and continuous variables. The optimal settings are identified based on multiple steps, considering both reconstruction on the test and training sets and the stability/similarity of the latent space under repeated training. Part one focuses on the test and training reconstruction accuracies, using MOVE_hyperparameter_optimization_reconstruction.ipynb.
From those results the optimal combination are then tested for stability of the latent space in repeated training using this notebook.# Import functions import os, sys import torch import numpy as np from torch.utils import data from torch import nn from torch import optim import torch.nn.functional as F from torch.utils.data import DataLoader from torch.utils.data.dataset import TensorDataset import umap import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') from scipy.stats.stats import pearsonr from sklearn.metrics.pairwise import cosine_similarity import pandas as pd import seaborn as sns import matplotlib import re import random from collections import defaultdict path="/" sys.path.append(path + "src/") import VAE_v2_1Below are the funcitons for reading data and calculations defiend# Functions for reading data def read_cat(file): data = np.load(file) data = data.astype(np.float32) data_input = data.reshape(data.shape[0], -1) return data, data_input def read_con(file): data = np.load(file) data = data.astype(np.float32) data[np.isnan(data)] = 0 consum = data.sum(axis=0) mask_col = consum != 0 data = data[:,mask_col] return data, mask_col def read_header(file, mask=None, start=1): with open(file, "r") as f: h = f.readline().rstrip().split("\t")[start:] if not mask is None: h = np.array(h) h = h[mask] return h # Functions for calculations def cal_cat_recon(cat_shapes, cat_recon, cat_class): cat_true_recon = [] cat_total_recon = [] pos = 0 for s in cat_shapes: n = s[1] cat_class_tmp = cat_class[:,pos:(n + pos)] cat_recon_tmp = cat_recon[:,pos:(n + pos)] missing_cat = cat_recon_tmp[cat_class_tmp == -1] diff_cat = cat_class_tmp - cat_recon_tmp diff_cat[diff_cat != 0] = -1 true_cat = diff_cat[diff_cat == 0] false_cat = diff_cat[diff_cat != 0] cat_true = len(true_cat)/(float(diff_cat.size) - missing_cat.size) cat_true_recon.append(cat_true) diff_cat[diff_cat == 0] = 1 diff_cat[diff_cat != 1] = 0 cat_total_recon.append(np.count_nonzero(diff_cat, 1) / diff_cat.shape[1]) pos += n return cat_total_recon def cal_con_recon(train_loader, con_recon, con_shapes): total_shape = 0 true_recon = [] cos_values = [] all_values = [] for s in con_shapes: cor_con = list() cos_con = list() all_val =list() for n in range(0, con_recon.shape[0]): con_no_missing = train_loader.dataset.con_all[n,total_shape:(s + total_shape - 1)][train_loader.dataset.con_all[n,total_shape:(s + total_shape - 1)] != 0] if len(con_no_missing) <= 1: all_val.append(np.nan) continue con_out_no_missing = con_recon[n,total_shape:(s + total_shape - 1)][train_loader.dataset.con_all[n,total_shape:(s + total_shape - 1)] != 0] cor = pearsonr(con_no_missing, con_out_no_missing)[0] cor_con.append(cor) com = np.vstack([con_no_missing, con_out_no_missing]) cos = cosine_similarity(com)[0,1] cos_con.append(cos) all_val.append(cos) cor_con = np.array(cor_con) cos_con = np.array(cos_con) cos_values.append(cos_con) all_values.append(np.array(all_val)) true_recon.append(len(cos_con[cos_con >= 0.7]) / len(cos_con)) total_shape += s return true_reconThe next part is for reading in the data. This example uses the different datatypes included in the publication of MOVE which consist of three categorical datatypes and seven continuous. 
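Since the underlying data files are not distributed (see the note just below), a tiny synthetic example may help show what `read_con` returns: NaNs are replaced by 0 and columns whose sum is zero are dropped, with the returned mask later used to subset the matching header. The file name `toy_con.npy` is made up purely for this illustration.

import numpy as np
toy = np.array([[1.0, np.nan, 0.0],
                [2.0, 3.0,    0.0]], dtype=np.float32)
np.save("toy_con.npy", toy)               # read_con expects a .npy file on disk
data, mask_col = read_con("toy_con.npy")
print(data)      # [[1. 0.] [2. 3.]] -- the all-zero third column is removed
print(mask_col)  # [ True  True False]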
NOTE the data is not available for testing.path = '/' # Read in categorical data cat, cat_input = read_cat(path + "data/baseline_categorical.npy") cat_h = read_header(path + "data/baseline_categorical.tsv") geno, geno_input = read_cat(path + "data/diabetes_genotypes.npy") geno_h = read_header(path + "data/diabetes_genotypes_all.tsv") drug, drug_input = read_cat(path + "data/baseline_drugs.npy") drug_h = read_header(path + "data/baseline_drugs.tsv") # Read in continuous data; each read_con returns the mask of non-empty columns, which is used to subset the matching header clin, con_mask = read_con(path + "data/baseline_continuous.npy") clin_h = read_header(path + "data/baseline_continuous.tsv", con_mask) diet_wearables, con_mask = read_con(path + "data/baseline_diet_wearables_.npy") diet_wearables_h = read_header(path + "data/baseline_diet_wearables.tsv", con_mask) pro, con_mask = read_con(path + "data/baseline_proteomic_antibodies.npy") pro_h = read_header(path + "/data/proteomics_genes_one_line.txt", con_mask) pro_h = [s + "_pro" for s in pro_h] # due to overlap in gene names from transcriptomics data target_mata, con_mask = read_con(path + "data/baseline_target_metabolomics.npy") targm_h = read_header(path + "data/baseline_target_metabolomics.tsv", con_mask) untarget_mata, con_mask = read_con(path + "data/baseline_untarget_metabolomics.npy") untargm_h = read_header(path + "data/baseline_untarget_metabolomics.tsv", con_mask) trans, con_mask = read_con(path + "data/baseline_transcriptomics.npy") tran_h = read_header(path + "data/baseline_transcriptomics.tsv", con_mask, start=0) metagen, con_mask = read_con(path + "data/baseline_metagenomics.npy") meta_h = read_header(path + "data/baseline_metagenomics.tsv", con_mask, start=0) # combined headers con_names = np.concatenate((clin_h, diet_wearables_h, pro_h, targm_h, untargm_h, tran_h, meta_h)) cat_names = np.concatenate((cat_h, geno_h, drug_h))For this part we use all the data, contrary to part 1 where it was divided into training and test, and investigate how similar the latent space is between the repeated runs. Below we define the selected hyper-parameter settings with equal or close to equal performance based on part 1. For plotting purposes we only test on three different "types" here: the size of the hidden layer (nHiddens), the size of the latent space (nLatents) and the drop-out (drop_outs). The number of hidden layers is set to 1 (nl=1). We repeat the training 5 times.# If using GPU set to True cuda = True device = torch.device("cuda" if cuda == True else "cpu") con_list = [clin, diet_wearables, pro, target_mata, untarget_mata, trans, metagen] cat_list = [cat, geno] nHiddens = [1000, 2000] nLatents = [50, 100, 200] drop_outs = [0.1, 0.3] repeat = 5 latents = defaultdict(list) embeddings = defaultdict(list) con_recons = defaultdict(list) cat_recons = defaultdict(list) recon_acc = defaultdict(list) los = defaultdict(list) likelihood = defaultdict(list) models = defaultdict() nepochs = 250 nl = 1Below we run the full grid search.
Here we also save the UMAP embeddings for the posibility of a visual investigation of the results.for nHidden in nHiddens: for nLatent in nLatents: for do in drop_outs: for r in range(repeat): combi = str([nHidden] * nl) + "+" + str(nLatent) + ", Drop-out:" + str(do) print(combi) mask, train_loader = VAE_v2_1.make_dataloader(cat_list=cat_list, con_list=con_list, batchsize= 10) ncategorical = train_loader.dataset.cat_all.shape[1] ncontinuous = train_loader.dataset.con_all.shape[1] con_shapes = train_loader.dataset.con_shapes cat_shapes = train_loader.dataset.cat_shapes model = VAE_v2_1.VAE(ncategorical=ncategorical, ncontinuous= ncontinuous, con_shapes=con_shapes, cat_shapes=cat_shapes, nhiddens=[nHidden]*nl, nlatent=nLatent, alpha=0.1, beta=0.0001, con_weights=[1,1,1,1,1,1,1], cat_weights=[1,1], dropout=do, cuda=cuda).to(device) loss = list() ce = list() sse = list() KLD = list() kld_w = 0 l = len(kldsteps) r = 20/l update = 1 epochs = range(1, nepochs + 1) for epoch in range(1, nepochs + 1): if epoch in kldsteps: kld_w = 1/20 * update update += r if epoch in batchsteps: train_loader = DataLoader(dataset=train_loader.dataset, batch_size=int(train_loader.batch_size * 1.25), shuffle=True, drop_last=True, num_workers=train_loader.num_workers, pin_memory=train_loader.pin_memory) l, c, s, k = model.enodeing(train_loader, epoch, lrate, kld_w) loss.append(l) ce.append(c) sse.append(s) KLD.append(k) test_loader = DataLoader(dataset=train_loader.dataset, batch_size=1, drop_last=False, shuffle=False, num_workers=1, pin_memory=train_loader.pin_memory) latent, latent_var, cat_recon, cat_class, con_recon, test_loss, test_likelihood = model.latent(test_loader, kld_w) con_recon = np.array(con_recon) con_recon = torch.from_numpy(con_recon) cat_true_recon = cal_cat_recon(cat_shapes, cat_recon, cat_class) true_recon = cal_con_recon(train_loader, con_recon, con_shapes) ### Umap clustering reducer = umap.UMAP() embedding = reducer.fit_transform(latent) # save recon_acc[combi].append(cat_true_recon + true_recon) latents[combi].append(latent) embeddings[combi].append(embedding) con_recons[combi].append(con_recon) cat_recons[combi].append(cat_recon) # Saving the results np.save(path + "hyperparameters/embedding_stab.npy", embeddings) np.save(path + "hyperparameters/latent_stab.npy", latents) np.save(path + "hyperparameters/con_recon_stab.npy", con_recons) np.save(path + "hyperparameters/cat_recon_stab.npy", cat_recons) np.save(path + "hyperparameters/recon_acc_stab.npy", recon_acc) # Run if you want to reload the results embeddings = np.load(path + "wp2.2/stability/embedding_stab.npy", allow_pickle = True).item() latents = np.load(path + "wp2.2/stability/latent_stab.npy", allow_pickle = True).item() con_recons = np.load(path + "wp2.2/stability/con_recon_stab.npy", allow_pickle = True).item() cat_recons = np.load(path + "wp2.2/stability/cat_recon_stab.npy", allow_pickle = True).item() recon_acc = np.load(path + "wp2.2/stability/recon_acc_stab.npy", allow_pickle = True).item()Below is calcualtion and visualisation only focusig on the top 10 closest neigbour for each individual# Top 10 changes top10_changes = defaultdict(list) stability_top10 = defaultdict(list) for nHidden in nHiddens: for nLatent in nLatents: max_pos_values_init = list() old_sum_max = list() name = str([nHidden] * nl) + "+" + str(nLatent) top10_changes[name] = [ [] for i in range(npatient) ] for r in range(repeat): cos_sim = cosine_similarity(latents[name][r]) corr = pd.DataFrame(cos_sim) step = list() for index, row in corr.iterrows(): if r == 
0: max_pos = np.asarray(row.argsort()[11:][::-1][1:11]) max_pos_values_init.append(max_pos) #summed_max = np.mean(row[max_pos]) old_sum_max.append(row[max_pos]) else: old_pos = max_pos_values_init[index] old_sum = old_sum_max[index] #summed_max = np.mean(row[old_pos]) top10_changes[name][index].append(np.mean(abs(old_sum - row[old_pos]))) step.append(np.mean(abs(old_sum - row[old_pos]))) if r != 0: stability_top10[name].append(np.mean(step)) # Plot hidden+latent combination differnces sns.set_color_codes("dark") df = pd.DataFrame(stability_top10) df_t = df.T fig = plt.figure(figsize=(18,14)) ax = sns.boxplot(data=df) ax.set_xticklabels(ax.get_xticklabels(),rotation=45, size=16) plt.title('Difference across replicationes in cosine similarity of ten closest neighbours in first iteration', size=20) plt.ylabel("Average change", size=16) plt.xlabel('') plt.savefig(path + "hyperparameters/stability_top10.png") plt.clf()The next part compared based on all of the latent space. Furthermore, it includes code for calculation on cluster stability if the latent space is to be used for clustering (not used by MOVE in the paper here only cosine similarity on latent is included).repeat = 5 rand_index_baseline = list() # First it looks at the for d in range(repeat): if d == 0: kmeans = KMeans(n_clusters=4) kmeans = kmeans.fit(latents[name][0]) true_labels = kmeans.predict(latents[name][0]) else: kmeans = KMeans(n_clusters=4) kmeans = kmeans.fit(latents[name][0]) labels = kmeans.predict(latents[name][0]) ran_index_baseline.append(adjust_rand(true_labels, labels)) rand_avg_baseline = np.mean(ran_index_baseline) total_changes = defaultdict(list) stability_total = defaultdict(list) rand_index = defaultdict(list) nl = 1 for nHidden in nHiddens: for nLatent in nLatents: for do in drop_outs: pos_values_init = list() old_rows = list() name = str([nHidden] * nl) + "+" + str(nLatent) + ", Drop-out:" + str(do) total_changes[name] = [ [] for i in range(npatient) ] for r in range(repeat): cos_sim = cosine_similarity(latents[name][r]) corr = pd.DataFrame(cos_sim) step = list() for index, row in corr.iterrows(): if r == 0: max_pos = np.asarray(row.argsort()[:][::-1][1:]) pos_values_init.append(max_pos) old_rows.append(row[max_pos]) else: old_pos = pos_values_init[index] old_row = old_rows[index] total_changes[name][index].append(np.mean(abs(old_row - row[old_pos]))) step.append(np.mean(abs(old_row - row[old_pos]))) if r != 0: kmeans = KMeans(n_clusters=4) rand_tmp = [] for i in range(0,100): kmeans = kmeans.fit(latents[name][r]) labels = kmeans.predict(latents[name][r]) rand_tmp.append(adjust_rand(true_labels, labels)) rand_index[name].append(np.mean(rand_tmp)) stability_total[name].append(np.mean(step)) else: kmeans = KMeans(n_clusters=4) kmeans = kmeans.fit(latents[name][r]) true_labels = kmeans.predict(latents[name][r]) df = pd.DataFrame(stability_total) # Plot stability fig = plt.figure(figsize=(18,18)) ax = sns.boxplot(data=df, palette = sns.color_palette('colorblind', df.shape[1])) ax.set_xticklabels(ax.get_xticklabels(),rotation=45, size=16, horizontalalignment='right') plt.title('Difference across replicationes in cosine similarity compared to first iteration', size=20) plt.ylabel("Average change", size=16) plt.yticks(fontsize=16) #plt.xlabel('Hyperparameter setting') plt.xlabel('') fig.subplots_adjust(bottom=0.2) plt.savefig(path + "hyperparameters/stability_all.png") plt.clf()![stability_all.png](attachment:stability_all.png)# Plot rand index df = pd.DataFrame(rand_index) fig = plt.figure(figsize=(18,18)) ax = 
sns.boxplot(data=df, palette = sns.color_palette('colorblind', df.shape[1])) ax.set_xticklabels(ax.get_xticklabels(),rotation=45, size=16, horizontalalignment='right') plt.title('Rand index across replicationes compared to first iteration', size=20) plt.ylabel("Rand index", size=16) plt.yticks(fontsize=16) #plt.xlabel('Hyperparameter setting') plt.xlabel('') fig.subplots_adjust(bottom=0.2) plt.savefig(path + "hyperparameters/rand_index_all.png") plt.clf()EDA with VisualsCreate visualizations to answer the quiz questions below this notebook. Use `winequality_edited.csv`. You should've created this data file in the previous section: *Appending Data (cont.)*.# Load dataset import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('winequality_edited.csv') df.head()Histograms for Various Featuresdf.fixed_acidity.hist(); df.total_sulfur_dioxide.hist(); df.pH.hist(); df.alcohol.hist();Scatterplots of Quality Against Various Featuresdf.plot(x="volatile_acidity", y="quality", kind="scatter"); df.plot(x="residual_sugar", y="quality", kind="scatter"); df.plot(x="pH", y="quality", kind="scatter"); df.plot(x="alcohol", y="quality", kind="scatter");Rainfall%matplotlib inline from datetime import timedelta import pandas as pd from collections import namedtuple from rdii.rain import find_rain_periods import matplotlib.pyplot as plt from pylab import rcParams rcParams['figure.figsize'] = 14, 6Load and clean datadf = pd.read_csv('../testdata/rainfall1.csv.gz', index_col='time', parse_dates=['time']) rainfall = df['rainfall'].fillna(0).resample('1H').sum() print(rainfall.head()) print(rainfall.describe()) rains = find_rain_periods(rainfall) print('Found {} rain events'.format(len(rains))) %timeit find_rain_periods(rainfall) sorted_rains = rains.sort_values('intensity', ascending=False) sorted_rains.head(10) def plot_rain(ds, start_date, end_date): ds2 = ds[start:end] plt.bar(ds2.index, ds2, 0.01, edgecolor='lightblue', color='lightblue') plt.show() index = 7 start = sorted_rains.iloc[index]['start_date'].date() end = sorted_rains.iloc[index]['end_date'].date() + timedelta(days=1) plot_rain(rainfall, start, end)Making custom plots with matplotlibBy [](https://parrt.cs.usfca.edu). If you like visualization in machine learning, check out my stuff at [explained.ai](https://explained.ai).The matplotlib library has a lot of capabilities, but there's a lot of customization that you can do above and beyond the basic plotting functionality. You can even create your own kinds of plots by using the drawing and annotation primitives.import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.patches as patches # for drawing shapes %config InlineBackend.figure_format = 'retina' df_cars = pd.read_csv("data/cars.csv") # Get average miles per gallon for each car with the same number of cylinders avg_mpg = df_cars.groupby('CYL').mean()['MPG'] avg_wgt = df_cars.groupby('CYL').mean()['WGT'] # do the same for average weight # Get average miles per gallon for each car with the same weight avg_mpg_per_wgt = df_cars.groupby('WGT').mean()['MPG'] # Get the unique list of cylinders in numerical order cyl = sorted(df_cars['CYL'].unique()) # Get a list of all mpg values for three specific cylinder sizes cyl4 = df_cars[df_cars['CYL']==4]['MPG'].values cyl6 = df_cars[df_cars['CYL']==6]['MPG'].values cyl8 = df_cars[df_cars['CYL']==8]['MPG'].valuesAnnotating graphs with text and linesOnce you've drawn plot, it's a good idea to go back and annotated to highlight interesting features. 
Let's get the cars data again and redraw the histogram of car weights, but this time let's annotate it.fig, ax = plt.subplots(figsize=(4,3)) wgt = df_cars['WGT'] n, bins, hpatches = ax.hist(wgt, color='#FEE08F') # save the results of hist ax.set_xlabel("Weight (lbs)") ax.set_ylabel("Count at that weight") ax.set_title("Weight histogram") # iterate through the rectangles associated with each bar for rect in hpatches: rect.set_linewidth(.5) rect.set_edgecolor('grey') ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_linewidth(.5) ax.spines['bottom'].set_linewidth(.5) # -------------------------------------------------------------------------------- # New stuff a horizontal line, and annotated arrow, and a wedge beneath the X axis. # -------------------------------------------------------------------------------- mx = wgt.mean() my = np.mean(n) # Add an arrow with text pointing to something ax.annotate('check this out', xy=(2500, 60), xytext=(2800, 80), arrowprops=dict(color='black',arrowstyle='->'), fontsize=11) ax.text(max(wgt), my+1, "mean count", horizontalalignment='right', fontsize=11) # Draw a horizontal dashed line at the mean ax.plot([min(wgt),max(wgt)], [my,my], ':', c='#415BA3', lw=.8) # Draw a wedge underneath the axis tria = [(mx,0),(mx+90,-5),(mx-90,-5)] tria = np.array(tria) wedge = patches.Polygon(tria, closed=True, facecolor='#415BA3') wedge.set_clip_on(False) # absolutely critical to drawing outside the graph area ax.add_patch(wedge) ax.tick_params(axis='x', which='major', pad=10) # make room for the wedge ax.text(mx+90,-5,"mean",fontsize=9) ax.set_ylim(0,90) plt.show()Exercise 1Add annotations to the following plot to show the intersections. You will have to move the legend to the center right as well.fig, ax = plt.subplots(figsize=(4,3)) # make one subplot (ax) on the figure ax.plot(cyl, avg_mpg, c='#4574B4', label="mpg") # Those are 6-digit hexadecimal numbers for red-green-blue ax.plot(cyl, avg_wgt/100, c='#F46C43', label="wgt") # ... add annotations here ... plt.legend(loc='center right') plt.show()Your result might look something like this: Adding shapes to graphsLet's say we want to fill a two-dimensional region with different color shape. To do that, we need to add so-called [Patches](https://matplotlib.org/api/patches_api.html?highlight=patchesmodule-matplotlib.patches) to the drawing area. We need a new import:import matplotlib.patches as patchesThe basic idea is to create a patch and then add it to the drawing area, `ax`. We also have to set the X and Y limits because the library does not figure this out from the patches we add.fig, ax = plt.subplots(figsize=(4,3)) ax.set_xlim(0,50) ax.set_ylim(0,50) rect = patches.Rectangle(xy=(5,20), width=40, height=25, facecolor='#E0F4F7', linewidth=.5, edgecolor="grey") ax.add_patch(rect) rect = patches.Rectangle(xy=(20,10), width=10, height=20, alpha=.75, facecolor='#FEE08F', linewidth=.5, edgecolor="grey") ax.add_patch(rect) ax.add_patch( patches.Wedge(center=(5,5), r=10, theta1=0, theta2=90, facecolor='#73ADD2', linewidth=.5, edgecolor="black") ) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_linewidth(.5) ax.spines['bottom'].set_linewidth(.5) plt.show()Note: the order in which we add the patches is relevant. Drawing the orange after the cyan puts the orange one on top. I have set the alpha channel to be slightly transparent on the orange one. 
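If you would rather not depend on the order of the `add_patch` calls, the stacking can also be controlled explicitly with the `zorder` keyword. A minimal sketch (coordinates and colors are arbitrary; `plt` and `patches` are the imports from the cells above):

fig, ax = plt.subplots(figsize=(4,3))
ax.set_xlim(0,50)
ax.set_ylim(0,50)
# A higher zorder is drawn on top, regardless of the order in which the patches are added
ax.add_patch(patches.Rectangle(xy=(5,20), width=40, height=25, facecolor='#E0F4F7', zorder=2))
ax.add_patch(patches.Rectangle(xy=(20,10), width=10, height=20, facecolor='#FEE08F', zorder=1))
plt.show()

Here the first rectangle ends up on top even though it was added first, because it carries the larger zorder.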
Exercise 2Fill in the following code so that it draws rectangles at random locations, with random width and height, and random color. It might look like this:fig, ax = plt.subplots(figsize=(4,3)) size = 50 ax.set_xlim(0,size) ax.set_ylim(0,size) n = 5 xy = np.random.rand(n,2) * size w = np.random.rand(n) * size/2 h = np.random.rand(n) * size/2 # get mapping of n colors in the coolwarm colormap cmap = plt.get_cmap('coolwarm') colors=cmap(np.linspace(0,1,num=n)) # get n colors # ... Draw random rectangles ... plt.show()Strip plotsBox plots are a common mechanism to display information about the distribution of a collection of numbers. However, the box plot is still showing more or less point statistics. A violin plot tries to show the shape of the distribution by varying the width. I actually prefer something called a strip plot, but it is not a standard plot so we have to do it ourselves. The idea is simply to scatterplot all values but add noise to the X or Y values, depending on the orientation. Let's make a vertical strip plot for three series from the cars data set. If we just plot all of the miles per gallon values for 4, 6, and 8 cylinder cars, we get the following unsatisfying graph. Despite setting the transparency setting, we still don't have a clear idea about where the density lies.fig, ax = plt.subplots(figsize=(4,3)) n4 = len(cyl4) n6 = len(cyl6) n8 = len(cyl8) ax.scatter([4]*n4, cyl4, alpha=.2) ax.scatter([6]*n6, cyl6, alpha=.2) ax.scatter([8]*n8, cyl8, alpha=.2) ax.set_xlabel("Cylinders") ax.set_ylabel("MPG") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_linewidth(.5) ax.spines['bottom'].set_linewidth(.5) plt.show() fig, ax = plt.subplots(figsize=(4,3)) n4 = len(cyl4) n6 = len(cyl6) n8 = len(cyl8) sigma = .05 mu = 0 x_noise4 = np.random.normal(mu, sigma, size=n4) x_noise6 = np.random.normal(mu, sigma, size=n6) x_noise8 = np.random.normal(mu, sigma, size=n8) ax.scatter(4+x_noise4, cyl4, alpha=.2) ax.scatter(6+x_noise6, cyl6, alpha=.2) ax.scatter(8+x_noise8, cyl8, alpha=.2) pad = 4*sigma ax.set_xlim(4-pad,8+pad) ax.set_xlabel("Cylinders") ax.set_ylabel("MPG") ax.set_title("Strip plot of # cylinders vs MPG") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_linewidth(.5) ax.spines['bottom'].set_linewidth(.5) plt.show()Exercise 3Using the same cylinder vs mpg data, create a horizontal strip plot where the number of cylinders is on the vertical axis and the miles per gallon is on the horizontal axis. Line + text drawingsThere are times when we want something that looks a bit more like an "infographic". As an example, let's look at some world happiness scores and see how they change from 2015 to 2016 (data is in the [data directory](https://github.com/parrt/msds593/tree/master/notebooks/data)):df_2015 = pd.read_csv("data/happy-2015.csv") df_2016 = pd.read_csv("data/happy-2016.csv") df_2015.head(2) countries = ['Finland','Canada','Norway'] countries = ['Syria','Togo','Burundi'] scores = dict() for c in countries: a = df_2015.loc[df_2015['Country']==c, "Happiness Score"].iloc[0] b = df_2016.loc[df_2016['Country']==c, "Happiness Score"].iloc[0] scores[c] = (a,b) scoresNow that we've pulled out the data we want for three countries, let's do some plotting with just lines in text. 
The axes are a bit tricky to get right.fig, ax = plt.subplots(figsize=(3,3)) # Let's use 0 as the left-hand side and 1 as the right-hand side # (below we will set labels to 2015 for 0 and 2016 for 1) ax.set_xlim(0-.1,1+.1) ax.set_ylim(2.7,3.32) # Draw lines and text associated with scores for c in scores: a,b = scores[c] color = '#878787' if c=='Togo': color = '#F46C43' ax.plot([0,1], [a,b], 'o-', lw=2, c=color) ax.text(0-.04, a, f"{a:.1f}", color='#878787', horizontalalignment='right', verticalalignment='center') ax.text(1+.04, b, f"{b:.1f}", color='#878787', horizontalalignment='left', verticalalignment='center') ax.text(0-.20, a, c, color='#878787', horizontalalignment='right', verticalalignment='center') # Make the axes look right ax.set_title("Happiness scores\n2015 - 2016") ax.spines['bottom'].set_bounds(0, 1) ax.set_xticks([0,1]) ax.set_xticklabels(['2015','2016']) ax.set_yticks([]) # Only show the bottom axis ax.spines['left'].set_visible(False) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_linewidth(.5) plt.show()[Project Title]by Problem Description:[Insert formal problem definition]# Display relevant visuals # Display relevant visualsProblem assumptions:* [Item 1]* [Item 2]* ... Similar problems:* [Item 1]* [Item 2]* ... Basic Information* Number of observations: **[NUMBER OF RECORDS]*** Number of features: **[NUMBER OF COLUMNS]*** Data types of features: **[DATA TYPES]*** Target variable: **[INSERT HERE]** *(Classification)* Peek at the Data# Import Libaries import pandas as pd import numpy as np # Import Data #data_location = "/Users/wmemorgan/Google Drive/Computer_Data_Science_Lab/iris_data_modeling/data/02_prepared_data/iris.data" #names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class'] #data = pd.read_csv(data_location, names=names) data.head()Class Distributionprint(data.groupby('class').size())class Iris-setosa 50 Iris-versicolor 50 Iris-virginica 50 dtype: int64Other Observations* [Item 1]* [Item 2]* ...# Import Visualization Libaries import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline f, axes = plt.subplots(2, 2, figsize=(12,8)) #More than one dimension bp1 = sns.boxplot(data=data, x="class", y="sepal-length", ax=axes[0,0]) bp2 = sns.boxplot(data=data, x="class", y="sepal-width", ax=axes[0,1]) bp3 = sns.boxplot(data=data, x="class", y="petal-length", ax=axes[1,0]) bp4 = sns.boxplot(data=data, x="class", y="petal-width", ax=axes[1,1])Model Evaluation and Selection* Selected a mix of simple linear (LR and LDA), nonlinear (KNN, CART, NB and SVM) algorithms. * Used k-fold cross-validation (CV) procedure to evaluate the accuracy score of each algorithm.* Identified k-Nearest Neighbor (k-NN) as the best algorithm to use for modeling this dataset. 
* **NOTE: Used default parameters for KNNClassifier module**# Test options and evaluation metric seed = 7 scoring = 'accuracy'Training and Test Data# Import library from sklearn.model_selection import train_test_split, KFold, cross_val_score # Assign features and output X = data.drop('class',axis=1) y = data['class'] test_size = 0.2 seed = 7 # Create training and test datasets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state=seed) # Import Algorithm Libraries from sklearn.linear_model import LogisticRegression from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.model_selection import KFold, cross_val_scoreAlgorithm Evaluation# Spot Check Algorithms models = [] models.append(('LR', LogisticRegression())) models.append(('LDA', LinearDiscriminantAnalysis())) models.append(('KNN', KNeighborsClassifier())) models.append(('CART', DecisionTreeClassifier())) models.append(('NB', GaussianNB())) models.append(('SVM', SVC())) # evaluate each model in turn results = [] names = [] for name, model in models: kfold = KFold(n_splits=10, random_state=seed) cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring) results.append(cv_results) names.append(name) msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()) print(msg) # Compare Algorithms fig = plt.figure() fig.suptitle('Algorithm Comparison') ax = fig.add_subplot(111) plt.boxplot(results) ax.set_xticklabels(names) plt.show()Fit Model to the Training Dataset# Model FittingResults* [Item 1]* [Item 2]* ...* **Success Factors:** * [Item 1] * [Item 2] * ...# Import accuracy metrics modules from sklearn.metrics import accuracy_score, classification_report, confusion_matrixAccuracy Summaryprint('Accuracy Score:') print(accuracy_score(y_test,predictions)) print('Confusion Matrix:') print(confusion_matrix(y_test,predictions)) print('Classification Report:') print(classification_report(y_test,predictions))Accuracy Score: 0.9 Confusion Matrix: [[ 7 0 0] [ 0 11 1] [ 0 2 9]] Classification Report: precision recall f1-score support Iris-setosa 1.00 1.00 1.00 7 Iris-versicolor 0.85 0.92 0.88 12 Iris-virginica 0.90 0.82 0.86 11 avg / total 0.90 0.90 0.90 30Hey Nikhil:I downloaded the folder from Dropbox and unzipped it. Then it create a folder called mcit-hackathon-aug-2020 on GCP.Then I created three folders (data with the cleaned data, keypoints with the keypoints, and train_val for training and validation sets)What I'm doing is create a logic using regex to seperate the file name (e.g 01166A18.jpg) into the index (01166) and the age(18).Then I'll create n+1 classes of ages (from age 0 to age n)And then I'll link the photos to the classes. (This will be tricky)Then create a DataBunchThe rest should be to create ResNet50 learner, then learn. 
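As a rough sketch of the regex step described above (the file-name pattern is assumed from the single example 01166A18.jpg, reading the digits after the 'A' as the age; `parse_fname` is a hypothetical helper, and the cells below end up slicing the name directly instead):

import re

def parse_fname(fname):
    # Hypothetical helper: split e.g. "01166A18.jpg" into (index, age)
    m = re.match(r"(\d+)A(\d+)\.jpg$", fname)
    if m is None:
        return None
    return m.group(1), int(m.group(2))

print(parse_fname("01166A18.jpg"))  # ('01166', 18)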
Data Prep!pip install -Uqq fastbook import fastbook fastbook.setup_book() from fastbook import * from fastai.vision.widgets import * !pwd base_dir = "/home/jupyter/mcit-hackathon-aug-2020/" base_path = Path(base_dir) data_dir = base_dir + "data/" data_path = Path(data_dir) data_path.ls()https://github.com/JingchunCheng/All-Age-Faces-Dataset DataBlock creationfnames = get_image_files(data_path) dblock = DataBlock() def label_func(fname): return int(fname.name[-6:-4]) dblock = DataBlock( blocks = (ImageBlock, RegressionBlock), get_items = get_image_files, get_y = label_func, splitter = RandomSplitter(), item_tfms = Resize(224)) dsets = dblock.datasets(data_path) dsets.train[0] len(dsets.train),len(dsets.valid) dls = dblock.dataloaders(data_path) dls.valid.show_batch(max_n=4, nrows=1)Train Model (stage 1)learn = cnn_learner(dls, resnet50, metrics=rmse) learn.model learn.fit_one_cycle(4)Loss is still pretty big.More training to go.learn.fit_one_cycle(4)Since training loss is lower than valid_loss, if we keep training, we would run into overfitting. Let's save the model, unfreeze, find the learning rate and fine tune it.# Save the weights of the network for future reuse learn.save('stage-1') learn.unfreeze() # Unfreeze the layers to test learn.lr_find() # Find the learning rateData Cleaningfns = get_image_files('data') w = ImagesCleaner(('A','B')) w.set_fns(fns) wTraining the model (stage 2) with fine tuninglearn_2 = cnn_learner(dls, resnet50, metrics=rmse) learn_2.load('stage-1');Using the learning rate plot, we found that the steepest learning rate was 2.2908675418875646e-06. So let's picked 2e-6 as the rate.learn_2.fine_tune(4, lr=2e-6)Looks like we could eek out more from this. Going on the right path!Before tuning more, let's save the stage along the way to err on the safe side.learn_2.save('stage-2') learn_2.fine_tune(4, lr=2e-6) learn_2.unfreeze() learn_2.lr_find()We could see that after stage 2, things didn't get any better as it bottoms out at 7 for rmse. Let's call it a day and export this model outlearn_2.save('stage-3')Exporting stage 3learn_2.export()integrated_test_00----Check ray generation for plasma sources. In particular we want to check that emissivity to ray calculations are being performed correctly including solid angle and bundle volume calculations.%matplotlib notebook import numpy as np import xicsrt # 1. config = dict() # 2. config['general'] = {} config['general']['number_of_iter'] = 1 config['general']['number_of_runs'] = 1 # 3. config['sources'] = {} config['sources']['source'] = {} config['sources']['source']['class_name'] = 'XicsrtPlasmaCubic' config['sources']['source']['origin'] = [0.0, 0.0, 0.0] config['sources']['source']['xsize'] = 0.01 config['sources']['source']['ysize'] = 0.01 config['sources']['source']['zsize'] = 0.01 config['sources']['source']['target'] = [0.0, 0.0, 1.0] config['sources']['source']['emissivity'] = 1e12 config['sources']['source']['time_resolution'] = 1 config['sources']['source']['spread'] = np.radians(180) # 4. config['optics'] = {} config['optics']['detector'] = {} config['optics']['detector']['class_name'] = 'XicsrtOpticDetector' config['optics']['detector']['origin'] = [0.0, 0.0, 1.0] config['optics']['detector']['zaxis'] = [0.0, 0.0, -1.0] config['optics']['detector']['xsize'] = 0.1 config['optics']['detector']['ysize'] = 0.1 config['optics']['detector']['check_size'] = False # 6. 
results = xicsrt.raytrace(config) num_expected = (config['sources']['source']['emissivity'] * config['sources']['source']['xsize'] * config['sources']['source']['ysize'] * config['sources']['source']['zsize']) num_actual = results['total']['meta']['source']['num_out'] print(f'Rays generated at source') print(f' num_expected: {num_expected} num_actual: {num_actual}') np.testing.assert_allclose(num_expected, num_actual, 1) num_exp_detector = num_expected/2 num_act_detector = results['total']['meta']['detector']['num_out'] print(f'Rays on detector plane') print(f' num_expected: {num_exp_detector} num_actual: {num_act_detector}') sigma_5 = np.sqrt(num_exp_detector)*5 np.testing.assert_allclose(num_exp_detector, num_act_detector, sigma_5)Electric Vehicle Price and Sales Tax ExemptionThis notebook is an independent t-test analysis using electric vehicles (EVs) in the state of Washington from 1998 to 2020.By Data Source: Electric Vehicle Data from Department of Revenue Washington State. The data contains information for 62261 electric vehicles for model years 1993 to 2021 in Washington state.According to the Washington Departmetn of Revenue, vehicles that can travel at least 30 miles on battery power alone qualify for a sales tax exemption up to 25,000 dollars of the price of a vehicle.URL: https://data.wa.gov/Transportation/Electric-Vehicle-Population-Data/f6w7-q2d2We're interested in the price of electric vehicles and how that impacts sales tax exemptions. The data contains information on over 62,000 EVs sold in Washington between 1998 to 2020. Because some (base manufactured retail price (MSRP) are missing, this analysis looks only at the years 2008 through 2020. We're interested in whether or not a car is eligible for the sales tax exemption and what the price of the EV is. Specifically, we're asking whether there is any difference in the mean price of cars that qualify and those that don't. This information may be useful to automakers and legislators alike. Automakers may adjust sales and marketed efforts if it turns out that the price of a car does not impact whether it qualifies for a tax exemption. Lawmakers may adjust tax policy on electric vehicles to either increase tax revenue or increase environmental sustainability efforts.Our null hypothesis is that there is no difference in the median price between EVs that qualify and those that don't.Our alternative hypothesis is that there is a significant difference between EVs that qualify and those that don't. The data reveals that both price (MSRP) and eligibility (yes or no) are both not normally distributed based on Q-Q plots. Instead of using a t-test on normally distributed data, we turn to the Mann-Whitney U test which is the nonparametric alternative t-test.import pandas as pd import numpy as np # pip install plotly import plotly.graph_objects as go import plotly.express as px import seaborn as sns import matplotlib.pyplot as plt import statsmodels.api as sm from scipy import stats %matplotlib inline df = pd.read_csv('Electric_Vehicle_Population_Data.csv') df.head() # looking at number of columns and rows and data types for Electric Vehicle Type, CAFV and Base MSRP. 
df.info() # changing columns names to remove spaces df.columns = df.columns.str.replace(' ','_') df.columns # EV models that do not have a base price by model year # there are 8542 vehicles that have a 0 for price in the Base_MSRP column df.query('Base_MSRP==0')['Model_Year'].value_counts() # which models don't have a price listed df.query('Base_MSRP==0')['Model'].value_counts() # removing 8542 cars without a price. 7926 of them are 2020 model years # consider whether removing this many late model vehicles may bias a ttest. indexNames = df[ df['Base_MSRP'] == 0 ].index df.drop(indexNames , inplace=True) df.info() df['Model_Year'].value_counts() # removing 1 Porsche 918 Spyder as an outlier at $845,000 spyder = df[ df['Base_MSRP'] == 845000 ].index df.drop(spyder , inplace=True) df.describe() # shortening column name based on EV's that are eligible for Washington state tax exemptions # eligible = 1 and not eligible = 0 df['Eligibility'] = df['Clean_Alternative_Fuel_Vehicle_(CAFV)_Eligibility'].replace({'Not eligible due to low battery range': 0,'Clean Alternative Fuel Vehicle Eligible': 1}) df df.describe() # how many of each vehicle type df['Electric_Vehicle_Type'].value_counts() # how many EVs are eligible for the tax break and how many are not # both samples sizes are large at 51,494 and 10,767 cars of each type df['Eligibility'].value_counts() # replacing values for Eligibility # eligible = 1 # not eligible = 0 eligible_yes = df[(df['Eligibility'] == 1)] eligible_yes.info() eligible_no = df[(df['Eligibility'] == 0)] eligible_no.info() # price is not normally distributed sns.distplot(df['Base_MSRP']) plt.show() # price is not normally distributed # skew may be within normal limits but kurtosis is greater than 3 plt.hist(df['Base_MSRP'], bins=40, label = 'Price') plt.legend(loc="upper right") print(stats.describe(df['Base_MSRP'])) plt.show() # creating histgrams of eligible and not eligible EVs to look at distribution # eligible EVs is not normally distributed but skew and kurtosis are within normal limits to do a ttest plt.hist(eligible_yes['Base_MSRP'], bins=40, label = 'Eligible') plt.legend(loc="upper right") print(stats.describe(eligible_yes['Base_MSRP'])) plt.show() # EVs not eligible are not normally distributed # both skew and kurtosis are high and greater than 3 so a ttest may not reveal dependable results plt.hist(eligible_no['Base_MSRP'], bins=40, label = 'Not eligible') plt.legend(loc="upper right") print(stats.describe(eligible_no['Base_MSRP'])) plt.show() # boxplot reveals a lot of outliers in price especially for EVs not eligible # is it a good idea to remove EV costing over $120k? # log transform using y=np.log(df['Base_MSRP']) did not improve normality sns.boxplot(x='Eligibility', y='Base_MSRP', data=df) plt.show() # violin plot reveals a midrange gap in price for both EVs that qualify for tax exemptions and those that don't # EVs that are eligible for tax exemptions have a bigger gap in price. # perhaps we might look at whether standard EVs or luxury EVs qualify for tax exemptions sns.violinplot(x='Eligibility', y='Base_MSRP', data=df) plt.show()C:\Users\Mike\Anaconda3\lib\site-packages\scipy\stats\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. 
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumvalMeans are not great to compare on using visuals# checking eligiblity normality with QQ plots # normality assumption is not valid sm.qqplot(eligible_yes['Base_MSRP'], line='s') plt.title('Eligible EVs') plt.show() # checking not eligiblity normality with QQ plots # normality assumption is not valid sm.qqplot(eligible_no['Base_MSRP'], line='s') plt.title('Not Eligible EVs') plt.show() t, p = stats.ttest_ind(eligible_yes['Base_MSRP'], eligible_no['Base_MSRP']) t, p p< 0.05We need a nonparametric t-test for two independent samples. Wilcoxon Rank Sum Test or Kruskal-Wallis or the Mann-Whitney?M-W is used when measurements are independent, samples have equal variances and > 20 per sample, OR normality of the population of every sample is verified OR assumed. Tests if price is different based on rank instead of mean.Neither the independent variables or price are normally distributed and are positively skewed. A ttest ran on this data revealed a significant difference but results are not dependable. We'll run a Mann-Whitney test on the data because we only have two independent variables with similar but nonparametric distributions. We'll look for differences by medians rather than means.* $H_0$: Median price between eligible and not eligible vehicles are the same.* $H_a$: Median price between eligible and not eligible vehicles are significantly different.# creating a new list of booleans of vehicles by eligibility # required for Mann-Whitneytest using PyNonpar package # scipy package for Mann-Whitney does not return a z value which we may need if sig price_eligible = df['Eligibility'] == 1 price_noneligible = df['Eligibility'] == 0 price_noneligible.head() # creating a new list of price for Mann-Whitney test using PyNonpar package price = df['Base_MSRP'] price.value_counts() # grouping Base_MSRP and Eligiblity for Mann-Whitney test sample1 = price[price_eligible] sample2 = price[price_noneligible] sample1.describe().median() sample2.describe().median() U, p =stats.mannwhitneyu(sample1, sample2) U,p # results (160930993.0, 0.0)Using a Mann-Whitney U test reveals a p-value = or less than 0. We can reject the null hypothesis and accept the alternative hypothesis that there is a significan difference in the median price between vehicles that are eligible for the sales tax exemption in Washington state and vehicles that are not. To determine how different they are, we will get a confidence interal using a bootstrap method to report a more meaningful range of difference.# code to bootstrap a CI from my two samples med_diffs = [] for i in range(10000): eligible = sample1.sample(frac=1.0, replace=True) noneligible = sample2.sample(frac=1.0, replace=True) med_diff = eligible.median() - noneligible.median() med_diffs.append(med_diff) ci_lo = np.percentile(med_diffs, 2.5) ci_hi = np.percentile(med_diffs, 97.5) # eligible is bigger median 2,600 to 2,880 (ci_lo, ci_hi)Because we subtracted noneligible vehicle median price from eligible vehicle price, positive numbers in our CI results indicate that eligible vehicles have a higher median price than noneligible vehicles. 
We can report with 95% confidence that vehicles that do qualify the Washington state sales tax exemptions have a higher median MSRP anywhere from 2,600 to 2,880 dollars.plt.hist(med_diffs) plt.axvline(ci_lo, c="orange") plt.axvline(ci_hi, c="orange") plt.show()Method 1: polyfit()m_fit, b_fit = np.poly1d(np.polyfit(x,y,1,w=1./y_err)) #weight with uncertainties print(m_fit, b_fit) y_fit = m_fit*x + b_fit f = plt.figure(figsize=(7,7)) plt.errorbar(x,y,yerr=y_err,fmt='o',label='data') plt.plot(x,y_fit,label='fit') plt.xlabel('x') plt.ylabel('y') plt.legend(loc=2,frameon=False)Method 2: scipy + optimize#import optimize from scipy from scipy import optimize #define the function to fit def f_line(x,m,b): return m*x + b #perform the fit params, params_cov = optimize.curve_fit(f_line,x,y,sigma=y_err) m_fit = params[0] b_fit = params[1] print(m_fit,b_fit) f = plt.figure(figsize=(7,7)) plt.errorbar(x,y,yerr=y_err,fmt='o',label='data') plt.plot(x,y_fit,label='fit') plt.xlabel('x') plt.ylabel('y') plt.legend(loc=2,frameon=False)More complicated data#redefine x and y npoints = 50 x = np.linspace(0.,2*np.pi,npoints) #make y a complicated function a = 3.4 b = 2.1 c =0.27 d = -1.3 sig = 0.6 y = a*np.sin(b*x + c) + d + np.random.normal(scale=sig,size=npoints) y_err = np.full(npoints,sig) f = plt.figure(figsize=(7,7)) plt.errorbar(x,y,yerr=y_err,fmt='o') plt.xlabel('x') plt.ylabel('y')Using scipy.optimize.curve_fit()#import optimize from scipy from scipy import optimize #define the function to fit def f_line(x,a,b,c,d): return a*np.sin(b*x + c) + d #perform the fit - p0 values are your best guess for a,b,c,d params, params_cov = optimize.curve_fit(f_line,x,y,sigma=y_err,p0=[1,2.,0.1,-0.1]) a_fit = params[0] b_fit = params[1] c_fit = params[2] d_fit = params[3] print(a_fit,b_fit,c_fit,d_fit) y_fit = a_fit*np.sin(b_fit*x + c_fit) + d_fit f = plt.figure(figsize=(7,7)) plt.errorbar(x,y,yerr=y_err,fmt='o',label='data') plt.plot(x,y_fit,label='fit') plt.xlabel('x') plt.ylabel('y') plt.legend(loc=0,frameon=False)Probability estimation for numerical / ordinal / categorical variables%matplotlib inline import numpy as np import pandas as pd import seaborn import warnings warnings.filterwarnings('ignore')load datasetdf = seaborn.load_dataset('titanic') df.info() RangeIndex: 891 entries, 0 to 890 Data columns (total 15 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 survived 891 non-null int64 1 pclass 891 non-null int64 2 sex 891 non-null object 3 age 714 non-null float64 4 sibsp 891 non-null int64 5 parch 891 non-null int64 6 fare 891 non-null float64 7 embarked 889 non-null object 8 class 891 non-null category 9 who 891 non-null object 10 adult_male 891 non-null bool 11 deck 203 non-null category 12 embark_town 889 non-null object 13 alive 891 non-null object 14 alone 891 non-null bool dtypes: bool(2), category(2), float64(2), int64(4), object(5) memory usage: 80.6+ KBPROBABILITY: for categorical / ordinal variables## Estimate probability of a categorical/ordinal variable def estimate_probability_non_numerical(df:pd.DataFrame, colname:str)->pd.DataFrame: """ Estimate probability values of a categorical/ordinal variable. df -- df who include the variable to be used. colname -- variable name of data to be used. return -- df of categories and their respective probabilities. """ # validate if colname in df assert colname in df.columns.tolist(), f'"{colname}" is required.' 
# validate if too much categories cats = list(df[colname].dropna().unique()) assert len(cats) < 50, f'"{colname}" has too much categories.' # create dictionary of probability values per categories dfprob = df[[colname]].dropna().groupby(colname)[[colname]].count() / len(df[[colname]].dropna()) # rename dfprob.rename(columns = {colname: 'probability'}, inplace = True) # return return dfprob.reset_index() # for categorical colname = 'embark_town' dfprob = estimate_probability_non_numerical(df, colname) dfprob # for ordinal colname = 'pclass' dfprob = estimate_probability_non_numerical(df, colname) dfprobPROBABILITY: for numerical variables (estimated PDF)> NOTE: The returned PDF is just an approximation by KDE.## Get pdf estimated with KDE for 1D data def estimate_probability_numerical(df:pd.DataFrame, colname:str)->pd.DataFrame: """ Estimate probability values of a numerical variable. df -- df who include the variable to be used. colname -- variable name of data to be used. return -- df of categories and their respective probabilities. """ from scipy import stats # validate if colname in df assert colname in df.columns.tolist(), f'"{colname}" is required.' # get data v = df[colname].dropna().values # get x values x = np.linspace(v.min(),v.max(), v.shape[0]) # get kde kernel kernel = stats.gaussian_kde(v) # store in a df dfprob = pd.DataFrame({colname:x, 'probability':kernel(x)}) # return return dfprob colname = 'fare' dfprob = estimate_probability_numerical(df, colname) dfprobUsing Jupyter Notebooks in an Upper-Division GEOL Class*(, demo for GEOL faculty, June 2020)*This notebook demonstrates some of the ways I used Jupyter Notebooks in **GEOL3820 Fluid Earth** during fall semester 2019.To run this on the CSDMS JupyterHub server, go to this URL and click the link in the README:https://github.com/gregtucker/jupyter-in-geol-classroom Outline- About notebooks - What's a Jupyter Notebook? - How do you install notebook software? - How do you run notebooks remotely?- Summary of uses in GEOL 3820 and GEOL 5775 - Lecturing with formatted math (simultaneous in-person and zoom) - Student papers written as notebooks - Lab and assignment-based exercises in data analysis and plotting - Lab and assignment-based exercises in experimenting with numerical models - Students developing and sharing programming assignments (GEOL 5775)- Some things you and/or your students can do with notebooks - Create formatted text (Markdown), math ($\LaTeX$), and embedded figures - Run Python, R, or Julia code About notebooks Fun factJupyter notebooks---originally IPython notebooks---were invented by [](https://en.wikipedia.org/wiki/Fernando_P%C3%A9rez_(software_developer)) when he was a physics PhD student here at CU Boulder. Installing locallyThe easiest way to install the software is to install the [Anaconda Python Distribution](https://www.anaconda.com/products/individual). Once installed, you can launch a notebook "server" (a browser app) either through the Anaconda app, or by opening a command window and running: `jupyter notebook`. Binder and other JupyterHub serversAn alternative is to run notebooks on a remote system that's running a JupyterHub server. One free resource is [**Binder**](https://mybinder.org/). CU Research Computing offered a prototype JupyterHub server for teaching in 2019-20; last I checked, they didn't have the resources to continue it in 2020-21 but that may change. CSDMS has a cloud-based server that we use for workshops. 
Classroom applications Writing math and notes in real timeBelow is an excerpt from a notebook I wrote in real time, during a snowstorm, to a mixed audience of students in the room and students connected via zoom: Example Pitot calculation:Suppose airspeed is 200 m/s, what's the pressure differential?$P_t - P_s = \rho v_s^2 / 2$, so:rho = 1.0 # kg/m3 vs = 200 # m/s dP = rho * vs * vs * 0.5 dPArcticHeat Alamo 11010 Deployment:** 2018 **__pyversion__==3.6 __author__==S.Bellimport datetime print("Last run {0}".format(datetime.datetime.now()))Last run 2018-11-29 12:47:26.041038ERDDAP Communication and Dataset Retrieval Connecting to erddap and retrieving dataset information for ALAMO Floatsimport warnings #remove the numpy/pandas/cython warnings warnings.filterwarnings(action='ignore', message="numpy.dtype size changed,") from erddapy import ERDDAP import pandas as pd import numpy as np import cmocean #server_url='http://downdraft.pmel.noaa.gov:8080/erddap' server_url='http://ferret.pmel.noaa.gov/alamo/erddap' e = ERDDAP(server=server_url)Get only ALAMO Float datafiles - can be found by searching for arctic.Searching on ALAMO provides an "alldata" response that would need to be filtered outdf = pd.read_csv(e.get_search_url(response='csv', search_for='arctic_heat_alamo_profiles_11010')) alamos = df['Dataset ID'].values print(alamos) kw = { 'standard_name': 'sea_water_temperature', 'min_lon': -180.0, 'max_lon': -130.0, 'min_lat': 65.0, 'max_lat': 90.0, 'min_time': '2018-01-10T00:00:00Z', 'max_time': '2019-01-10T00:00:00Z', 'cdm_data_type': 'trajectoryprofile' } variables = [e.get_var_by_attr(dataset_id=alamo, standard_name=lambda v: v is not None) for alamo in alamos] common_variables = set(variables[0]).intersection(*variables[1:]) common_variables.update(['CYCLE_NUMBER']) #common_variables.remove('JULD') constraints = { 'longitude>=': kw['min_lon'], 'longitude<=': kw['max_lon'], 'latitude>=': kw['min_lat'], 'latitude<=': kw['max_lat'], 'time>=': kw['min_time'], 'time<=': kw['max_time'], } download_url = e.get_download_url( dataset_id=alamos[0], protocol='tabledap', response='csv', variables=common_variables, ) print(download_url)http://ferret.pmel.noaa.gov/alamo/erddap/tabledap/arctic_heat_alamo_profiles_11010.csv?longitude,JULD_LOCATION,time,CYCLE_NUMBER,PRES,PSAL,latitude,TEMPRetrieving Datafrom requests.exceptions import HTTPError dfs = {} for alamo in alamos: print(alamo) try: e = ERDDAP(server=server_url, protocol='tabledap', response='csv' ) e.constraints=constraints e.variables=common_variables e.dataset_id=alamo except HTTPError: print('Failed to generate url {}'.format(alamo)) continue try: dfs.update({alamo: e.to_pandas( index_col='time', parse_dates=True, skiprows=(1,) # units information can be dropped. 
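If one searched more broadly (for example on 'alamo'), that aggregate entry could be dropped from the search results before use. A hedged sketch, assuming the same 'Dataset ID' column used in the next cell:

# Hypothetical broader search; keep only the per-float profile datasets
df_all = pd.read_csv(e.get_search_url(response='csv', search_for='alamo'))
df_all = df_all[~df_all['Dataset ID'].str.contains('alldata')]

In this notebook the issue is avoided by searching directly for the specific dataset id, as in the next cell.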
)}) except: passarctic_heat_alamo_profiles_11010Alternative Float Lat/Lon retrievals from WHOI NetCDF filesERDDAP system is not updating for whatever reasons, pull netcdf files from WHOI system for processing,#using xarray for data read import xarray as xa xdf = xa.open_mfdataset('/Volumes/WDC_internal/Users/bell/ecoraid/2018/Additional_FieldData/ArcticHeat/ALAMO_FLOATS/netcdf_fromWHOI/11010/*.nc') xdfERDDAP retrieval of Smith and Sandwell Bathymetry (V11.1) for contourIt's easier to just build the url than it is to use erddapy to build it as griddap is not yet officially supported#using xarray for data read import xarray as xa server_url = 'https://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSS111' bathy = xa.open_dataset(server_url) bathy bathy_sub = bathy.sel(latitude=slice(72,70),longitude=slice(-172,-162)) bathy_sub.topo.plot(cmap=cmocean.cm.deep_r)Plotting and VisualizationPlots done with matplotlib + cartopy **and** gmt-python%matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import cartopy.crs as ccrs import cartopy.feature as cfeature from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter import gmt ### specify primary bulk figure parameters fontsize = 20 labelsize = 16 #plotstyle = 'seaborn' max_xticks = 10 plt.style.use('seaborn-ticks') mpl.rcParams['svg.fonttype'] = 'none' mpl.rcParams['ps.fonttype'] = 42 #truetype/type2 fonts instead of type3 mpl.rcParams['pdf.fonttype'] = 42 #truetype/type2 fonts instead of type3 mpl.rcParams['axes.grid'] = False mpl.rcParams['axes.edgecolor'] = 'black' mpl.rcParams['axes.linewidth'] = 1.5 mpl.rcParams['axes.labelcolor'] = 'black' mpl.rcParams['grid.linestyle'] = '--' mpl.rcParams['grid.linestyle'] = '--' mpl.rcParams['xtick.major.size'] = 4 mpl.rcParams['xtick.minor.size'] = 1 mpl.rcParams['xtick.major.width'] = 2 mpl.rcParams['xtick.minor.width'] = 1 mpl.rcParams['ytick.major.size'] = 4 mpl.rcParams['ytick.minor.size'] = 1 mpl.rcParams['ytick.major.width'] = 2 mpl.rcParams['ytick.minor.width'] = 1 mpl.rcParams['ytick.direction'] = 'out' mpl.rcParams['xtick.direction'] = 'out' mpl.rcParams['ytick.color'] = 'black' mpl.rcParams['xtick.color'] = 'black' mpl.rcParams['contour.negative_linestyle'] = 'solid'WHOI's Quicklook Pagesdef show_iframe(src): """Helper function to show HTML returns.""" from IPython.display import HTML iframe = ''.format return HTML(iframe(src=src)) show_iframe('http://argo.whoi.edu/alamo/11010/index.html')Alternative Plotsextent = [-177.5, -162.5, 65, 72.5] fig = gmt.Figure() fig.coast(region=extent, projection='B-170/60/55/60/6i', land='lightgray', water='gray', frame=True,U=True) for alamo, df in dfs.items(): fig.plot(x=df['longitude'], y=df['latitude'], color='blue', style='p0.05') fig.show() def make_map(projection=ccrs.PlateCarree()): fig, ax = plt.subplots(figsize=(16, 8.5), subplot_kw=dict(projection=projection)) if projection == ccrs.PlateCarree(): gl = ax.gridlines(draw_labels=True) gl.xlabels_top = gl.ylabels_right = False gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER return fig, ax projection=ccrs.LambertConformal(central_longitude=-160.0) transformation=ccrs.PlateCarree() land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m', edgecolor='face', facecolor='1.0') dx = dy = 0.5 extent = [-175, -150, 62, 72.5] fig,ax = make_map(projection=projection) for alamo, df in dfs.items(): ax.plot(df['longitude'], df['latitude'], 'b', label=alamo, transform=transformation) ax.plot(df['longitude'], df['latitude'], '+k', markersize=1, label=alamo, 
transform=transformation) ax.pcolormesh(bathy_sub.longitude, bathy_sub.latitude, bathy_sub.topo, cmap=cmocean.cm.deep_r, transform=transformation) # ax.contour(bathy_sub.longitude, bathy_sub.latitude, bathy_sub.data, [-50,-40,-30], colors='k', # transform=transformation) leg = ax.legend(bbox_to_anchor=(1.45,1), loc='upper right') ax.add_feature(land_50m) ax.coastlines(resolution='50m') ax.set_extent(extent)Zoom in on last relative positionextent = [-170.75, -167.75, 70.25, 71] fig,ax = make_map(projection=projection) for alamo, df in dfs.items(): ax.plot(df['longitude'], df['latitude'], 'b', label=alamo, transform=transformation) ax.plot(df['longitude'], df['latitude'], 'ok', markersize=5, label=alamo, transform=transformation) ax.pcolormesh(bathy_sub.longitude, bathy_sub.latitude, bathy_sub.topo, cmap=cmocean.cm.deep_r, transform=transformation) CS = ax.contour(bathy_sub.longitude, bathy_sub.latitude, bathy_sub.topo, levels=[-50, -40, -30], colors='k', alpha=.5, transform=transformation) ax.clabel(CS, inline=False, fontsize=10, fmt='%1.1f') #leg = ax.legend(bbox_to_anchor=(1.45,1), loc='upper right') ax.add_feature(land_50m) ax.coastlines(resolution='50m') ax.set_extent(extent) #fig.savefig('map.png',bbox_inches='tight',dpi=300)Zoom in on last Relative Positon - using WHOI raw dataextent = [-170.75, -167.75, 70.25, 71] fig,ax = make_map(projection=projection) ax.plot(xdf['LONGITUDE'], xdf['LATITUDE'], 'c', transform=transformation) ax.plot(xdf['LONGITUDE'], xdf['LATITUDE'], 'ok', markersize=5, transform=transformation) ax.pcolormesh(bathy_sub.longitude, bathy_sub.latitude, bathy_sub.topo, cmap=cmocean.cm.deep_r, transform=transformation) CS = ax.contour(bathy_sub.longitude, bathy_sub.latitude, bathy_sub.topo, levels=[-50, -40, -30], colors='k', alpha=.5, transform=transformation) ax.clabel(CS, inline=False, fontsize=10, fmt='%1.1f') #leg = ax.legend(bbox_to_anchor=(1.45,1), loc='upper right') ax.add_feature(land_50m) ax.coastlines(resolution='50m') ax.set_extent(extent) fig.savefig('map.png',bbox_inches='tight',dpi=300)Depth v Time/Dive Profiles%matplotlib inline import matplotlib.pyplot as plt import matplotlib.dates as mdates import cmocean for dfn, df in dfs.items(): fig, ax = plt.subplots(figsize=(17, 2)) cs = ax.scatter(df.index, df['PRES'], s=15, c=df['TEMP'], marker='o', edgecolor='none', vmin=-2, vmax=10, cmap=cmocean.cm.thermal) ax.invert_yaxis() ax.set_xlim(df.index[0], df.index[-1]) xfmt = mdates.DateFormatter('%d-%b') ax.xaxis.set_major_formatter(xfmt) cbar = fig.colorbar(cs, orientation='vertical', extend='both') cbar.ax.set_ylabel('Temperature ($^\circ$C)') ax.set_ylabel('Depth (m)') ax.set_xlabel(dfn) for dfn, df in dfs.items(): fig, ax = plt.subplots(figsize=(17, 2)) cs = ax.scatter(df.index, df['PRES'], s=15, c=df['PSAL'], marker='o', edgecolor='none', vmin=28, vmax=33,cmap=cmocean.cm.haline) ax.invert_yaxis() ax.set_xlim(df.index[0], df.index[-1]) xfmt = mdates.DateFormatter('%d-%b') ax.xaxis.set_major_formatter(xfmt) cbar = fig.colorbar(cs, orientation='vertical', extend='both') cbar.ax.set_ylabel('Salinity (PSU)') ax.set_ylabel('Depth (m)') ax.set_xlabel(dfn) for dfn, df in dfs.items(): fig, ax = plt.subplots(figsize=(17, 2)) cs = ax.scatter(df.index, df['PRES'], s=15, c=df['CYCLE_NUMBER'], marker='o', edgecolor='none',cmap=cmocean.cm.gray_r) ax.invert_yaxis() ax.set_xlim(df.index[0], df.index[-1]) xfmt = mdates.DateFormatter('%d-%b') ax.xaxis.set_major_formatter(xfmt) cbar = fig.colorbar(cs, orientation='vertical', extend='both') cbar.ax.set_ylabel('Dive Count') 
ax.set_ylabel('Depth (m)') ax.set_xlabel(dfn) #depth averaged temp - interpolate to 1m bins before averaging colors = ['r','y','k','b','g','c','olive','lightblue','grey'] ccount=0 legend_label=[] fig, ax = plt.subplots(figsize=(17, 2)) for dfn, df in dfs.items(): fg = df.groupby('CYCLE_NUMBER') for ind,fgi in enumerate(fg.groups): IntTemp = np.interp(np.arange(0,50,1),fg.get_group(fgi)['PRES'], fg.get_group(fgi)['TEMP'], left=np.nan, right=np.nan) cs = ax.scatter(fg.get_group(fgi).index[0], np.nanmean(IntTemp), s=15, c=colors[ccount], marker='o', edgecolor='none') xfmt = mdates.DateFormatter('%d-%b') ax.xaxis.set_major_formatter(xfmt) ax.set_ylabel('Depth (m)') ax.set_xlabel('Time (UTC)') ccount+=1 legend_label.append(dfn) ax.legend(legend_label) leg = ax.get_legend() for i in range(0,len(legend_label),1): leg.legendHandles[i].set_color(colors[i])Create software releases> API for auto-generated tagged releases, and release notes (from GitHub issues)#export from fastcore.imports import * from fastcore.utils import * from fastcore.foundation import * from fastcore.script import * from ghapi.core import * from datetime import datetime from configparser import ConfigParser import shutil,subprocess #hide from nbdev.showdoc import show_doc #export GH_HOST = "https://api.github.com" #export def find_config(cfg_name="settings.ini"): cfg_path = Path().absolute() while cfg_path != cfg_path.parent and not (cfg_path/cfg_name).exists(): cfg_path = cfg_path.parent return Config(cfg_path, cfg_name) #export def _issue_txt(issue): res = '- {} ([#{}]({}))'.format(issue.title.strip(), issue.number, issue.html_url) if hasattr(issue, 'pull_request'): res += ', thanks to [@{}]({})'.format(issue.user.login, issue.user.html_url) res += '\n' if not issue.body: return res return res + f" - {issue.body.strip()}\n" def _issues_txt(iss, label): if not iss: return '' res = f"### {label}\n\n" return res + '\n'.join(map(_issue_txt, iss)) def _load_json(cfg, k): try: return json.loads(cfg[k]) except json.JSONDecodeError as e: raise Exception(f"Key: `{k}` in .ini file is not a valid JSON string: {e}")FastRelease -#export class FastRelease: def __init__(self, owner=None, repo=None, token=None, **groups): "Create CHANGELOG.md from GitHub issues" self.cfg = find_config() self.changefile = self.cfg.config_path/'CHANGELOG.md' if not groups: default_groups=dict(breaking="Breaking Changes", enhancement="New Features", bug="Bugs Squashed") groups=_load_json(self.cfg, 'label_groups') if 'label_groups' in self.cfg else default_groups os.chdir(self.cfg.config_path) owner,repo = owner or self.cfg.user, repo or self.cfg.lib_name token = ifnone(token, os.getenv('FASTRELEASE_TOKEN',None)) if not token and Path('token').exists(): token = Path('token').read_text().strip() if not token: raise Exception('Failed to find token') self.gh = GhApi(owner, repo, token) self.groups = groups def _issues(self, label): return self.gh.issues.list_for_repo(state='closed', sort='created', filter='all', since=self.commit_date, labels=label) def _issue_groups(self): return parallel(self._issues, self.groups.keys(), progress=False) def changelog(self, debug=False): "Create the CHANGELOG.md file, or return the proposed text if `debug` is `True`" if not self.changefile.exists(): self.changefile.write_text("# Release notes\n\n\n") marker = '\n' try: self.commit_date = self.gh.repos.get_latest_release().published_at except HTTP404NotFoundError: self.commit_date = '2000-01-01T00:00:004Z' res = f"\n## {self.cfg.version}\n" issues = self._issue_groups() res += 
'\n'.join(_issues_txt(*o) for o in zip(issues, self.groups.values())) if debug: return res res = self.changefile.read_text().replace(marker, marker+res+"\n") shutil.copy(self.changefile, self.changefile.with_suffix(".bak")) self.changefile.write_text(res) run(f'git add {self.changefile}') def release(self): "Tag and create a release in GitHub for the current version" ver = self.cfg.version notes = self.latest_notes() self.gh.create_release(ver, body=notes) return ver def latest_notes(self): "Latest CHANGELOG entry" if not self.changefile.exists(): return '' its = re.split(r'^## ', self.changefile.read_text(), flags=re.MULTILINE) if not len(its)>0: return '' return '\n'.join(its[1].splitlines()[1:]).strip()To create a markdown changelog, first create a `FastRelease` object, optionally passing a mapping from GitHub labels to markdown titles. Put your github token in a file named `token` at the root of your repo. `FastRelease` attempts to fetch values for arguments from the following locations if not supplied:- **owner:** fetched from the field `user` in `settings.ini`. This is the owner name of the repository on GitHub. For example for the repo `fastai/fastcore` the owner would be `fastai`.- **repo:** fetched from the field `lib_name` in `settings.ini`. This is the name of the repository on GitHub. For example for the repo `fastai/fastcore` the owner would be `fastcore`.- **token:** fetched from a file named `token` at the root of your repo. Creating a token is discussed in [the setup](https://fastrelease.fast.ai/Set-up) section.- **groups:** (optional) fetched from the field `label_groups` in `settings.ini`, which is a JSON string. This is a mapping from label names to titles in your release notes. If not specified, this defaults to:```python{"breaking": "Breaking Changes", "enhancement":"New Features", "bug":"Bugs Squashed"}```#slow rel = FastRelease() show_doc(FastRelease.changelog)All relevant pull requests and issues are fetched from the GitHub API, and are categorized according to a user-supplied mapping from labels to markdown headings.# print(rel.changelog(debug=True)) show_doc(FastRelease.release)This uses the version information from your `settings.ini`. CLI functions#export @call_parse def fastrelease_changelog( debug:store_true=False # Print info to be added to CHANGELOG, instead of updating file ): "Create a CHANGELOG.md file from closed and labeled GitHub issues" FastRelease().changelog(debug=debug) #export @call_parse def fastrelease_release( token:str=None # Optional GitHub token (otherwise `token` file is used) ): "Tag and create a release in GitHub for the current version" ver = FastRelease(token=token).release() print(f"Released {ver}") #export @call_parse def fastrelease( debug:store_true=False, # Print info to be added to CHANGELOG, instead of updating file token:str=None # Optional GitHub token (otherwise `token` file is used) ): "Calls `fastrelease_changelog`, lets you edit the result, then pushes to git and calls `fastrelease_release`" cfg = find_config() FastRelease().changelog() if debug: return subprocess.run([os.environ.get('EDITOR','nano'), cfg.config_path/'CHANGELOG.md']) if not input("Make release now? 
(y/n) ").lower().startswith('y'): sys.exit(1) run('git commit -am release') run('git push') ver = FastRelease(token=token).release() print(f"Released {ver}") #export def bump_version(version, part=2): version = version.split('.') version[part] = str(int(version[part]) + 1) for i in range(part+1, 3): version[i] = '0' return '.'.join(version) test_eq(bump_version('0.1.1' ), '0.1.2') test_eq(bump_version('0.1.1', 1), '0.2.0') #export @call_parse def fastrelease_bump_version( part:int=2 # Part of version to bump ): "Increment version in `settings.py` by one" cfg = find_config() print(f'Old version: {cfg.version}') cfg['version'] = bump_version(cfg.version, part) cfg.save() print(f'New version: {cfg.version}')Export-#hide from nbdev.export import notebook2script notebook2script()Converted 00_core.ipynb. Converted 01_conda.ipynb. Converted index.ipynb.Gaussian Filtering using cv2.GaussianBlurimport cv2 import numpy as np from skimage import io, img_as_float import matplotlib.pyplot as plt #image_as_float is used here to keep the values of the image in floating point value to avoid round-off errors img_gaussian_noise=img_as_float(io.imread('images/Gaussian_noise.jpg',as_gray=True)) #image containing gaussian noise img_saltandpepper_noise=img_as_float(io.imread('images/Salt&pepper_noise.png',as_gray=True)) #image containing salt and pepper noise img_speckle_noise=img_as_float(io.imread('images/Speckle_noise.jpg',as_gray=True)) #image containing speckle noise img_poission_noise=img_as_float(io.imread('images/Poission_noise.png',as_gray=True)) #image containing poission noise img=img_gaussian_noise #use as per preference : here gaussian noise image is used using_cv2 = cv2.GaussianBlur(img, (5,5), 0, borderType=cv2.BORDER_CONSTANT) #BORDER_CONSTANT - Pad the image with a constant value (i.e. black or 0) #BORDER_REPLICATE: The row or column at the very edge of the original is replicated to the extra border. cv2.imshow("Original", img) cv2.imshow("Using cv2 gaussian", using_cv2) cv2.waitKey(0) #display the window infinitely until any keypress cv2.destroyAllWindows() #simply destroys all the windows we created or popped-up # plt.subplot(1, 2, 1) # plt.title('Original') # plt.imshow(img,cmap='gray') # plt.subplot(1, 2, 2) # plt.title('Using cv2 gaussian') # plt.imshow(using_cv2,cmap='gray')Original Image > Filtered Image Gaussian Filtering using cv2.getGaussianKernelimport cv2 import numpy as np from skimage import io, img_as_float #image_as_float is used here to keep the values of the image in floating point value to avoid round-off errors img_gaussian_noise=img_as_float(io.imread('images/Gaussian_noise.jpg',as_gray=True)) #image containing gaussian noise img_saltandpepper_noise=img_as_float(io.imread('images/Salt&pepper_noise.png',as_gray=True)) #image containing salt and pepper noise img_speckle_noise=img_as_float(io.imread('images/Speckle_noise.jpg',as_gray=True)) #image containing speckle noise img_poission_noise=img_as_float(io.imread('images/Poission_noise.png',as_gray=True)) #image containing poission noise img=img_gaussian_noise #use as per preference : here gaussian noise image is used a = cv2.getGaussianKernel(5,1) # Creates a 1-D Gaussian kernel using_cv2 = cv2.sepFilter2D(img,-1,a,a, borderType=cv2.BORDER_CONSTANT) # Apply the above Gaussian kernel. Here, I have used the same kernel for both X and Y #BORDER_CONSTANT - Pad the image with a constant value (i.e. black or 0) #BORDER_REPLICATE: The row or column at the very edge of the original is replicated to the extra border. 
cv2.imshow("Original", img) cv2.imshow("Using cv2 gaussian", using_cv2) cv2.waitKey(0) #display the window infinitely until any keypress cv2.destroyAllWindows() #simply destroys all the windows we created or popped-upOriginal Image > Filtered Image Gaussian Filtering using Gaussian Kernel functionimport cv2 import numpy as np import numpy from skimage import io, img_as_float #image_as_float is used here to keep the values of the image in floating point value to avoid round-off errors def gaussian_kernel(size, size_y=None): size = int(size) if not size_y: size_y = size else: size_y = int(size_y) x, y = numpy.mgrid[-size:size+1, -size_y:size_y+1] g = numpy.exp(-(x**2/float(size)+y**2/float(size_y))) return g / g.sum() img_gaussian_noise=img_as_float(io.imread('images/Gaussian_noise.jpg',as_gray=True)) #image containing gaussian noise img_saltandpepper_noise=img_as_float(io.imread('images/Salt&pepper_noise.png',as_gray=True)) #image containing salt and pepper noise img_speckle_noise=img_as_float(io.imread('images/Speckle_noise.jpg',as_gray=True)) #image containing speckle noise img_poission_noise=img_as_float(io.imread('images/Poission_noise.png',as_gray=True)) #image containing poission noise img=img_gaussian_noise #use as per preference : here gaussian noise image is used gaussian_kernel_array = gaussian_kernel(1) #calling the gaussian_kernel function and passing "1" which is the kernel size conv_using_cv2 = cv2.filter2D(img, -1, gaussian_kernel_array, borderType=cv2.BORDER_CONSTANT) #BORDER_CONSTANT - Pad the image with a constant value (i.e. black or 0) #BORDER_REPLICATE: The row or column at the very edge of the original is replicated to the extra border. cv2.imshow("Original", img) cv2.imshow("Using cv2 gaussian", conv_using_cv2) cv2.waitKey(0) #display the window infinitely until any keypress cv2.destroyAllWindows() #simply destroys all the windows we created or popped-upA matrix multiplicationThis example demonstrates how to perform general matrix multiplication using Nengo. The matrix can change during the computation, which makes it distinct from doing static matrix multiplication with neural connection weights (as done in all neural networks).Note that the order of operands in matrix multiplication matters. We will be computing $A \cdot B$ which is equivalent to $(B \cdot A)^{\top}$.import numpy as np import matplotlib.pyplot as plt %matplotlib inline import nengo %load_ext nengo.ipynb N = 1000 Amat = np.asarray([[0.5, -0.5], [-0.2, 0.3]]) Bmat = np.asarray([[0.60, -1.0], [0.7, 0.1]]) # keep the values within the range radius = 1 mindmodel = nengo.Network(label="Matrix Multiplication", seed=123) with mindmodel: # Make two ensemblearrays to store the output A = nengo.networks.EnsembleArray(N, Amat.size, radius=radius) B = nengo.networks.EnsembleArray(N, Bmat.size, radius=radius) # connect inputs to them so we can set their value inputA = nengo.Node(Amat.ravel()) inputB = nengo.Node(Bmat.ravel()) nengo.Connection(inputA, A.input) nengo.Connection(inputB, B.input) A_probe = nengo.Probe(A.output, sample_every=0.01, synapse=0.01) B_probe = nengo.Probe(B.output, sample_every=0.01, synapse=0.01) with nengo.Simulator(mindmodel) as sim: sim.run(1) plt.figure() plt.subplot(1, 2, 1) plt.title('A') plt.plot(sim.trange(dt=0.01), sim.data[A_probe]) plt.subplot(1, 2, 2) plt.title('B') plt.plot(sim.trange(dt=0.01), sim.data[B_probe]) from nengo.dists import Choice with mindmodel: # The C matrix is composed of populations that each contain # one element of A and one element of B. 
# These elements will be multiplied together in the next step. # The appropriate encoders make the multiplication more accurate # Check the "multiplication" example to see how multiplication # can be implemented in neurons. c_size = Amat.size * Bmat.shape[1] C = nengo.networks.Product(N, dimensions=c_size) # Determine the transformation matrices to get the correct pairwise # products computed. This looks a bit like black magic but if # you manually try multiplying two matrices together, you can see # the underlying pattern. Basically, we need to build up D1*D2*D3 # pairs of numbers in C to compute the product of. If i,j,k are the # indexes into the D1*D2*D3 products, we want to compute the product # of element (i,j) in A with the element (j,k) in B. The index in # A of (i,j) is j+i*D2 and the index in B of (j,k) is k+j*D3. # The index in C is j+k*D2+i*D2*D3. transformA = np.zeros((c_size, Amat.size)) transformB = np.zeros((c_size, Bmat.size)) for i in range(Amat.shape[0]): for j in range(Amat.shape[1]): for k in range(Bmat.shape[1]): c_index = (j + k * Amat.shape[1] + i * Bmat.size) transformA[c_index][j + i * Amat.shape[1]] = 1 transformB[c_index][k + j * Bmat.shape[1]] = 1 print("A->C") print(transformA) print("B->C") print(transformB) with mindmodel: nengo.Connection(A.output, C.A, transform=transformA) nengo.Connection(B.output, C.B, transform=transformB) C_probe = nengo.Probe(C.output, sample_every=0.01, synapse=0.01) # How's C doing with nengo.Simulator(mindmodel) as sim: sim.run(1) plt.figure() plt.plot(sim.trange(dt=0.01), sim.data[C_probe]) plt.title('C'); with mindmodel: # Now do the appropriate summing D = nengo.networks.EnsembleArray( N, n_ensembles=Amat.shape[0] * Bmat.shape[1], radius=radius) # The mapping for this transformation is much easier, since we want to # combine D2 pairs of elements (we sum D2 products together) transformC = np.zeros((D.dimensions, c_size)) for i in range(c_size): transformC[i // Bmat.shape[0]][i] = 1 print("C->D") print(transformC) with mindmodel: nengo.Connection(C.output, D.input, transform=transformC) D_probe = nengo.Probe(D.output, sample_every=0.01, synapse=0.01) with nengo.Simulator(mindmodel) as sim: sim.run(1) plt.figure() plt.plot(sim.trange(dt=0.01), sim.data[D_probe]) for d in np.dot(Amat, Bmat).flatten(): plt.axhline(d, color='k') plt.title("D");Widget Javascript not detected. It may not be installed or enabled properly.grid search for the best Gaussian width#med = util.meddistance(tr.stack_xy()) #list_gwidth2 = (med*(2.0**np.linspace(-5, 5, 11)))**2 #tst.GammaMMDKGaussTest.grid_search_gwidth2(tr, list_gwidth2, alpha)Brief IntroWith no doubt, Convolutional Neural Networks (CNN) has been the most successful model for computer vision tasks. Convolutional operation has similar mechanism to the way that human eyes work in visual perception. When humans explore the visual world, the eyes behave in a pattern of alternative fixations and saccades. The saccadic eye movements bring the visual target to the fovea abruptly (about 20ms), and the target information is then processed during eye fixations when the eyes stay relatively stable (e.g 200ms). We are usually un-aware of the eye movements, as they are programed and executed automatically by cognitive brain process. Our brain then aggregate all these local information to a global decision, based on previous knowledge/experience. The visual field is not explored as a whole. 
Only a selective set of local positions are viewed, and that turns out to be enough to serve the perception needs in our daily lives (It means images are extremely redundant to serve the recognition/classification popurse. Duplicated and irrelavant information should be effectively discarded to gain efficiency, e.g through weighting and local operator (local operators also can be considered as weighting by penalizing weights of the positions outside the receptive field to 0). Images are too rich and also too costy.).From this perspective, CNN is very much bio-inspired methodology: local-to-global, like divide-and-conquer (e.g to sort a list, you can sort the sublists (local) then merge to have the global solution). It acts like information selector and aggragator, grab the needed and throw away the rest. OK, too much talking, stop brain storming and code it. Let code say## load libs %matplotlib inline import time import warnings warnings.filterwarnings('ignore') import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_mldata from sklearn.metrics import confusion_matrix, classification_report, accuracy_score from sklearn.preprocessing import OneHotEncoderLoad MNISTmnist = fetch_mldata('mnist original', data_home = 'datasets/') X, y = mnist['data'], mnist['target'] X.shape, y.shape ## shape checkC:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\deprecation.py:77: DeprecationWarning: Function fetch_mldata is deprecated; fetch_mldata was deprecated in version 0.20 and will be removed in version 0.22 warnings.warn(msg, category=DeprecationWarning) C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\deprecation.py:77: DeprecationWarning: Function mldata_filename is deprecated; mldata_filename was deprecated in version 0.20 and will be removed in version 0.22 warnings.warn(msg, category=DeprecationWarning)Preprocess MNISTX = X.T X = X / 255.0 Y = OneHotEncoder().fit_transform(y.reshape(-1,1).astype('int32')).toarray().T X.shape, Y.shapeMake Train/Test Splitsm = 60000 X_train, X_test = X[:,:m].reshape(1,28,28,-1), X[:,m:].reshape(1,28,28,-1) Y_train, Y_test = Y[:,:m], Y[:,m:] X_train.shape, Y_train.shape, X_test.shape, Y_test.shapeShuffle Train setnp.random.seed(54321) shuffle = np.random.permutation(m) X_train, Y_train = X_train[:,:,:,shuffle], Y_train[:,shuffle] X_train.shape, Y_train.shapeVisual checkidx = 134 plt.imshow(X_train[:,:,:,idx].squeeze(), cmap = 'binary_r') plt.title(np.argmax(Y_train[:,idx])) plt.axis('off') plt.show()Define network## input layer input_depth = 1 input_height = 28 input_width = 28 ## convolution layer conv_depth = 2 conv_height = 3 conv_width = 3 ## trainable parameters connecting input & convolution layers W1 = np.random.randn(conv_depth, input_depth, conv_height, conv_width) b1 = np.zeros((conv_depth, 1)) ## densely connected (fc) layer fc_dims = 32 flatten_dims = conv_depth * (input_height - conv_height + 1) * (input_width - conv_width + 1) ## trainable parameters connecting convolution & dense layers W2 = np.random.randn(fc_dims, flatten_dims) b2 = np.zeros((fc_dims, 1)) ## output layer output_dims = 10 ## trainable parameters connecting dense & output layers W3 = np.random.randn(output_dims, fc_dims) b3 = np.zeros((output_dims, 1))Training CNN## prepare inputs Input = X_train.copy() Target = Y_train.copy() Input.shape, Target.shape ## initialize convolution output conv_output_height = input_height - conv_height + 1 conv_output_width = input_width - conv_width + 1 conv_output = np.zeros((conv_depth, conv_output_height, 
conv_output_width, Input.shape[-1])) for epoch in range(20): #------------------------------------------------------------------FORWARD BLOCK ## feed forward: convolution operation for f in range(conv_depth): for r in range(conv_output_height): for c in range(conv_output_width): current_patch = Input[:, r : r + conv_height, c : c + conv_width] current_filter = np.expand_dims(W1[f,:,:,:], axis = 3) ## to match shape for broadcasting conv_output[f, r, c] = (current_patch * current_filter + b1[f]).reshape(-1, Input.shape[-1]).sum(axis = 0) ## reshape 2X faster # conv_output[f, r, c] += (current_patch * current_filter + b1[f]).sum(axis = 0).sum(axis = 0).sum(axis = 0) ## feed forward: flatten the convolution output conv_output_flatten = conv_output.reshape(-1, Input.shape[-1]) A1 = 1 / (1 + np.exp(-conv_output_flatten)) ## sigmoid ## feed forward: affine operation Z2 = W2 @ A1 + b2 A2 = 1/(1 + np.exp(-Z2)) ## geed forward: affine + softmax operation Z3 = W3 @ A2 + b3 Z3 = Z3 - np.max(Z3, axis = 0) A3 = np.exp(Z3)/np.exp(Z3).sum(axis = 0) #------------------------------------------------------------------BACKWARD BLOCK ## backpropagation: softmax layer dZ3 = A3 - Y_train dW3 = dZ3 @ A2.T / Input.shape[-1] db3 = dZ3.mean(axis = 1, keepdims = True) ## backpropagation: dense layer dA2 = W3.T @ dZ3 dZ2 = dA2 * A2 * (1 - A2) dW2 = dZ2 @ A1.T / Input.shape[-1] db2 = dZ2.mean(axis = 1, keepdims = True) ## backpropagation: convolution layer dA1 = W2.T @ dZ2 d_conv_flatten = dA1 * A1 * (1 - A1) d_conv_matrix = d_conv_flatten.reshape(conv_output.shape) ## backpropagation: convolution layer --> weight dW1 = np.zeros(W1.shape) for in_c in range(Input.shape[0]): for out_c in range(conv_output.shape[0]): for r in range(conv_height): for c in range(conv_width): conv_input_patch = Input[in_c, r : r + conv_output_height, c : c + conv_output_width, :] ## conv input conv_output_vals = d_conv_matrix[out_c] ## conv results dW1[out_c, in_c, r, c] = np.sum(conv_input_patch * conv_output_vals)/Input.shape[-1] ## backpropagation: convolution layer --> bias db1 = d_conv_matrix.sum(axis = 1).sum(axis = 1).mean(axis = 1, keepdims = True) # equivalent # db1 = np.zeros((b1.shape)) # for out_c in range(d_conv_matrix.shape[0]): # db1[out_c] += d_conv_matrix[out_c].sum()/Input.shape[-1] ## backpropagation: convolution layer --> Input dInput = np.zeros_like(Input) for in_c in range(Input.shape[0]): for out_c in range(conv_output.shape[0]): current_filter = np.expand_dims(W1[out_c, in_c], axis = 2) for r in range(conv_output_height): for c in range(conv_output_width): d_conv_val = d_conv_matrix[out_c, in_c, r, c] dInput[in_c, r : r+conv_height, c : c + conv_width, :] += d_conv_val * current_filter #------------------------------------------------------------------ UPDATE PARAMETERS ## update model lr = 1 W3 -= dW3 * lr W2 -= dW2 * lr W1 -= dW1 * lr b3 -= db3 * lr b2 -= db2 * lr b1 -= db1 * lr ## compute loss Loss = -np.mean(Y_train * np.log(A3), axis = 1) print('epoch:', epoch, ', loss:', Loss.sum())epoch: 0 , loss: [1.28140686 0.20822068 0.2338827 0.46949416 0.60362163 0.17409459 0.35062963 1.02211311 1.32937071 0.04804767] epoch: 1 , loss: [0.38369461 0.06354422 0.77907208 0.20330568 0.38941393 0.18225178 0.46335044 0.22145518 0.68721877 1.18083523] epoch: 2 , loss: [0.34118477 0.98553669 0.58973446 0.09616901 0.11638067 0.16605581 0.25018618 0.26129972 0.40057913 0.76627435] epoch: 3 , loss: [0.13881807 0.66915701 0.38335981 0.55071828 0.3880977 0.23016209 0.24817241 0.08562567 0.22102089 0.52127314] epoch: 4 , loss: 
[0.55372952 0.38240065 0.14696803 0.54680683 0.17156462 0.17519124 0.13035284 0.70305301 0.20451274 0.36796369] epoch: 5 , loss: [0.34151146 0.16762261 0.26628403 0.32332559 0.20978761 0.18224469 0.3076453 0.46574045 0.17083644 0.18504516] epoch: 6 , loss: [0.21586048 0.32785749 0.19406651 0.20420862 0.21118384 0.21637319 0.20095451 0.29251818 0.2560406 0.24154615] epoch: 7 , loss: [0.23242883 0.22555531 0.25360815 0.25162868 0.23244112 0.21426809 0.244[...]Test## initialize convolution output Input = X_test.copy() conv_output_height = input_height - conv_height + 1 conv_output_width = input_width - conv_width + 1 conv_output = np.zeros((conv_depth, conv_output_height, conv_output_width, Input.shape[-1])) ## feed forward: convolution operation for f in range(conv_depth): for r in range(conv_output_height): for c in range(conv_output_width): current_patch = Input[:, r : r + conv_height, c : c + conv_width] current_filter = np.expand_dims(W1[f,:,:,:], axis = 3) ## to match shape for broadcasting conv_output[f, r, c] = (current_patch * current_filter + b1[f]).reshape(-1, Input.shape[-1]).sum(axis = 0) ## reshape 2X faster # conv_output[f, r, c] += (current_patch * current_filter + b1[f]).sum(axis = 0).sum(axis = 0).sum(axis = 0) ## feed forward: flatten the convolution output conv_output_flatten = conv_output.reshape(-1, Input.shape[-1]) A1 = 1 / (1 + np.exp(-conv_output_flatten)) ## sigmoid ## feed forward: affine operation Z2 = W2 @ A1 + b2 A2 = 1/(1 + np.exp(-Z2)) ## geed forward: affine + softmax operation Z3 = W3 @ A2 + b3 Z3 = Z3 - np.max(Z3, axis = 0) A3 = np.exp(Z3)/np.exp(Z3).sum(axis = 0) preds = np.argmax(A3, axis = 0) truth = np.argmax(Y_test, axis = 0)Results Reportprint(accuracy_score(truth, preds)) print(confusion_matrix(truth, preds)) print(classification_report(truth, preds)) ## something wrong inside, to be correctedPhase 1 Progress Preprocess of datasetThe dataset of this assignments is from the file 'data/fake_or_real_news.csv', which contains thousands of news within. The very first opeartion I did is to trans the title_vectors from string into float list, because the although the string is made up of figures, it can not be processed before transfer into an parameter that can be use. And after that, I use 1 and 0 instead of Fake and Real in lebal column. And then I call the describe function... not for much use, I think.import pandas as pd df = pd.read_csv('data/fake_or_real_news.csv') df.head() list_str = lambda i: i.strip('[').strip(']').split() list_float = lambda i: [float(j) for j in i] str_to_list_float = lambda i: list_float(list_str(i)) df.title_vectors = df.title_vectors.apply(str_to_list_float) df.head() label_trans = lambda i: 0 if i == 'FAKE' else 1 df.label = df.label.apply(label_trans) df.head() import numpy as np df.describe(include=[np.number]) x = np.array(df.title_vectors.apply(lambda x: pd.Series(x))) y = np.array(df['label'])Split the datasetJust like the operation we did in lecture 1, I also use the 0.33 as the ratio of training set and testing set. I choose 2 as the random seed of spliting operation.from sklearn.model_selection import train_test_split seed = 2 test_size = 0.33 x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=seed) print (x_train.shape, x_test.shape, y_train.shape, y_test.shape)(4244, 300) (2091, 300) (4244,) (2091,)Logistic regressionThe first method I use is logistic regression. It is pretty fast than the other method and give us an 0.79 accuracy by calculating that modal out. 
Considering the time spent, the logistic regression model turns out to be quite acceptable.import time from sklearn.linear_model import LogisticRegression LR_model = LogisticRegression() start1 = time.time() LR_model = LR_model.fit(x_train, y_train) end1 = time.time() LR_model.score(x_test, y_test) LR_time = end1-start1 LR_time from sklearn.metrics import classification_report target_names = ['FAKE', 'REAL'] y_pred = LR_model.predict(x_test) print(classification_report(y_test, y_pred, target_names=target_names)) from sklearn.metrics import * LR_result = [accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred), f1_score(y_test, y_pred), LR_time] LR_resultRandom forestRandom forest exposes many parameters we can tune, and I find that the training time is highly correlated with the number of estimators used, which makes perfect sense given how random forests work. The accuracy is slightly lower than that of logistic regression, and the training time also exceeds 1 second.from sklearn.ensemble import RandomForestClassifier RF_model = RandomForestClassifier(n_estimators = 20, max_features=20, random_state=seed) start2 = time.time() RF_model = RF_model.fit(x_train, y_train) end2=time.time() RF_model.score(x_test, y_test) RF_time = end2 - start2 RF_time y_pred = RF_model.predict(x_test) print(classification_report(y_test, y_pred, target_names=target_names)) RF_result = [accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred), f1_score(y_test, y_pred), RF_time] RF_resultXGboostLast but not least, I use the XGBoost model to evaluate performance on these data. XGBoost requires many fitting attempts, which take a lot of time, so its training time is much higher than that of the other two models; on the other hand, this helps ensure the accuracy of the result.from xgboost import XGBClassifier start3 = time.time() XG_model = XGBClassifier(max_depth=7, learning_rate=0.2, n_estimators=20, silent=True, objective='binary:logistic', nthread=-1, gamma=0, min_child_weight=1, max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, seed=0, missing=None) XG_model = XG_model.fit(x_train, y_train) end3 = time.time() XG_model.score(x_test, y_test) XG_time = end3 - start3 XG_time y_pred = XG_model.predict(x_test) print(classification_report(y_test, y_pred, target_names=target_names)) XG_result = [accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred), f1_score(y_test, y_pred), XG_time] XG_resultPlottingI use three lists to store the computed results and pyplot to compare the three models. Surprisingly, the metrics do not differ much from one another; perhaps the dataset was not chosen carefully, or the features are so closely related that the models show little difference.
Also, I plot a seperate graph to present time spent in each model.Y1 = np.array(LR_result[:4]) Y2 = np.array(RF_result[:4]) Y3 = np.array(XG_result[:4]) from matplotlib import pyplot as plt plt.figure(figsize=(11,6)) n = 4 X = np.arange(n)+1 plt.xticks([1.3,2.3,3.3,4.3],[r'$Accuracy$', r'$Precision$', r'$Recall$',r'$F1-score$']) plt.bar(X,Y1,width = 0.3,facecolor = 'lightskyblue',edgecolor = 'white',label='LR') plt.bar(X+0.3,Y2,width = 0.3,facecolor = 'yellowgreen',edgecolor = 'white',label='RF') plt.bar(X+0.6, Y3, width = 0.3,facecolor = 'coral',edgecolor = 'white',label='XG') for x,y in zip(X,Y1): plt.text(x, y, '%.2f' % y, ha='center', va= 'bottom') for x,y in zip(X,Y2): plt.text(x+0.3, y, '%.2f' % y, ha='center', va= 'bottom') for x,y in zip(X,Y3): plt.text(x+0.6, y, '%.2f' % y, ha='center', va= 'bottom') plt.ylabel('Percentage') plt.ylim(0,+1) plt.legend() plt.show() data = [LR_result[4], RF_result[4],XG_result[4]] plt.barh(range(len(data)), data) plt.yticks([0,1,2],[r'$LR$', r'$RF$', r'$XG$']) plt.xlabel('Time') plt.ylabel('Models') plt.show()**Create and validate 31-day running mean climatology for ERA-5 v1.1 tas, tmax, tmin.**! pip install git+https://github.com/ClimateImpactLab/xclim.git@qdm_add_year_selection %matplotlib inline import xarray as xr import numpy as np import os as os import matplotlib.pyplot as plt from cartopy import config import cartopy.crs as ccrs import matplotlib.lines as mlines import matplotlib.patches as mpatches import pandas as pd import itertools import datetime import toolz from datetime import date from pathlib import Path, PurePath from xclim.core.calendar import convert_calendar import sys import dask.distributed as dd import dask import rhg_compute_tools.kubernetes as rhgk client, cluster = rhgk.get_big_cluster() cluster.scale(10) client cluster.close() yrs = np.arange(1994,2016) def pull_ERA5_variable(variable): filenames = [] for num_yrs in range(len(yrs)): filename = '/gcs/impactlab-data/climate/source_data/ERA-5/{}/daily/netcdf/v1.1/{}_daily_{}-{}.nc'.format(variable, variable, yrs[num_yrs], yrs[num_yrs]) filenames.append(filename) era5_var = xr.open_mfdataset(filenames, concat_dim='time', combine='by_coords') var_all = era5_var[variable] return var_all da = pull_ERA5_variable('tas') da = da.persist() # remove leap days and convert calendar to no-leap da = convert_calendar(da, 'noleap') def assign_coords(da): years = np.arange(da.time.dt.year.min(),da.time.dt.year.max()+1) da_wcoords = da.assign_coords(dayofyear=xr.DataArray(np.array([np.arange(1,366)]*len(years)).flatten(), dims=('time'), coords={'time':da.time})).persist() return da_wcoords da = assign_coords(da) da_dayofyear_mean = da.sel(time=slice('1994-12-17','2015-01-15')).groupby('dayofyear').mean().persist() da_dayofyear_mean.sel(latitude=35.0,longitude=180.0).plot() da_dayofyear_mean.nbytes / (1024**3) da_dayofyear_mean = da_dayofyear_mean.load() def rolling_31day_mean(da): roll = da.pad(dayofyear=15, mode='wrap').rolling(dayofyear=31, center=True).mean().dropna('dayofyear') return roll da_rolling_mean = rolling_31day_mean(da_dayofyear_mean) da_rolling_mean da_rolling_mean.sel(latitude=35.0,longitude=180.0).plot()Before creating the netcdf file, run a quick validation of the climatology.def test_for_nans(da): # no nans assert da.isnull().sum() == 0, "there are nans!" 
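# Additional check (a hedged sketch added here, not in the original notebook): after
# converting to the 'noleap' calendar, the rolling climatology should span exactly
# 365 entries along "dayofyear". Call it alongside the other checks,
# e.g. test_dayofyear_length(da_rolling_mean).
def test_dayofyear_length(da):
    assert da.sizes['dayofyear'] == 365, "climatology should have 365 dayofyear entries"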
def test_temp_range(da): # make sure temp values are in a valid range # asserts if statement below is false assert (da.min() > 150) or (ds[var].max() < 350), "temperature values are invalid" test_for_nans(da_rolling_mean) test_temp_range(da_rolling_mean) da_rolling_mean.mean(dim='dayofyear').plot() def create_netcdf(da, variable): ''' create netcdf file. ''' out_direc = ('/gcs/impactlab-data/climate/source_data/ERA-5/{}/climatology/'.format(variable)) out_filename = ('{}_1995_2015_climo.nc'.format(variable)) if not Path(out_direc).exists(): Path(out_direc).mkdir(parents=True, exist_ok=True) # add attributes to dataset and save file attrsdt = { 'author': '', 'contact': '', 'project': ('downscaling CMIP6'), 'source': ('/gcs/climate/source_data/ERA-5/day/tmin/v1.1'), 'method': ('31-rolling mean climatology, no leap days'), 'created': str(date.today()), 'units': 'K'} da.attrs.update(attrsdt) # save file da.to_netcdf(Path(PurePath(out_direc).joinpath(out_filename))) create_netcdf(da_rolling_mean, 'tas')**Validation back for the `pad` function**validate_pad = da_dayofyear_mean.sel(latitude=35.0,longitude=180.0).pad(dayofyear=15, mode='wrap') validate_pad[:15] validate_pad[365:380] validate_pad[15:30] validate_pad[380:]the `pad` function should wrap the last values to the beginning and the beginning values to the end. Given the original dataset of 1-365 we then add 15 days from the end to the beginning, which match the 351-365 days. Additionally, the 1-15 days match the days at the end of the new 'padded' `DataArray`. `Pad` function is validated. **Validation for the `rolling` function**validate_roll = da_dayofyear_mean.sel(latitude=35.0,longitude=180.0).rolling(dayofyear=31, center=True).mean().dropna('dayofyear') validate_roll validate_roll.sel(dayofyear=20) validate_roll_v2 = da_dayofyear_mean.sel(dayofyear=slice(5,35),latitude=35.0,longitude=180.0) validate_roll_v2 validate_roll_v2.mean()第6章: 英語テキストの処理英語のテキスト(nlp.txt)に対して,以下の処理を実行せよ.text_list = [] with open('./data/chapter06/nlp.txt', 'r') as f: text_list = f.read().splitlines() text_list[:10]50. 文区切り(. or ; or : or ? or !) → 空白文字 → 英大文字というパターンを文の区切りと見なし,入力された文書を1行1文の形式で出力せよ.import re sentences = [] for lst in text_list: sentence = re.split('[.:;?!]\s([A-Z])', lst) sentences = sentences + sentence for i,j in enumerate(sentences): if re.match(r"^[A-Z]$", j): sentences.insert(i+2, "".join([sentences[i], sentences[i+1]])) del sentences[i] del sentences[i] sentences[:10]51. 単語の切り出し空白を単語の区切りとみなし,50の出力を入力として受け取り,1行1単語の形式で出力せよ.ただし,文の終端では空行を出力せよ.for i in sentences[:5]: print("\n".join(i.split(" ")))Natural language processing From Wikipedia, the free encyclopedia Natural language processing (NLP) is a field of computer science, artificial intelligence, and linguistics concerned with the interactions between computers and human (natural) languages As such, NLP is related to the area of humani-computer interaction52. ステミング51の出力を入力として受け取り,Porterのステミングアルゴリズムを適用し,単語と語幹をタブ区切り形式で出力せよ. Pythonでは,Porterのステミングアルゴリズムの実装としてstemmingモジュールを利用するとよい.from nltk import stem # nltkにはPorterStemmerもついててお買い得!!! 
stemmer = stem.PorterStemmer() for i in sentences[:5]: for j in i.split(" "): print("%s\t%s" % (j, stemmer.stem(j)))Natural natur language languag processing process From from Wikipedia, wikipedia, the the free free encyclopedia encyclopedia Natural natur language languag processing process (NLP) (nlp) is is a a field field of of computer comput science, science, artificial artifici intelligence, intelligence, and and linguistics linguist concerned concern with with the the interactions interact between between computers comput and and human human (natural) (natural) languages languag As As such, such, NLP nlp is is related relat to to the the area area of of humani-computer humani-comput interaction interact53. TokenizationStanford Core NLPを用い,入力テキストの解析結果をXML形式で得よ.また,このXMLファイルを読み込み,入力テキストを1行1単語の形式で出力せよ.# corenlpまで実装されてて神 from nltk.parse import corenlp # corenlp本体はサーバーとして動かしてる # DockerですよDocker!!! parser = corenlp.CoreNLPDependencyParser(url='http://corenlp:9000') for sentence in parser.parse_text("\n".join(text_list[:4])): # 以下のテンプレートでprintされる # {i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n print(sentence.to_conll(10))1 Natural natural JJ JJ _ 2 amod _ _ 2 language language NN NN _ 3 compound _ _ 3 processing processing NN NN _ 18 nsubj _ _ 4 From from IN IN _ 5 case _ _ 5 Wikipedia Wikipedia NNP NNP _ 3 nmod _ _ 6 , , , , _ 3 punct _ _ 7 the the DT DT _ 14 det _ _ 8 free free JJ JJ _ 9 amod _ _ 9 encyclopedia encyclopedia NN NN _ 12 compound _ _ 10 Natural natural JJ JJ _ 11 amod _ _ 11 language language NN NN _ 12 compound _ _ 12 processing processing NN NN _ 14 dep _ _ 13 ( ( -LRB- -LRB- _ 14 punct _ _ 14 NLP nlp NN NN _ 3 appos _ _ 15 ) ) -RRB- -RRB- _ 18 punct _ _ 16 is be VBZ VBZ _ 18 cop _ _ 17 a a DT DT _ 18 det _ _ 18 field field NN NN _ 0 ROOT _ _ 19 of of IN IN _ 21 case _ _ 20 computer computer NN NN _ 21 compound _ _ 21 science science NN NN _ 18 nmod _ _ 22 , , , , _ 21 punct _ _ 23 artificial artificial JJ JJ _ 24 amod _ _ 24 intelligence intelligence NN NN _ 21 conj _ _ 25 , , , , _ 21 punct _ _ 26 and and CC CC _ 27 cc _ _ 27 linguistics linguistics NNS NNS _ 21 conj _ _ 28 concerne[...]54. 品詞タグ付けStanford Core NLPの解析結果XMLを読み込み,単語,レンマ,品詞をタブ区切り形式で出力せよ.for word in parser.parse_text("\n".join(text_list[:4])): dep_graphs = word.to_conll(10).splitlines() for i in dep_graphs: graph = i.split("\t") # {i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n print("\t".join([graph[1], graph[2], graph[4]]))Natural natural JJ language language NN processing processing NN From from IN Wikipedia Wikipedia NNP , , , the the DT free free JJ encyclopedia encyclopedia NN Natural natural JJ language language NN processing processing NN ( ( -LRB- NLP nlp NN ) ) -RRB- is be VBZ a a DT field field NN of of IN computer computer NN science science NN , , , artificial artificial JJ intelligence intelligence NN , , , and and CC linguistics linguistics NNS concerned concern VBN with with IN the the DT interactions interaction NNS between between IN computers computer NNS and and CC human human JJ ( ( -LRB- natural natural JJ ) ) -RRB- languages language NNS . . . As as IN such such JJ , , , NLP nlp NN is be VBZ related relate VBN to to IN the the DT area area NN of of IN humani humani JJ - - HYPH computer computer NN interaction interaction NN . . . Many many JJ challenges challenge NNS in in IN NLP nlp NN involve involve VBP natural natural JJ language language NN understanding understanding NN , , , t[...]55. 
固有表現抽出入力文中の人名をすべて抜き出せ.from nltk.parse import CoreNLPParser tagger = CoreNLPParser(url='http://corenlp:9000', tagtype='ner') tagger.tag("Wikipedia is the free encyclopedia.".split(" ")) [ word[0] for word in tagger.tag(sentences) if word[1] == 'PERSON']56. 共参照解析Stanford Core NLPの共参照解析の結果に基づき,文中の参照表現(mention)を代表参照表現(representative mention)に置換せよ.ただし,置換するときは,「代表参照表現(参照表現)」のように,元の参照表現が分かるように配慮せよ. 57. 係り受け解析Stanford Core NLPの係り受け解析の結果(collapsed-dependencies)を有向グラフとして可視化せよ.可視化には,係り受け木をDOT言語に変換し,Graphvizを用いるとよい.また,Pythonから有向グラフを直接的に可視化するには,pydotを使うとよい.import pydot from IPython.display import SVG, display for sentence in parser.parse_text("\n".join(text_list[:4])): display(SVG(pydot.graph_from_dot_data(sentence.to_dot())[0].create(format='svg')))58. タプルの抽出Stanford Core NLPの係り受け解析の結果(collapsed-dependencies)に基づき,「主語 述語 目的語」の組をタブ区切り形式で出力せよ.ただし,主語,述語,目的語の定義は以下を参考にせよ.- 述語: nsubj関係とdobj関係の子(dependant)を持つ単語- 主語: 述語からnsubj関係にある子(dependent)- 目的語: 述語からdobj関係にある子(dependent)result = parser.parse_text("Wikipedia is the free encyclopedia.") # triplesを使うと依存関係の語がtupleで抽出される list(next(result).triples()) for sentence in parser.parse_text("\n".join(text_list)): # 主語と述語のtuple -> (主語, 述語) dep = ('', '') for i in list(sentence.triples()): if i[1] == 'nsubj': dep = (i[2][0], i[0][0]) continue if i[0][0] == dep[1] and i[1] == 'obj': print("%s\t%s\t%s" % (dep[0], dep[1], i[2][0])) word = ('', '')challenges involve understanding Turing published article ELIZA provided interaction patient exceeded base ELIZA provide response underpinnings discouraged sort that underlies approach Some produced systems which make decisions that contains errors implementations involved coding Some produced systems which make decisions models have advantage they express certainty Systems have advantages procedures make use that make decisions language understand moreFind [gameid] of games by a specific league in FIBA LiveStats**baseurl**: https://www.fibalivestats.com/u/[league]/[game_id]/sc.html[league] is the name of the league (e.g. UAAP)[gameid] is the gameid for the specific game (e.g. 936275)For example, the shotchart of the game between NU and UST during UAAP Season 81 (gameid:936275) is stored in FIBA LiveStats at: https://www.fibalivestats.com/u/UAAP/936275/sc.htmlimport requests league = 'UAAP' # the league you want to scrape data from clue = '(Seniors Division)' # a piece of text that appears on the webpage if the game/webpage exists, should be changed accordingly baseurl = 'https://www.fibalivestats.com/u/{}'.format(league) start_id = 5000 end_id = 10000 for g_id in range(start_id, end_id + 1): # for g_id in range(end_id, start_id, -1): # use this if you want to search from end_id to start_id url = "{}/{}/".format(baseurl, g_id) resp = requests.get(url) if resp.status_code == 200 and resp.text.find(clue) > -1: # if the webpage and the clue exists, get the URL print(url) else: pass # print(g_id) # print("{}: {}".format(g_id, resp.status_code))Once you have a list of URLS, you can:1. use the URLS directly in the next step2. download the whole HTML file3. 
download the parts of the HTML with **div id="shotchart_data"** (this is contains the shot chart information) - you can add another div with **id=gameInfo** to and add a class inside it with the following information: class="date team opponent venue" - see [sample shotchart data](/sample-shotchart-html/936275.txt) Extract information from LiveStats shot chart HTMLfrom bs4 import BeautifulSoup import pprint import csv import pandas as pdExtract the data for one game/fileshotchartfile = gameid white = '' black = '' fg_list = [] with open(shotchartfile) as f: soup = BeautifulSoup(f, 'html.parser') game_info = soup.find_all(id='gameInfo') date, white, black, venue = game_info[0].attrs['class'] shots = soup.find_all('span') for shot in shots: classes = shot.attrs['class'] loc = shot.attrs['style'] sh_info = shot.attrs['title'] team0, made0 = classes[1].split('_') if team0 == 'white': team0 = white opp0 = black else: team0 = black opp0 = white if made0 == 'made': made0 = 1 else: made0 = 0 bottom0, left0 = [float(p.split(': ')[1].strip('%')) for p in loc.split(';')[:2]] if left0 > 50: bottom0 = 100 - bottom0 left0 = 100 - left0 # in meters bottom0 = (100 - bottom0) * 0.15 left0 = left0 * 0.28 # if basket is origin bottom0 = bottom0 - 7.5 left0 = left0 - 1.43 # if 10cm cell bottom0 = int(bottom0/0.1) left0 = int(left0/0.1) num0, player0, pt_type0 = sh_info.split(', ') pt_, sh_type0 = pt_type0.split('pt ') pt0 = int(pt_) info = { 'team': team0, 'opponent': opp0, 'made': made0, 'x': bottom0, 'y': left0, 'num': num0, 'player': player0, 'points': pt0, 'shot_type': sh_type0, 'date': date, 'venue': venue } fg_list.append(info)Extract the data from a list of shotchart filesgames = [] # add location of files here fg_list = [] for game in games: with open('{}'.format(game)) as f: soup = BeautifulSoup(f, 'html.parser') game_info = soup.find_all(id='gameInfo') date, white, black, venue = game_info[0].attrs['class'] shots = soup.find_all('span') for shot in shots: classes = shot.attrs['class'] loc = shot.attrs['style'] sh_info = shot.attrs['title'] team0, made0 = classes[1].split('_') if team0 == 'white': team0 = white opp0 = black else: team0 = black opp0 = white if made0 == 'made': made0 = 1 else: made0 = 0 bottom0, left0 = [float(p.split(': ')[1].strip('%')) for p in loc.split(';')[:2]] if left0 > 50: bottom0 = 100 - bottom0 left0 = 100 - left0 # in meters bottom0 = (100 - bottom0) * 0.15 left0 = left0 * 0.28 # if basket is origin bottom0 = bottom0 - 7.5 left0 = left0 - 1.43 # if 10cm cell bottom0 = int(bottom0/0.1) left0 = int(left0/0.1) num0, player0, pt_type0 = sh_info.split(', ') pt_, sh_type0 = pt_type0.split('pt ') pt0 = int(pt_) info = { 'team': team0, 'opponent': opp0, 'made': made0, 'x': bottom0, 'y': left0, 'num': num0, 'player': player0, 'points': pt0, 'shot_type': sh_type0, 'date': date, 'venue': venue } fg_list.append(info)Save to CSVwith open('fg.csv', 'w', encoding='utf8', newline='') as output_file: fc = csv.DictWriter(output_file, fieldnames=fg_list[0].keys(), ) fc.writeheader() fc.writerows(fg_list)Lab 06: Recurrent Neural Network (RNN)Trong bài thực hành này:- Cài đặt 1 mạng RNN cơ bản LSTM- Sử dụng Word Embedding GLOVE của Stanford- Chạy trên data spam detectionReference:- Glove: https://github.com/stanfordnlp/GloVe- LSTM: Long Short-Term Memory layer - Hochreiter 1997.Đọc thêm:- LSTM: https://colah.github.io/posts/2015-08-Understanding-LSTMs/ Tiền xử lý dữ liệuChúng ta cần tách câu thành từng từ trước.import pandas as pd import numpy as np import nltk df = 
pd.read_csv("spam_detection.csv") df.head() texts = df["Text"].to_list() texts = [text.lower() for text in texts] # chuyển các đoạn text thành chữ thường (word embedding chỉ cho chữ thường) tokenized_texts = [nltk.tokenize.word_tokenize(text) for text in texts] # tách câu thành một list các từ print(tokenized_texts[0])['go', 'until', 'jurong', 'point', ',', 'crazy', '..', 'available', 'only', 'in', 'bugis', 'n', 'great', 'world', 'la', 'e', 'buffet', '...', 'cine', 'there', 'got', 'amore', 'wat', '...']Load embedding từ filePretrained Embeddings từ Glove-Stanford đã được rút gọn cho bài tập này và lưu thành file glove_embedding.txt.## không cần hiểu đống này lắm đâu import io import numpy as np def load_word_embeddings(fname): fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') vocab, matrix = [], [] i=0 for line in fin: tokens = line.rstrip().split(' ') vocab.append(tokens[0]) matrix.append(list(map(float, tokens[1:]))) return vocab, np.asarray(matrix) vocab, matrix = load_word_embeddings("glove_embedding.txt")Sau khi đọc xong thì- vocab: một danh sách các từ vực có trong embedding- matrix: một ma trận, mỗi dòng là một embedding cho từ tương ứng trong vocab (xếp đúng thứ tự) Số hóa dataĐể số hóa 1 từ (word) trong ngôn ngữ tự nhiên, người ta sẽ biểu diễn từ đó thành một vector (gọi là embedding). 2 bước trước ta đã tách các câu trong data thành từ riêng biệt, và load một bộ embedding có sẵn. Bây giờ ta chuyển từng từ trong data thành một mã số biểu thị vị trí của từ đó trong ma trận embedding.Tuy nhiên, ta cần có vài mã số đặc biệt để giải quyết các vấn đề như: - từ không có trong embedding- Độ dài các câu không giống nhau. Cơ bản, các thư viện deep learning tính toán nhanh dựa trên các kĩ thuật tính toán ma trận (tensor), nên để tính các câu có độ dài ngắn khác nhau, các câu ngắn cần được nối thêm bởi các mã đặc biệt để có cùng kích thước.## Gán các mã __PADDED_INDEX__ = 0 # mã dùng cho các vị trí chỉ có tính nối dài cho cùng kích thước __UNKNOWN_WORD__ = 1 # mã cho những từ không có trong embedding # Tạo một dictionary, có nhiệm vụ là một ánh xạ từ ảnh sang mã số, mã số được bắt đầu từ 2 vì số 0 và 1 được dành cho trường hợp đặc biệt word_to_index = {word: index+2 for index, word in enumerate(vocab)} # Do do mã số được bắt đầu từ 2, nên cần thêm 2 vector vào đàu ma trận embedding_matrix = np.pad(matrix, [[2,0],[0,0]], mode='constant', constant_values =0.0) print(embedding_matrix) # Khi đó, __PADDED_INDEX__ dùng dòng đầu tiên của embedding_matrix # __UNKNOWN_WORD__ dùng dòng thứ hai của embedding_matrix ## Bây giờ ta sẽ chuyển data spam dection thành các mã số import tensorflow as tf X = [] for text in tokenized_texts: cur_text_indices = [] for word in text: if word in word_to_index: cur_text_indices.append(word_to_index[word]) ## map từ word sang index else: cur_text_indices.append(__UNKNOWN_WORD__) ## gán unknown cho từ không có trong bộ glove X.append(cur_text_indices) ## pad data cho có cùng chiều dài X = tf.keras.preprocessing.sequence.pad_sequences(sequences=X, # sequences: list các câu có độ dài không bằng nhau padding='post') # vị trí pad là 'pre' (trước) hoặc 'post' (sau) y = df['y'].values ## Label của bài toán, 0 là không phải spam, 1 là spam ## Chia data from sklearn.model_selection import train_test_split X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size= 0.2, random_state =0)RNN trong tensorflowfrom tensorflow.keras.layers import Input, Embedding, LSTM, Bidirectional, Dense from tensorflow.keras.models import Model inputs = 
Input(shape=(None,)) ## None biểu thị kích thước không xác định của câu embed = Embedding(input_dim=embedding_matrix.shape[0], ## Khai báo kích thước của vocab output_dim=embedding_matrix.shape[1], ## Khai báo kích thước của embedding embeddings_initializer = tf.keras.initializers.Constant(value=embedding_matrix), ## Khởi tạo cho embedding bằng ma trận có sẵn trainable=False, ## Không cần thiết train embedding mask_zero=True)(inputs) ## zero_mask: những vị trí có giá trị 0 không được tính toán, vì đó là giá trị thêm vào cho đủ độ dài mà thôi ## (__PADDED_INDEX__ gán bằng 0) lstm = LSTM(units=100, ## units: kích thước của hidden_state trong LSTM return_sequences=False)(embed) ## return_sequences: LSTM trả về toàn bộ hay là trả về hidden_state cuối cùng dense = Dense(units=2, activation='softmax')(lstm) model = Model(inputs=inputs, outputs=dense) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() # Checkpoint Callback mc = tf.keras.callbacks.ModelCheckpoint(filepath="lstm_spam.h5", monitor='val_loss', mode='min', verbose=0, save_best_only=True) # Train model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=10, callbacks=[mc]) model.load_weights("lstm_spam.h5") _, val_acc = model.evaluate(X_valid, y_valid) print("Accuracy on valid: ", val_acc)Train on 4459 samples, validate on 1115 samples Epoch 1/10 32/4459 [..............................] - ETA: 10:54WARNING:tensorflow:Can save best model only with val_loss available, skipping.Functional Programming Functions Are Just Objects What's the difference between an integer and a function?a = 42 def add_one(x): return x + 1A function has a type, just like an integer:type(a), type(add_one)A function has an identity, just like an integer:id(a), id(add_one)`add_one` is just a _name_ of a function object, just as `a` is the _name_ of the integer objectadd_one, add_one(a)A function has attributes, like other objects:dir(add_one)Note the `__call__()` magic method:add_one.__call__(42) from dataclasses import dataclassWe can define classes having a `__call__()` method@dataclass class AddN: n: int def __call__(self, x): return x + self.n... and call objects of these classes as if they were functions:add_two = AddN(2) add_two(42)So a function is nothing else than an object with a `__call__()` method implemented. Every mention of _callable_ in documentation or type hints means that the "callable" object has a `__call__()` implementation.callable(add_one), callable(add_two)__Discussion__: What would be a good use case for having an (obviously more complex) class with a `__call__()` method instead of a simple function definition? 
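One possible answer (an illustrative sketch, not taken from the original notebook): a class with `__call__()` is useful when the callable has to carry configuration or state that persists across calls, which a plain function cannot hold as cleanly. The hypothetical `RunningMean` below remembers every value it has seen so far:

from dataclasses import dataclass

@dataclass
class RunningMean:
    total: float = 0.0
    count: int = 0

    def __call__(self, x: float) -> float:
        # accumulate state across calls, then return the current average
        self.total += x
        self.count += 1
        return self.total / self.count

mean = RunningMean()
mean(10), mean(20), mean(30)  # (10.0, 15.0, 20.0)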
Functions as Arguments Because functions are objects, we can pass them as arguments to other functions:from typing import Callable, Iterable, List, Any def mapper(fn: Callable, l: Iterable[Any]) -> List[Any]: return [fn(x) for x in l] mapper(add_one, [1, 10, 100]) def is_even(n: int) -> bool: return n % 2 == 0 def filters(filter_fn: Callable, l: Iterable[Any]) -> List[Any]: return [x for x in l if filter_fn(x)] filters(is_even, [1, 2, 3, 4])Our `mapper()` and `filters()` functions were just there for illustration purposes: Python has the built-in functions [`map()`](https://docs.python.org/3/library/functions.htmlmap) and [`filter()`](https://docs.python.org/3/library/functions.htmlfilter).import pandas as pd transaction_df = pd.DataFrame({ 'amount': [42., 100., 999.], 'from': ['bob', 'alice', 'bob'], 'to': ['alice', 'bob', 'alice'] }) transaction_dfLet's define a function that indicates if the amount of a transaction is larger than 100:def select_large_transactions(transaction_df): return transaction_df['amount'] > 100 # Where does transaction_df refer to?Since `.loc[]` accepts a _callable_ as input, we can pass our function to it:transaction_df.loc[select_large_transactions]Note that we didn't _call_ the function ourselves. It was `.loc[]` that called our function, and it was `loc[]` that passed whatever DataFrame it was bound to at that moment as the first argument to our function. We can similarly pass functions to `assign()`:def get_commission(transaction_df): return transaction_df['amount'] * 0.05 transaction_df.assign(commission=get_commission)We can also _freeze_ some arguments of a function before passing it using [`functools.partial()`](https://docs.python.org/3/library/functools.htmlfunctools.partial) from the standard library:from functools import partial def add_n(x, n=1): return x + n add_three = partial(add_n, n=3) add_three(10) def get_commission(transaction_df, commission_percent=5): return transaction_df['amount'] * (commission_percent / 100) transaction_df.assign(commission=partial(get_commission, commission_percent=10))Functions Returning Functions If functions can accept other functions as arguments, they surely can also _return_ functions:def add_n(n): def adder(x): return x + n return adder add_two = add_n(2) add_two, type(add_two) add_two(42) mapper(add_two, [1, 42, 100]) mapper(add_two, [1, None, 100])There's a problem when we pass `None` (or other unexpected types) to the function that we apply on our iterable. We can fix it by modifying the adder function:def add_one(x): if x is not None: return x + 1 else: return None mapper(add_one, [1, None, 100])Why is this sub-optimal? What if there's an `add_two()` etc? Do we need to repeat all the checking for None everywhere?def skip_None(fn): def fn_wrapper(n): if n is not None: return fn(n) else: return None return fn_wrapperA solution is to define a _wrapper_ function `fn_wrapper()` that intercepts calls to the wrapped function `fn`, and only calls the wrapped function (and returns its result) if the arguments passed to the wrapped function are valid. 
Note that this wrapper function is defined inside the body of another function `skip_None()`, which accepts the actual function to be wrapped.def add_one(x): return x + 1 def add_two(x): return x + 2 add_one_wrapped = skip_None(add_one) add_two_wrapped = skip_None(add_two) mapper(add_one_wrapped, [1, None, 100]), mapper(add_two_wrapped, [1, None, 100])Instead of manually calling `skip_None()` to wrap our adder functions as above, Python has the `@` _decorator_ construct, which is just a syntactic shortcut to help us write more concise code. The following snippets are all equivalent:def add_one(x): return x + 1 skip_None(add_one)(1), skip_None(add_one)(None) def add_one(x): return x + 1 add_one = skip_None(add_one) add_one(1), add_one(None) @skip_None def add_one(x): return x + 1 add_one(1), add_one(None)We can also write decorators that accept arguments. These add a second layer of inner function nesting:def check_value(max_expected): def value_check_decorator(fn): def fn_wrapper(n): if n > max_expected: print(f'Unusual value {n}, expected a maximum of {max_expected}') return fn(n) return fn_wrapper return value_check_decorator @check_value(max_expected=42) def add_one(x): return x + 1 mapper(add_one, [1, 42, 100])Seeing decorators like these for the first time may be confusing. The following snippets clarify the nesting of functions:def add_one(x): return x + 1 max_41_checker = check_value(max_expected=41) add_one_with_max_41_checking = max_41_checker(add_one) add_one_with_max_41_checking(40), add_one_with_max_41_checking(42) ( check_value(max_expected=41)(add_one)(40), check_value(max_expected=41)(add_one)(42) )Anonymous Functions When we intend to use a single-purpose function that consists of returning the evaluation of one expression, we can use _lambda expressions_ instead of defining a (named) function:mapper(lambda x: x + 10, [1, 42, 100])Even a lambda expression ... is just an object (of type `function`)!type(lambda x: x + 10) id(lambda x: x + 10) dir(lambda x: x + 10) (lambda x: x + 10)(42)We can even assign a lambda expression to a variable, but this is discouraged in the "official" [Python Style Guide](https://www.python.org/dev/peps/pep-0008/).# Don't do this IRL add_ten = lambda x: x + 10 add_ten(42)Lambda expressions can have more than one argument:from functools import reduce reduce(lambda x, y: x + y, [1, 2, 3])Exercises Let's look back at our Vector class from the previous module.from dataclasses import dataclass @dataclass class Vector: values: List[float] def __getitem__(self, index: int): return self.values[index] def __len__(self): return len(self.values) def __mul__(self, scalar: float): return Vector([v * scalar for v in self.values]) def __add__(self, other: 'Vector'): return Vector([self[i] + other[i] for i in range(len(self))])__Exercise__: Add a method `pipe()` to our `Vector` class, which accepts as argument a function that transforms its input vector argument to another vector. As an example of such transformation functions, consider `rotate_right()` and `rotate_left()` given below. 
Test your implementation with the assertions below.def rotate_right(v: Vector) -> Vector: # rotates input by 90 degrees clockwise return Vector([v[1], -v[0]]) def rotate_left(v: Vector) -> Vector: # rotates input by 90 degrees counterclockwise return Vector([-v[1], v[0]]) # Your solution: # %load solutions/vector_pipe.py assert Vector([1, 1]).pipe(rotate_right) == Vector([1, -1]) assert Vector([1, 1]).pipe(rotate_left).pipe(rotate_left) == Vector([-1, -1])__Bonus Exercise__: Make sure that our `pipe()` method can accept additional arguments: create a generic `rotate()` function that accepts an optional `direction` keyword argument with possible values `'right'`, `'clockwise'`, `'left'`, `'counterclockwise'`. If an invalid or no `direction` keyword argument is provided, `rotate()` should return the original vector. See the assertions below for the expected behavior of the solution.# Your solution: # %load solutions/vector_pipe_vararg.py # bonus, have one generic rotation function assert Vector([1, 1]).pipe(rotate) == Vector([1, 1]) assert Vector([1, 1]).pipe(rotate, direction='right').pipe(rotate, direction='counterclockwise') == Vector([1, 1]) assert Vector([1, 1]).pipe(rotate, direction='clockwise').pipe(rotate, direction='left') == Vector([1, 1]) # bonus, deal with any Vector manipulation function assert Vector([1, 1]).pipe(lambda v: Vector([v[0] * 42, v[1] * 99])) == Vector([42, 99])By now, the pandas `pipe()` method should have no secrets anymore:transaction_df def select_amount_greater_than(tx_df, amount=100): return tx_df.loc[lambda df: df['amount'] > amount] transaction_df.pipe(select_amount_greater_than) transaction_df.pipe(select_amount_greater_than, amount=99) transaction_df.pipe(lambda df: df.loc[df['to'].isin(['bob', 'carol'])])05_Logistic_Regression_ModelsIn this notebook, we will see how to define simple logistic regression models.import torch import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms import matplotlib.pyplot as plt %matplotlib inline torch.manual_seed(777) # reproducibilityLogistic Regression ModelsLogistic regression models are the same as linear regression models, except that they use logistic sigmoid (or softmax) function for computing the probabilities of target classes.You can define and train logistic regression models in the same way as linear regression models. We will look at all the processes with a concrete example, MNIST.The MNIST databse is a large database of handwritten digits, with the image size of 28x28.The training set consists of 60,000 images and the test set consists of 10,000 images. 
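The point above can be made concrete with a minimal sketch (not part of the original notebook): a logistic regression model for MNIST is just a linear map from the 784 flattened pixels to 10 class scores, with softmax turning those scores into probabilities. The tensor `dummy_input` below is a made-up placeholder, not MNIST data.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# logistic regression for MNIST: one linear layer mapping 784 pixels to 10 class scores
linear = nn.Linear(28 * 28, 10)

# a single fake "image" (batch of 1), flattened to 784 values, for illustration only
dummy_input = torch.randn(1, 28 * 28)

logits = linear(dummy_input)        # raw class scores, shape (1, 10)
probs = F.softmax(logits, dim=1)    # softmax converts the scores into class probabilities

print(probs.sum())                  # the probabilities sum to 1
```

In the training code below the softmax is not applied explicitly, because `nn.CrossEntropyLoss()` computes it internally; the model itself therefore stays a plain `nn.Linear`.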
DataLoaderbatch_size = 100 # MNIST dataset train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor()) # Data loader train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) # plot one example print(train_dataset.train_data.size()) # (60000, 28, 28) print(train_dataset.train_labels.size()) # (60000) idx = 0 plt.title('%d' % train_dataset.train_labels[idx].item()) plt.imshow(train_dataset.train_data[idx,:,:].numpy(), cmap='gray')Define Linear Regression Models# Hyper-parameters input_size = 784 num_classes = 10 # Device configuration # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device = torch.device('cpu') # Logistic regression model model = nn.Linear(input_size, num_classes).to(device)Loss function and OptimizerA loss function takes the (output, target) pair of inputs, and computes a value that estimates how far away the output is from the target.We use `nn.CrossEntropyLoss()` for logistic regression.# nn.CrossEntropyLoss() computes softmax internally criterion = nn.CrossEntropyLoss()Furtheremore, PyTorch supports several optimizers from `torch.optim`.We use an Adam optimizer.learning_rate = 0.001 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)TensorboardTo visualize model training, you can use [Tensorboard](https://www.tensorflow.org/tensorboard).It was originally developed for Tensorflow, but you can also use it for PyTorch via [TensorboardX](https://github.com/lanpa/tensorboardX).!pip install tensorboardX==1.2 from tensorboardX import SummaryWriter summary = SummaryWriter("runs/experiment") %load_ext tensorboardTrain the networknum_epochs = 5 # Train the model total_step = len(train_loader) step_i = 0 for epoch in range(num_epochs): for i, (images, labels) in enumerate(train_loader): # Reshape images to (batch_size, input_size) images = images.reshape(-1, 28*28).to(device) # Forward pass outputs = model(images) loss = criterion(outputs, labels) # Backward and optimize optimizer.zero_grad() loss.backward() optimizer.step() step_i += 1 if (i+1) % 100 == 0: print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' .format(epoch+1, num_epochs, i+1, total_step, loss.item())) # tensorboard logging summary.add_scalar("loss", loss.item(), step_i) summary.add_histogram("weight", model.weight.clone().detach().cpu().numpy(), step_i) summary.add_histogram("bias", model.bias.clone().detach().cpu().numpy(), step_i)Tensorboard log%tensorboard --logdir runs/experimentTest the network# Test the model # In test phase, we don't need to compute gradients (for memory efficiency) with torch.no_grad(): correct = 0 total = 0 for images, labels in test_loader: images = images.reshape(-1, input_size).to(device) labels = labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))Save/Load the network parameters# Save the model checkpoint torch.save(model.state_dict(), './data/logistic_regression_model.ckpt') # Load the model checkpoint if needed # new_model = nn.Linear(input_size, num_classes).to(device) # 
new_model.load_state_dict(torch.load('./data/logistic_regression_model.ckpt'))Dataset Source Dataset used here for the analysis and its license agreement information can be found can be found by clicking dataset link - DATASETLINK = [dataset_link](https://www.kaggle.com/libinmathew264/youtube-top-4000-channels-based-on-subscribers)df = pd.read_csv('YouTube.csv', encoding='ISO-8859-1') df.head()Part I : The most sought-after contents by the YouTube audience# which type of channels mostly exist on youtube popular_channels = df.channeltype.value_counts().to_frame('count') display(popular_channels) most_popular_channels.plot(kind = 'bar')PART II: Curating content for segment-specific audience# which type of channels has most subscribers most_subscribed_channels = df.groupby('channeltype').sum()['subscribers'].sort_values(ascending = False).to_frame('subscribers_count') #most_subscribed_channels.reset_index(level=0, inplace=True) most_subscribed_channels #most_subscribed_channels.plot(kind = 'bar') most_subscribed_channels.plot(kind = 'bar') # which country has most subscribers for education channel type education_subscriber = df[df.channeltype == 'Education'].groupby('country').sum()['subscribers'].sort_values(ascending = False).to_frame('Educational_Subscriber') education_subscriber # NOTE INDIA is the second largest educational subscriber education_subscriber.plot(kind = 'bar', figsize=(10, 5), title="Top Educational Subscribers") # which country has most subscribers for Music channel type music_subscriber = df[df.channeltype == 'Music'].groupby('country').sum()['subscribers'].sort_values(ascending = False).to_frame('Music_Subscriber') music_subscriber # Top 10 music subscriber based countries music_subscriber[:10].plot(kind = 'bar', rot=0, figsize=(15, 4), title="Music Subscribers") # which country has most subscribers for Sports channel type sports_subscriber = df[df.channeltype == 'Sports'].groupby('country').sum()['subscribers'].sort_values(ascending = False).to_frame('Sports_Subscriber') sports_subscriber sports_subscriber.plot(kind = 'bar', figsize=(15, 4), title="Sports Subscriber") # merging all segments into a dataframe channel_subscribers = None for chl_type in df.channeltype.dropna().unique(): print(chl_type) df_ = df[df.channeltype == chl_type].groupby('country').sum()['subscribers'].to_frame(chl_type) print(df_.shape) if channel_subscribers is None: channel_subscribers = df_ else: channel_subscribers = channel_subscribers.join(df_) df_channel_subscribers = df_channel_subscribers.fillna(0) df_channel_subscribers df_channel_subscribers.shape # Correlation of channeltype subscribers count sns.heatmap(df_channel_subscribers.corr())PART III: Monetising your contenthighest_earning = df.sort_values('MonthlyEarningsMin', ascending=False) highest_earning = highest_earning[['name', 'MonthlyEarningsMin']] highest_earning = highest_earning.set_index('name') highest_earning.head(15).plot(kind = 'bar', figsize=(10, 5), title="Highest Earning") highest_earning.head(25)Cases in Malaysia> Updates on the respiratory illness that has infected more than one million people and killed tens of thousands.- toc:false- branch: master- badges: false- hide: false- comments: false- permalink:/covid-my-overview/#hide #@title Import modules import pandas as pd import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots # import ipywidgets as widgets # from jinja2 import Template #hide #@title Load datasets from source and define functions #@markdown source: [Novel 
Coronavirus (COVID-19) Cases, provided by JHU CSSE](https://github.com/CSSEGISandData/COVID-19) #@markdown * `cases_global` Global dataset by type #@markdown * `country_cases_df` Filter cases by country and dataset type # source data # [Novel Coronavirus (COVID-19) Cases, provided by JHU CSSE](https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series) base_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' time_series_url = f'{base_url}/csse_covid_19_time_series' covid_time_series_df = lambda type: pd.read_csv(f'{time_series_url}/time_series_covid19_{type}_global.csv') # confirmed, deaths, recovered def cases_global(type): _dff = covid_time_series_df(type) _cols = _dff.columns[~_dff.columns.isin(['Province/State', 'Country/Region', 'Lat', 'Long'])] _dff = (_dff.groupby('Country/Region')[_cols].sum().stack().reset_index(name = 'Cases') .rename(columns = {'level_1': 'Date', 'Country/Region': 'Country'})) _dff['Date'] = pd.to_datetime(_dff['Date'], format='%m/%d/%y') return _dff country_cases_df = lambda _df, country: _df[_df['Country'] == country][['Date', 'Cases']].reset_index(drop=True) #hide #@title Prepare datasets # Country filter global_cases = { 'confirmed': cases_global('confirmed'), 'deaths': cases_global('deaths'), 'recovered': cases_global('recovered') } def country_cases(country): confirmed_df = country_cases_df(global_cases['confirmed'], country) deaths_df = country_cases_df(global_cases['deaths'], country) recovered_df = country_cases_df(global_cases['recovered'], country) active_df = pd.concat([confirmed_df['Date'], confirmed_df['Cases'] - deaths_df['Cases'] - recovered_df['Cases']], axis=1) return { 'country': country, 'active': active_df, 'confirmed': confirmed_df, 'deaths': deaths_df, 'recovered': recovered_df } cases_summary = lambda c: { # c country_cases(country) 'country': c['country'], 'date_latest': c['confirmed']['Date'].max(), 's_date_latest': c['confirmed']['Date'].max().strftime("%B %d, %Y"), #.strftime('%m/%d/%Y') 'total_active': c['active'].iloc[-1]['Cases'], 'total_confirmed': c['confirmed'].iloc[-1]['Cases'], 'total_deaths': c['deaths'].iloc[-1]['Cases'], 'total_recovered': c['recovered'].iloc[-1]['Cases'], 'new_active': c['active'].diff().iloc[-1]['Cases'], 'new_confirmed': c['confirmed'].diff().iloc[-1]['Cases'], 'new_deaths': c['deaths'].diff().iloc[-1]['Cases'], 'new_recovered': c['recovered'].diff().iloc[-1]['Cases'], } #hide #@title Define plot functions { form-width: "100px" } # def show_summary(country_cases): # summary = cases_summary(country_cases) # return HTML( # f'
Summary of {summary["country"]} COVID19 cases as of {summary["s_date_latest"]} 12PM
# Confirmed Cases: {summary["total_confirmed"]:,} (+{summary["new_confirmed"]:,.0f})
# Deaths: {summary["total_deaths"]:,} | {summary["total_deaths"]/summary["total_confirmed"]:.2%} (+{summary["new_deaths"]:,.0f})
# Recovered: {summary["total_recovered"]:,} | {summary["total_recovered"]/summary["total_confirmed"]:.2%} (+{summary["new_recovered"]:,.0f})
# Active: {summary["total_active"]:,} | {summary["total_active"]/summary["total_confirmed"]:.2%} ({summary["new_active"]:,.0f})
' # ) # def plot_cases(cases_df): # _plot = alt.Chart(cases_df).mark_bar().encode( # x='Date:T', # y='Cases:Q', # tooltip=list(cases_df) # ) # return _plot def fig_summary(country_cases): fig = go.Figure() summary = cases_summary(country_cases) fig.add_trace(go.Indicator( mode = "number+delta", value = summary['total_confirmed'], number = {'valueformat': ','}, align = "left", title = {"text": f'Confirmed', "align": "left", "font": {"family": "sans-serif"}}, delta = {'reference': summary['total_confirmed'] - summary['new_confirmed'], 'position': 'right', 'valueformat': ','}, domain = {'x': [0, .2], 'y': [.5, 1]})) fig.add_trace(go.Indicator( mode = "number+delta", value = summary['total_deaths'], number = {'valueformat': ','}, align = "left", title = {"text": f'Deaths {summary["total_deaths"]/summary["total_confirmed"]:.2%}', "align": "left", "font": {"family": "sans-serif"}}, delta = {'reference': summary['total_deaths'] - summary['new_deaths'], 'position': 'right', 'valueformat': ','}, domain = {'x': [0, .2], 'y': [0, .5]})) fig.add_trace(go.Indicator( mode = "number+delta", value = summary['total_recovered'], number = {'valueformat': ','}, align = "left", title = {"text": f'Recovered {summary["total_recovered"]/summary["total_confirmed"]:.2%}', "align": "left", "font": {"family": "sans-serif"}}, delta = {'reference': summary['total_recovered'] - summary['new_recovered'], 'position': 'right', 'valueformat': ','}, domain = {'x': [.4, .6], 'y': [0, .5]})) fig.add_trace(go.Indicator( mode = "number+delta", value = summary['total_active'], number = {'valueformat': ','}, align = "left", title = {"text": f'Active {summary["total_active"]/summary["total_confirmed"]:.2%}', "align": "left", "font": {"family": "sans-serif"}}, delta = {'reference': summary['total_active'] - summary['new_active'], 'position': 'right', 'valueformat': ','}, domain = {'x': [.4, .6], 'y': [.5, 1]})) fig.update_layout(title=f'Summary of {summary["country"]} COVID19 cases as of {summary["s_date_latest"]} 12PM') return fig, summaryCOVID-19: Malaysia at a Glance Malaysia Movement Control Order> The 2020 Malaysia movement control order, commonly referred to as the MCO, is a cordon sanitaire implemented as a preventive measure by the federal government of Malaysia in response to the COVID-19 pandemic in the country on 18 March 2020.#hide #@title Define functions for MCO { form-width: "100px" } #@markdown * my_cases #@markdown * my_summary #country = 'Malaysia' my_cases = country_cases('Malaysia') my_summary = cases_summary(my_cases) # def plot_mco_date(date, label, max): # _df = pd.DataFrame({'Date': [date, date], 'Cases': [0, max]}) # _base_ref = alt.Chart(_df).encode(x='Date:T', y='Cases:Q') # return (_base_ref.mark_line(color='black', opacity=.5, strokeDash=[3,3]) + # _base_ref.transform_filter(alt.datum['Cases'] > 0).mark_text(text=label, dx=-20, dy=-10, angle=270)) # def plot_mco_cases(case, label, max): # _df = pd.DataFrame({'Cummulative Cases': [case, case], 'Cases': [0.1, max]}) # _base_ref = alt.Chart(_df).encode(x='Cummulative Cases:Q', y='Cases:Q') # return (_base_ref.mark_line(color='black', opacity=.5, strokeDash=[3,3]) + # _base_ref.transform_filter(alt.datum['Cases'] > 1).mark_text(text=label, dx=-20, dy=-10, angle=270)) # _plot_mco_date = lambda n: plot_mco_date('2020-03-18', 'MCO1', n) + plot_mco_date('2020-04-01', 'MCO2', n) \ # + plot_mco_date('2020-04-15', 'MCO3', n) + plot_mco_date('2020-04-29', 'MCO4', n) \ # + plot_mco_date('2020-05-04', 'CMCO', n) # _plot_mco_case = plot_mco_cases(790, 'MCO1', 1000) + 
plot_mco_cases(2908, 'MCO2', 1000) + plot_mco_cases(5072, 'MCO3', 1000) + plot_mco_cases(5945, 'MCO4', 1000) + plot_mco_cases(6353, 'CMCO', 1000) # Add latest data which not exist in source _latest_case = { 'date': np.datetime64('2020-06-12'), 'confirmed': 8402, 'deaths': 119, 'recovered': 7168 } if len(my_cases['confirmed'][my_cases['confirmed']['Date'] == _latest_case['date']]) == 0: my_cases['active'] = my_cases['active'].append(pd.DataFrame([[_latest_case['date'], _latest_case['confirmed'] - _latest_case['deaths'] - _latest_case['recovered']]], columns = ['Date', 'Cases']), ignore_index=True) my_cases['confirmed'] = my_cases['confirmed'].append(pd.DataFrame([[_latest_case['date'], _latest_case['confirmed']]], columns = ['Date', 'Cases']), ignore_index=True) my_cases['deaths'] = my_cases['deaths'].append(pd.DataFrame([[_latest_case['date'], _latest_case['deaths']]], columns = ['Date', 'Cases']), ignore_index=True) my_cases['recovered'] = my_cases['recovered'].append(pd.DataFrame([[_latest_case['date'], _latest_case['recovered']]], columns = ['Date', 'Cases']), ignore_index=True) #hide_input #@title Summary fig,_summary = fig_summary(my_cases) fig.show() labels = ['Recovered','Active','Deaths'] colors = ['#2e7d32', '#f9a825', '#c62828'] values = [_summary['total_recovered'], _summary['total_active'], _summary['total_deaths']] fig = go.Figure(data=[go.Pie(labels=labels, values=values)]) fig.update_traces(hoverinfo='label+percent', textinfo='value+percent', textfont_size=20, marker=dict(colors=colors, line=dict(color='gray', width=2))) fig.show() #hide_input #@title Cummulative cases in Malaysia mco_periods = [ {'name': 'MCO1', 'date_start': '2020-03-18', 'cases': 790}, {'name': 'MCO2', 'date_start': '2020-04-01', 'cases': 2908}, {'name': 'MCO3', 'date_start': '2020-04-15', 'cases': 5072}, {'name': 'MCO4', 'date_start': '2020-04-29', 'cases': 5945}, {'name': 'CMCO', 'date_start': '2020-05-04', 'cases': 6353}, {'name': 'RMCO', 'date_start': '2020-06-10', 'cases': 8338} ] def add_mco_periods(fig, line_y_max, text_y_pos): for period in mco_periods: fig.add_shape(dict(x0=period['date_start'],x1=period['date_start'])) fig.add_annotation(x=period['date_start'],y=text_y_pos,text=period['name']) fig.update_shapes(dict( xref='x', yref='y', type="line", y0=0, y1=line_y_max, line=dict( color='black', width=3, dash='dot' ) ) ) fig.update_layout( #showlegend=False, annotations=[ dict( xref="x", yref="y", textangle=270, showarrow=False, xshift=-10, font=dict(color='black') ) ] ) return fig _data = my_cases['confirmed'] _data_mco = my_cases['confirmed'][my_cases['confirmed']['Date'].isin(['2020-03-18'])] fig = go.Figure( data=[go.Bar(x=_data['Date'], y=_data['Cases'])], layout_title_text=f'Cummulative COVID-19 cases in Malaysia (n = {my_summary["total_confirmed"]})' ) add_mco_periods(fig, my_cases['confirmed']['Cases'].max(), 6000) fig.show()Is the curve is flatterning in Malaysia?> Inflection-sensitive chart for detecting successful interventions, from the article "How To Tell If We're Beating COVID-19". 
Please refer _minutephysics_ for [How To Tell If We're Beating COVID-19](https://youtu.be/54XLXg4fYsc)#hide #@title Define flatterning figure def fig_flattern(fig, country): _data = country_cases(country)['confirmed'] _data['New Cases'] = _data['Cases'].diff() _data['Average New Cases'] = round(_data['New Cases'].rolling(window=7).mean(),0) _data = _data[(_data['Cases'] > 100) & (_data['Average New Cases'] > 0)] fig.add_trace(go.Scatter( name=country, x=_data['Cases'], y=_data['Average New Cases'] )) return fig #hide_input #@title Plot flatterning curve of countries fig = go.Figure() fig_flattern(fig, 'Malaysia') fig_flattern(fig, 'Singapore') fig_flattern(fig, 'Korea, South') fig_flattern(fig, 'Sweden') fig_flattern(fig, 'China') fig_flattern(fig, 'Italy') fig_flattern(fig, 'United Kingdom') fig_flattern(fig, 'US') fig_flattern(fig, 'Taiwan*') fig.update_layout(legend=dict(x=0, y=1, orientation='h'), title=f'Trajectory of COVID-19 Confirmed Cases vs New Cases (7 Days Average)') fig.update_xaxes(title_text="Cummulative cases (Log scale)", type='log') fig.update_yaxes(title_text="New cases (Log scale)", type='log') fig.show() #hide_input #@title Plot daily reported cases rolling_window = 7 _data = my_cases['confirmed'].copy() _data['New Cases'] = _data['Cases'].diff() _data['Average New Cases'] = round(_data['New Cases'].rolling(window=rolling_window).mean(),0) fig = go.Figure() fig.add_trace(go.Bar( name='Daily New Cases', x=_data['Date'], y=_data['New Cases'] )) fig.add_trace(go.Scatter( name='7 Days Average', x=_data['Date'], y=_data['Average New Cases'] )) add_mco_periods(fig, _data['New Cases'].max(), _data['New Cases'].max() - 15) fig.update_layout(legend=dict(x=0, y=1, orientation='h'),title=f'Daily Reported Cases in Malaysia') fig.update_xaxes(title_text="Reported Date") fig.update_yaxes(title_text="New cases (Log scale)") fig.show() #hide global_cases['confirmed']['Country'].unique() #hide df_states = pd.read_csv("https://docs.google.com/spreadsheets/d/e/2PACX-1vTzT-B2triWGPE74rfUT4XOsF-5qsB1tM6OfMPVKiRHX95tE9tPubdTbxY/pub?gid=1726267961&single=true&output=csv", parse_dates=['Date']) df_states.set_index('Date').sum(axis=1) #hide df_districts = pd.read_csv("https://docs.google.com/spreadsheets/d/e/2PACX-1vTzT9vUJNiKV2yN4sb_VvxKcq-B2triWGPE74rfUT4XOsF-5qsB1tM6OfMPVKiRHX95tE9tPubdTbxY/pub?gid=1667946793&single=true&output=csv") col_latest = df_districts.columns.values[-1] df_districts[['Districts', 'State', col_latest]].sort_values(col_latest, ascending=False).reset_index(drop=True).head(10) #hide df_districts_last2 = df_districts.set_index(['Districts', 'State']).transpose().tail(2) df_districts_new = df_districts_last2.diff().tail(1).transpose() df_districts_new = df_districts_new.sort_values(by=df_districts_new.columns[0], ascending=False).head(10) df_districts_new[df_districts_new[df_districts_new.columns[0]] > 0]Sympy - Symbolic algebra in Python () - minor edits to perform plotting using the Bokeh package. 
2015-05-17 () http://dml.riken.jp/~rob/The latest version of the original [IPython notebook](http://ipython.org/notebook.html) lecture is available at [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures).The other notebooks in this lecture series are indexed at [http://jrjohansson.github.com](http://jrjohansson.github.com).from bokeh.plotting import output_notebook output_notebook() from bokeh.plotting import figure, showIntroduction There are two notable Computer Algebra Systems (CAS) for Python:* [SymPy](http://sympy.org/en/index.html) - A python module that can be used in any Python program, or in an IPython session, that provides powerful CAS features. * [Sage](http://www.sagemath.org/) - Sage is a full-featured and very powerful CAS enviroment that aims to provide an open source system that competes with Mathematica and Maple. Sage is not a regular Python module, but rather a CAS environment that uses Python as its programming language.Sage is in some aspects more powerful than SymPy, but both offer very comprehensive CAS functionality. The advantage of SymPy is that it is a regular Python module and integrates well with the IPython notebook. In this lecture we will therefore look at how to use SymPy with IPython notebooks. If you are interested in an open source CAS environment I also recommend to read more about Sage.To get started using SymPy in a Python program or notebook, import the module `sympy`:from sympy import *To get nice-looking $\LaTeX$ formatted output run:init_printing() # or with older versions of sympy/ipython, load the IPython extension #%load_ext sympy.interactive.ipythonprinting # or #%load_ext sympyprintingSymbolic variables In SymPy we need to create symbols for the variables we want to work with. We can create a new symbol using the `Symbol` class:x = Symbol('x') (pi + x)**2 # alternative way of defining symbols a, b, c = symbols("a, b, c") type(a)We can add assumptions to symbols when we create them:x = Symbol('x', real=True) x.is_imaginary x = Symbol('x', positive=True) x > 0Complex numbers The imaginary unit is denoted `I` in Sympy.1+1*I I**2 (x * I + 1)**2Rational numbers There are three different numerical types in SymPy: `Real`, `Rational`, `Integer`:r1 = Rational(4,5) r2 = Rational(5,4) r1 r1+r2 r1/r2Numerical evaluation SymPy uses a library for arbitrary precision as numerical backend, and has predefined SymPy expressions for a number of mathematical constants, such as: `pi`, `e`, `oo` for infinity.To evaluate an expression numerically we can use the `evalf` function (or `N`). It takes an argument `n` which specifies the number of significant digits.pi.evalf(n=50) y = (x + pi)**2 N(y, 5) # same as evalfWhen we numerically evaluate algebraic expressions we often want to substitute a symbol with a numerical value. 
In SymPy we do that using the `subs` function:y.subs(x, 1.5) N(y.subs(x, 1.5))The `subs` function can of course also be used to substitute Symbols and expressions:y.subs(x, a+pi)We can also combine numerical evolution of expressions with NumPy arrays:import numpy x_vec = numpy.arange(0, 10, 0.1) y_vec = numpy.array([N(((x + pi)**2).subs(x, xx)) for xx in x_vec], dtype=float) #TODO: note, weird issue with "Float" object (vs numpy float type) serialization to JSON N(y, 5) title = N(y, 5).__str__() p = figure(title=title, x_axis_label='x', y_axis_label='y') p.line(x_vec, y_vec) show(p)However, this kind of numerical evolution can be very slow, and there is a much more efficient way to do it: Use the function `lambdify` to "compile" a Sympy expression into a function that is much more efficient to evaluate numerically:f = lambdify([x], (x + pi)**2, 'numpy') # the first argument is a list of variables that # f will be a function of: in this case only x -> f(x) y_vec = f(x_vec) # now we can directly pass a numpy array and f(x) is efficiently evaluated # Note: this does not have the same "Float" object problem as above.The speedup when using "lambdified" functions instead of direct numerical evaluation can be significant, often several orders of magnitude. Even in this simple example we get a significant speed up:%%timeit y_vec = numpy.array([N(((x + pi)**2).subs(x, xx)) for xx in x_vec]) %%timeit y_vec = f(x_vec)The slowest run took 13.65 times longer than the fastest. This could mean that an intermediate result is being cached. 100000 loops, best of 3: 2.48 µs per loopAlgebraic manipulations One of the main uses of an CAS is to perform algebraic manipulations of expressions. For example, we might want to expand a product, factor an expression, or simply an expression. The functions for doing these basic operations in SymPy are demonstrated in this section. Expand and factor The first steps in an algebraic manipulation(x+1)*(x+2)*(x+3) expand((x+1)*(x+2)*(x+3))This is "reversible" with the `factor` command. The `expand` function takes a number of keywords arguments which we can tell the functions what kind of expansions we want to have performed. For example, to expand trigonometric expressions, use the `trig=True` keyword argument:sin(a+b) expand(sin(a+b), trig=True)See `help(expand)` for a detailed explanation of the various types of expansions the `expand` functions can perform. The opposite of product expansion is of course factoring. The factor an expression in SymPy use the `factor` function:factor(x**3 + 6 * x**2 + 11*x + 6)Simplify The `simplify` tries to simplify an expression into a nice looking expression, using various techniques. More specific alternatives to the `simplify` functions also exists: `trigsimp`, `powsimp`, `logcombine`, etc. The basic usages of these functions are as follows:# simplify expands a product simplify((x+1)*(x+2)*(x+3)) # simplify uses trigonometric identities simplify(sin(a)**2 + cos(a)**2) simplify(cos(x)/sin(x))apart and together To manipulate symbolic expressions of fractions, we can use the `apart` and `together` functions:f1 = 1/((a+1)*(a+2)) f1 apart(f1) f2 = 1/(a+2) + 1/(a+3) f2 together(f2)Simplify usually combines fractions but does not factor:simplify(f2)Calculus In addition to algebraic manipulations, the other main use of CAS is to do calculus, like derivatives and integrals of algebraic expressions. Differentiation Differentiation is usually simple. Use the `diff` function. 
The first argument is the expression to take the derivative of, and the second argument is the symbol by which to take the derivative:y y**2 diff(y**2, x)For higher order derivatives we can do:diff(y**2, x, x) diff(y**2, x, 2) # same as aboveTo calculate the derivative of a multivariate expression, we can do:x, y, z = symbols("x,y,z") f = sin(x*y) + cos(y*z)$\frac{d^3f}{dxdy^2}$diff(f, x, 1, y, 2)Integration Integration is done in a similar fashion:f integrate(f, x)By providing limits for the integration variable we can evaluate definite integrals:integrate(f, (x, -1, 1))and also improper integralsintegrate(exp(-x**2), (x, -oo, oo))Remember, `oo` is the SymPy notation for inifinity. Sums and products We can evaluate sums and products using the functions: 'Sum'n = Symbol("n") Sum(1/n**2, (n, 1, 10)) Sum(1/n**2, (n,1, 10)).evalf() Sum(1/n**2, (n, 1, oo)).evalf()Products work much the same way:Product(n, (n, 1, 10)) # 10!Limits Limits can be evaluated using the `limit` function. For example,limit(sin(x)/x, x, 0)We can use 'limit' to check the result of derivation using the `diff` function:f diff(f, x)$\displaystyle \frac{\mathrm{d}f(x,y)}{\mathrm{d}x} = \frac{f(x+h,y)-f(x,y)}{h}$h = Symbol("h") limit((f.subs(x, x+h) - f)/h, h, 0)OK! We can change the direction from which we approach the limiting point using the `dir` keywork argument:limit(1/x, x, 0, dir="+") limit(1/x, x, 0, dir="-")Series Series expansion is also one of the most useful features of a CAS. In SymPy we can perform a series expansion of an expression using the `series` function:series(exp(x), x)By default it expands the expression around $x=0$, but we can expand around any value of $x$ by explicitly include a value in the function call:series(exp(x), x, 1)And we can explicitly define to which order the series expansion should be carried out:series(exp(x), x, 1, 10)The series expansion includes the order of the approximation, which is very useful for keeping track of the order of validity when we do calculations with series expansions of different order:s1 = cos(x).series(x, 0, 5) s1 s2 = sin(x).series(x, 0, 2) s2 expand(s1 * s2)If we want to get rid of the order information we can use the `removeO` method:expand(s1.removeO() * s2.removeO())But note that this is not the correct expansion of $\cos(x)\sin(x)$ to $5$th order:(cos(x)*sin(x)).series(x, 0, 6)Linear algebra Matrices Matrices are defined using the `Matrix` class:m11, m12, m21, m22 = symbols("m11, m12, m21, m22") b1, b2 = symbols("b1, b2") A = Matrix([[m11, m12],[m21, m22]]) A b = Matrix([[b1], [b2]]) bWith `Matrix` class instances we can do the usual matrix algebra operations:A**2 A * bAnd calculate determinants and inverses, and the like:A.det() A.inv()Solving equations For solving equations and systems of equations we can use the `solve` function:solve(x**2 - 1, x) solve(x**4 - x**2 - 1, x)System of equations:solve([x + y - 1, x - y - 1], [x,y])In terms of other symbolic expressions:solve([x + y - a, x - y - c], [x,y])Quantum mechanics: noncommuting variables How about non-commuting symbols? 
In quantum mechanics we need to work with noncommuting operators, and SymPy has a nice support for noncommuting symbols and even a subpackage for quantum mechanics related calculations!from sympy.physics.quantum import *States We can define symbol states, kets and bras:Ket('psi') Bra('psi') u = Ket('0') d = Ket('1') a, b = symbols('alpha beta', complex=True) phi = a * u + sqrt(1-abs(a)**2) * d; phi Dagger(phi) Dagger(phi) * dUse `qapply` to distribute a mutiplication:qapply(Dagger(phi) * d) qapply(Dagger(phi) * u)OperatorsA = Operator('A') B = Operator('B')Check if they are commuting!A * B == B * A expand((A+B)**3) c = Commutator(A,B) cWe can use the `doit` method to evaluate the commutator:c.doit()We can mix quantum operators with C-numbers:c = Commutator(a * A, b * B) cTo expand the commutator, use the `expand` method with the `commutator=True` keyword argument:c = Commutator(A+B, A*B) c.expand(commutator=True) Dagger(Commutator(A, B)) ac = AntiCommutator(A,B) ac.doit()Example: Quadrature commutator Let's look at the commutator of the electromagnetic field quadatures $x$ and $p$. We can write the quadrature operators in terms of the creation and annihilation operators as:$\displaystyle x = (a + a^\dagger)/\sqrt{2}$$\displaystyle p = -i(a - a^\dagger)/\sqrt{2}$X = (A + Dagger(A))/sqrt(2) X P = -I * (A - Dagger(A))/sqrt(2) PLet's expand the commutator $[x,p]$Commutator(X, P).expand(commutator=True).expand(commutator=True)Here we see directly that the well known commutation relation for the quadratures$[x,p]=i$is a directly related to$[A, A^\dagger]=1$ (which SymPy does not know about, and does not simplify). For more details on the quantum module in SymPy, see:* http://docs.sympy.org/0.7.2/modules/physics/quantum/index.html* http://nbviewer.ipython.org/urls/raw.github.com/ipython/ipython/master/docs/examples/notebooks/sympy_quantum_computing.ipynb Further reading * http://sympy.org/en/index.html - The SymPy projects web page.* https://github.com/sympy/sympy - The source code of SymPy.* http://live.sympy.org - Online version of SymPy for testing and demonstrations. Versions# must have version_information.py file in local dir or appropriate PATH %reload_ext version_information %version_information numpy, sympy, bokehVisualizationfig = plt.figure(figsize=(7,10)) ax = fig.add_subplot(212) ax.hist(df['ratingnh'], bins=50, range=(df['ratingnh'].min(),df['ratingnh'].max())) plt.title('Rating Distribution with 50 Bins') plt.xlabel('Rating') plt.ylabel('Count of Rating') plt.show() df.ratingnh.unique() #Nation-wide distribution of Satisfaction levels for NEIGHBORHOOD? sns.relplot(x="ratingnh", y='control',data=df) #filter on 47900: Washington-Arlington-Alexandria, DC-VA-MD-WV|| states only dmv = df[(df['omb13cbsa'] == 47900)] #DMV wide distribution of Satisfaction levels for NEIGHBORHOOD? 
sns.relplot(x="ratingnh", y="control", data=dmv) df.omb13cbsa.unique() #Regression and Residual plot between total household income and neigborhood rating #tips = sns.load_dataset("ratingnh") sns.regplot(x="hincp", y="ratingnh", data=df, ci=None) plt.title("Rating Regression") plt.xlabel('Total Household Income', size=12) plt.ylabel('Neighborhood Rating', size=12) plt.show() sns.residplot(x="hincp", y="ratingnh", data=df) plt.title("Rating Residuals") plt.xlabel('Total Household Income', size=12) plt.ylabel('Neighborhood Rating', size=12) plt.show() # Label the state names LABEL_MAP = { 12060: "GA", 12580: "MD", 13820: "AL", 14460: "MA-NH", 16980: "IL-IN-WI", 17140: "OH-KY-IN", 17460: "OH", 19100: "TX", 19740: "CO", 19820: "MI", 26420: "TX", 28140: "MO-KS", 29820: "NV", 31080: "CA", 32820: "TN-MS-AR", 33100: "FL", 33340: "WI", 33460: "MN-WI", 35380: "LA", 35620: "NY-NJ-PA", 36420: "OK", 37980: "PA-NJ-DE-MD", 38060: "AZ", 38300: "PA", 38900: "OR-WA", 39580: "NC", 40060: "VA", 40140: "CA", 40380: "NY", 41700: "TX", 41860: "San Francisco, CA", 41940: "San Jose-Sunnyvale-Santa Clara, CA", 42660: "WA", 45300: "Tampa-St. Petersburg-Clearwater, FL", 47900: "DC-VA-MD-WV", 99998: "All other metropolitan areas", 99999: "Not in a metropolitan area" } ## Convert state column labels into text df["omb13cbsa"] = df["omb13cbsa"].map(LABEL_MAP) df["omb13cbsa"] f, ax = plt.subplots(figsize=(20,20)) sns.boxplot(x='omb13cbsa', y='ratingnh', data=df, palette='vlag') sns.swarmplot(x='omb13cbsa', y='ratingnh', data=df, size=2, color='0.3') plt.title('Rating By State', size=14) plt.xlabel('State', size=12) plt.ylabel('Rating', size=12) plt.show()XGBoostfrom sklearn.model_selection import cross_val_score, KFold from sklearn.metrics import roc_auc_score from bayes_opt import BayesianOptimization from IPython.display import display from ipywidgets import IntProgress from sklearn import metrics import xgboost as xgb import pandas as pd import os if '__file__' in locals(): current_folder = os.path.dirname(os.path.abspath(__file__)) else: current_folder = os.getcwd() set_de_entrenamiento_testing_y_prediccion = '"{}"'.format(os.path.join( current_folder, '..', 'Set de entrenamiento, testing y predicción.ipynb' )) merge_features = '"{}"'.format(os.path.join(current_folder, '..', 'Features', 'Merge features.ipynb')) calcular_auc = '"{}"'.format(os.path.join(current_folder, '..', 'Calcular AUC.ipynb')) predicciones_csv = os.path.join(current_folder, '..', 'predictions.csv') hiperparametros_csv = os.path.join(current_folder, 'hiperparametros', 'xgboost.csv')Cargo el df con los features.pd.options.mode.chained_assignment = None %run $merge_features assert(df_features.shape[0] == df['person'].unique().shape[0])Cargo los sets de entrenamiento, testing y predicción.%run $set_de_entrenamiento_testing_y_prediccion labels_with_features = labels.merge(df_features, how='inner', on='person') data = labels_with_features.drop('label', axis=1) target = labels_with_features['label']Entrenamiento rápido Con cross validation de xgboost. 
Lo bueno de esto es que al final me da el *num_boost_round* óptimo.param = { 'silent': 1, 'objective': 'reg:logistic', 'alpha': 3.845311207046479, 'colsample_bylevel': 0.6605668347627213, 'colsample_bytree': 0.5279014819087092, 'eta': 0.15803667962605694, 'gamma': 6.219264874528072, 'lambda': 1.1181195507921775, 'max_delta_step': 7.592652591386328, 'max_depth': 9, 'min_child_weight': 4.302125582335056, 'subsample': 0.43744176565530823 } cv = 10 # cantidad de splits en el cross validation num_round = 100 # cantidad de veces que se boostea %%time dtrain = xgb.DMatrix(data, label=target) result = xgb.cv(param, dtrain, nfold=cv, metrics='auc', verbose_eval=False, shuffle=False, stratified=False, num_boost_round=num_round)/home/sebas/.envs/trocafone/lib/python3.6/site-packages/xgboost/core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version if getattr(data, 'base', None) is not None and \ /home/sebas/.envs/trocafone/lib/python3.6/site-packages/xgboost/core.py:588: FutureWarning: Series.base is deprecated and will be removed in a future version data.base is not None and isinstance(data, np.ndarray) \El índice + 1 es el *num_boost_round* óptimo.result.loc[[result['test-auc-mean'].idxmax()]]Búsqueda de hiperparámetros con Grid SearchVamos a hacer un kfold con sklearn.**Nota**: está busqueda ya no es óptima, es mejor realizarla usando GridSearchCV de sklearn o Bayesian Optimization que está más abajo.splits = 2 max_depth_values = 2 eta_values = 2 gamma_values = 2 num_round_values = 2 param = { 'max_depth': 30, 'eta': 0, 'gamma': 0, 'silent': 1, 'objective': 'binary:logistic', 'nthread': 10, 'eval_metric': 'auc' } def calculate_auc(): return metrics.roc_auc_score(labels_test['label'], labels_test['label_predicted']) labels_with_features = labels.merge(df_features, how='inner', on='person') columns = list(labels_with_features.columns) columns.remove('label') f = IntProgress(min=0, max=splits*max_depth_values*eta_values*gamma_values*num_round_values) display(f) # display the bar kf = KFold(n_splits=splits, shuffle=False) results = pd.DataFrame(columns=['k', 'max_depth', 'eta', 'gamma', 'num_round', 'auc']) index = 0 k = 0 for train_index, test_index in kf.split(labels): labels_training = labels_with_features.iloc[train_index] labels_test = labels_with_features.iloc[test_index] assert(labels_training.merge(labels_test, how='inner', on='person').shape[0] == 0) train_matrix = xgb.DMatrix(labels_training.loc[:, columns], label=labels_training['label']) test_matrix = xgb.DMatrix(labels_test.loc[:, columns]) for max_depth, eta, gamma, num_round in np.ndindex((max_depth_values, eta_values, gamma_values, num_round_values)): eta=eta/eta_values param['max_depth'] = max_depth param['eta'] = eta param['gamma'] = gamma bst = xgb.train(param, train_matrix, num_round) labels_test['label_predicted'] = bst.predict(test_matrix) results.loc[index] = k, max_depth, eta, gamma, num_round, calculate_auc() index+=1 f.value += 1 k += 1Promedio los hiperparámetros.results_mean = results.groupby(['max_depth', 'eta', 'gamma', 'num_round'])[['auc']].mean() mejor_resultado = results_mean.loc[results_mean.idxmax()] mejor_resultadoEscribo los nuevos resultados en un archivo.params = mejor_resultado.reset_index().to_dict('records')[0] auc = params.pop('auc') hyperparameter_data = { 'algorithm': 'xgboost', 'hyperparameters': params, 'cv_splits': splits, 'auc': auc, 'features': data.columns } %run -i write_hyperparameters.pyHiperparámetros con Bayesian Optimizationpbounds = { 'max_depth': (2, 30), 
'eta': (0, 1), 'gamma': (0, 20), 'min_child_weight': (1, 8), 'max_delta_step': (1, 8), 'subsample': (0, 1), 'colsample_bytree': (0, 1), 'colsample_bylevel': (0, 1), 'lambda': (1, 10), 'alpha': (0, 8) } discrete = ['max_depth'] # parámetros discretos cv_splits = 10 # cantidad de splits en el cv num_round = 100 # cantidad máxima de boostsFalta optimizar otros parámetros discretos: - booster - min_child_weight - max_delta_step - etc...dtrain = xgb.DMatrix(data, label=target) def cv_score_xgb(**param): param['silent'] = 1 param['objective'] = 'reg:logistic' # param['scale_pos_weight'] = 19 # transformo los valores que deben ser discretos for d in discrete: param[d] = int(param[d]) # hago el cv scores = xgb.cv(param, dtrain, nfold=cv_splits, metrics='auc', verbose_eval=False, shuffle=False, stratified=False, num_boost_round=num_round, early_stopping_rounds=20) return scores['test-auc-mean'].max() %%time optimizer = BayesianOptimization(f=cv_score_xgb, pbounds=pbounds) optimizer.probe( params = { 'alpha': 3.845311207046479, 'colsample_bylevel': 0.6605668347627213, 'colsample_bytree': 0.5279014819087092, 'eta': 0.15803667962605694, 'gamma': 6.219264874528072, 'lambda': 1.1181195507921775, 'max_delta_step': 7.592652591386328, 'max_depth': 9, 'min_child_weight': 4.302125582335056, 'subsample': 0.43744176565530823 } ) optimizer.maximize( init_points=0, n_iter=100, ) optimizer.maxGuardo el resultado en un archivo.params = optimizer.max['params'].copy() params['max_depth'] = int(params['max_depth']) params['silent'] = 1 result = xgb.cv(params, dtrain, nfold=cv_splits, metrics='auc', verbose_eval=False, shuffle=False, stratified=False, num_boost_round=num_round) params['num_round'] = result['test-auc-mean'].idxmax() + 1 del params['silent'] hyperparameter_data = { 'algorithm': 'xgboost', 'hyperparameters': params, 'cv_splits': cv_splits, 'auc': optimizer.max['target'], 'features': data.columns } %run -i write_hyperparameters.pyPredecir labels desconocidosdtrain = xgb.DMatrix(data, label=target) param = { 'silent': 1, 'objective': 'reg:logistic', 'alpha': 3.845311207046479, 'colsample_bylevel': 0.6605668347627213, 'colsample_bytree': 0.5279014819087092, 'eta': 0.15803667962605694, 'gamma': 6.219264874528072, 'lambda': 1.1181195507921775, 'max_delta_step': 7.592652591386328, 'max_depth': 9, 'min_child_weight': 4.302125582335056, 'subsample': 0.43744176565530823 } num_round = 10 bst = xgb.train(param, dtrain, num_boost_round=65)Predigo:labels_to_predict_with_features = labels_to_predict.merge(df_features, how='inner', on='person') assert(labels_to_predict.shape[0] == labels_to_predict_with_features.shape[0]) matrix = xgb.DMatrix(labels_to_predict_with_features.loc[:, columns]) labels_to_predict['label'] = bst.predict(matrix) labels_to_predict.to_csv(predicciones_csv)File, path variableslink_key = 'strat_wellbores'Import Formation tops from NPD and Petreldef importNPD(key): """ Import NPD tops from website (https://www.npd.no/en) This will only keep FORMATION level tops, deleting GROUP and MEMBER assigned tops """ # load csv file from link or downloaded file npdtops = load(key) # Rename columns in order to merge with separate file (checkshot from Petrel) columns = list(npdtops.columns) rename = ["Well", "TopMD", "BaseMD", "Surface", "Level", "LithId", "CompletionDate", "WellId", "upDate", "Sync"] rename_cols = dict(zip(columns, rename)) npdtops.rename(columns=rename_cols, inplace=True) # Dealing with duplicate (repeated) tops in each well #tops[ tops.duplicated(subset=['Well', 'MD'], 
keep='last')].Surface.str.contains("GP") #data[ data.duplicated(subset=['Well', 'MD'], keep='last')].Surface.str.contains("MBR") # Remove Group level tops npdtops = npdtops[ ~npdtops['Surface'].str.contains("GP")] # Remove Member level tops npdtops = npdtops[ ~npdtops['Surface'].str.contains("MBR")] # Drop most columns, leaving...? #npdtops.drop(['Level', 'Parent', 'Id', 'LithoStrat', 'LithoStratParent', 'upDate', 'Sync'], axis=1, inplace=True) return npdtops npdtops = importNPD(link_key) npdtops.head() def importPetrel(tops_csv): """ Import Petrel tops with time and depth info """ # Load tops via file tops = pd.read_csv(tops_csv) # Rename columns tops.rename(columns={'Well identifier':'Well', 'Z':'TVDSS','TWT auto':'TWT'}, inplace=True) # Change TVDSS values to positive down borehole tops.TVDSS = tops['TVDSS']*-1 # Remove rows that have Surface named in del_rows del_rows = ['UNDIFFERENTIATED', 'NO FORMAL NAME', 'NO GROUP DEFINED', 'UNDEFINED GP'] for i in range(len(del_rows)): tops = tops[ tops.Surface != del_rows[i]] # Replace misspelled Formation names with correct spelling typos = {'Surface' : {'STリ FM':'STØ FM', 'TUBナEN FM':'TUBÅEN FM', '?RRET FM':'ØRRET FM', 'R?YE FM':'RØYE FM', '?RN FM':'ØRN FM', 'ISBJ?RN FM':'ISBJØRN FM', 'BL坦EROT FM':'BLÆREROT FM'}} tops.replace(typos, inplace=True) # Deal with duplicate names down each borehole #data[ data.duplicated(subset=['Well', 'MD'], keep='last')].Surface.str.contains("GP") #data[ data.duplicated(subset=['Well', 'MD'], keep='last')].Surface.str.contains("MBR") # Remove Group and Member level tops tops = tops[ ~tops['Surface'].str.contains("GP")] tops = tops[ ~tops['Surface'].str.contains("MBR")] """ tops.drop(['Unnamed: 0', 'TWT picked', 'Geological age', 'TVT', 'TST', 'Interpreter', 'Observation number', 'Dip angle', 'Dip azimuth', 'Missing', 'Confidence factor', 'Used by dep.conv.', 'Used by geo mod', 'Symbol', 'Last edited'], axis=1, inplace=True) """ return tops petreltops = importPetrel(tops_csv) tdr_list = [] file_list = open('../sonic_list', 'r') for line in file_list: tdr_list.append(line.rstrip()) def correctedTDR(file): """ Imports Petrel TDR (checkshot corrected sonic log) Imports all logs into one DataFrame X, Y, Z, TWT picked, MD, Well, Average velocity, Interval velocity """ frame = pd.DataFrame() for file in tdr_list: df = pd.read_csv(os.path.join(path, file), sep='\s+', skiprows=14, header=None, na_values='-999') df = df.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) df.TVDSS = df['TVDSS']*-1 df.TWT = df['TWT']*-1 frame = frame.append(df) return frame corrTDR = correctedTDR(tdr_list)Merge NPD tops and Petrel topsdef mergeTops(df1, df2): """ Merge NPD tops with Petrel tops Merge the two because NPD tops have a base column that is used in the isopach calculation """ df = pd.merge(npdtops, petreltops) return df merge = mergeTops(npdtops, petreltops) merge.head()C:\Users\Dunbar\Anaconda3\lib\site-packages\pandas\core\reshape\merge.py:969: UserWarning: You are merging on int and float columns where the float values are not equal to their int representation 'representation', UserWarning)Edit merged tops DataFrame. 
Add isopach, isochron and interval velocitydef isopach(df): """ # Calculate isopach, df input from mergeTops """ df['Thickness'] = df['Base'] - df['MD'] return df def wellDict(df): """ create well dictionary as input to isochron """ wells = list(df.Well.unique()) well_list = [] for well in wells: well = well.replace('/','_') well = well.replace('-', '_') well = well.replace(" ","") well = 'W' + well well_list.append(well) well_dict = {} for i,j in zip(well_list, wells): well_dict[i]=j return well_dict def isochron(df): """ # df input from mergeTops """ frame = pd.DataFrame() well_dict = wellDict(df) for k,v in well_dict.items(): k = df[df['Well']==v].sort_values(by=['MD']) k['TWT_d'] = k['TWT'].shift(-1) k['Isochron'] = (k['TWT_d'] - k['TWT']) / 2000 frame = frame.append(k) return frame def vint(df): """ Add interval velocity column """ df['Vint'] = df['Thickness'] / df['Isochron'] return df def midPointDepth(df): """ Add a midpoint depth value """ df['MidPointDepth'] = df['TVDSS'] + (df['Thickness'] / 2) return df merge = isopach(merge) merge = isochron(merge) merge = vint(merge) merge = midPointDepth(merge) plot_tops = merge.drop(['MD', 'Base', 'X', 'Y', 'TWT', 'Thickness', 'TWT_d', 'Isochron', 'Vint'], axis=1) well = '7120/1-4 S' tops = plot_tops[plot_tops['Well'] == well]['Surface'] depths = plot_tops[plot_tops['Well'] == well]['TVDSS'] merge[ merge['Surface']=='SNADD FM']Plottingfig, ax = plt.subplots() ax.scatter(merge[ merge['Surface'] == 'SNADD FM']['Vint'], merge[ merge['Surface'] == 'SNADD FM']['MidPointDepth']) ### def velocity_logs(top_depth, bottom_depth): # logs=data[(data.TVDSS >= top_depth) & (data.TVDSS <= bottom_depth)] fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(15,15), sharey=True) # fig.suptitle("7321/4-1 T2", fontsize=22) fig.subplots_adjust(top=0.9,wspace=0.1) #General setting for all axis for axes in ax: axes.set_ylim (top_depth,bottom_depth) axes.invert_yaxis() axes.yaxis.grid(True) axes.get_xaxis().set_visible(False) for (i,j) in zip(depths,tops): if ((i>=top_depth) and (i<=bottom_depth)): axes.axhline(y=i, linewidth=0.5, color='black') axes.text(0.1, i ,j, horizontalalignment='center',verticalalignment='center') ########################################################### # 1st track: DT ax01=ax[0].twiny() ax01.grid(True) ax01.set_xlim(1000,0) ax01.spines['top'].set_position(('outward',0)) ax01.set_xlabel('Vint[m/s]') ax01.plot(data.data['AC'], data.data['TVDSS'], label='DT[usec/m]', color='blue') ax01.set_xlabel('DT[usec/m]', color='blue') ax01.tick_params(axis='x', colors='blue') # 2nd track: Velocity from sonic ax02=ax[1].twiny() ax02.grid(True) ax02.set_xlim(0,7000) ax02.spines['top'].set_position(('outward',0)) ax02.set_xlabel('TDR (corrected) [m/s]') ax02.plot(corrTDR[ corrTDR['Well']=='7120/1-3']['Vint'], corrTDR[ corrTDR['Well']=='7120/1-3']['TVDSS'], label='TDR (corr) [m/s]', color='blue') ax02.set_xlabel('TDR (corr) [m/s]', color='blue') ax02.tick_params(axis='x', colors='blue') # 3rd track: Checkshot ax03=ax[2].twiny() ax03.grid(True) ax03.set_xlim(0,7000) ax03.spines['top'].set_position(('outward',0)) ax03.set_xlabel('Vint[m/s]') ax03.scatter(merge[ merge['Well']=='7120/1-4 S']['Vint'], merge[ merge['Well']=='7120/1-4 S']['MidPointDepth'], label='Vint[m/s]', color='green') ax03.set_xlabel('Vint[m/s]', color='green') ax03.tick_params(axis='x', colors='green') # 4th track: TDR calibrated synthetic interval velocity ax04=ax[3].twiny() ax04.grid(True) ax04.set_xlim(1000,0) ax04.spines['top'].set_position(('outward',0)) ax04.set_xlabel('Median [m/s]') 
ax04.plot(sonicMedFilt, data.data['TVDSS'], label='Median [m/s]', color='red') ax04.set_xlabel('Median [m/s]', color='orange') ax04.tick_params(axis='x', colors='orange') velocity_logs(0,2500) file1 = r'./sonic_logs/7120_1-1R2_TDR_CalTDR_DTvp-RHOB_CSDea' data1 = pd.read_csv(file1, sep='\s+', skiprows=14, header=None, na_values='-999') data1 = data1.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data1.TVDSS = data1['TVDSS']*-1 data1.TWT = data1['TWT']*-1 file2 = r'./sonic_logs/7120_1-3_TDR_CalTDR_DTvp_CS-DEA_Corr1400-1900ms_BSp6ms' data2 = pd.read_csv(file2, sep='\s+', skiprows=14, header=None, na_values='-999') data2 = data2.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data2.TVDSS = data2['TVDSS']*-1 data2.TWT = data2['TWT']*-1 file3 = r'./sonic_logs/7120_1-4S_TDR_CalTDR_DTvp_DEN_CSDea_BS0ms' data3 = pd.read_csv(file3, sep='\s+', skiprows=14, header=None, na_values='-999') data3 = data3.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data3.TVDSS = data3['TVDSS']*-1 data3.TWT = data3['TWT']*-1 file4 = r'./sonic_logs/7120_1-5_TDR_CalTDR_AC-DEN_CSDiskos' data4 = pd.read_csv(file4, sep='\s+', skiprows=14, header=None, na_values='-999') data4 = data4.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data4.TVDSS = data4['TVDSS']*-1 data4.TWT = data4['TWT']*-1 file5 = r'./sonic_logs/7120_2-1_TDR_CalTDR_DTvp-RHOB_BS0ms' data5 = pd.read_csv(file5, sep='\s+', skiprows=14, header=None, na_values='-999') data5 = data5.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data5.TVDSS = data5['TVDSS']*-1 data5.TWT = data5['TWT']*-1 file6 = r'./sonic_logs/7120_2-2_TDR_CalTDR_LN17_EW' data6 = pd.read_csv(file6, sep='\s+', skiprows=14, header=None, na_values='-999') data6 = data6.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data6.TVDSS = data6['TVDSS']*-1 data6.TWT = data6['TWT']*-1 file7 = r'./sonic_logs/7120_2-3S_TDR_CalTDR_EW1500-2006_BS0ms' data7 = pd.read_csv(file7, sep='\s+', skiprows=14, header=None, na_values='-999') data7 = data7.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data7.TVDSS = data7['TVDSS']*-1 data7.TWT = data7['TWT']*-1 file8 = r'./sonic_logs/7219_12-1_TDR_CalTDR_AC_CSDiskos' data8 = pd.read_csv(file8, sep='\s+', skiprows=14, header=None, na_values='-999') data8 = data8.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data8.TVDSS = data8['TVDSS']*-1 data8.TWT = data8['TWT']*-1 file9 = r'./sonic_logs/7219_12-3S_TDR_CalTDR' data9 = pd.read_csv(file9, sep='\s+', skiprows=14, header=None, na_values='-999') data9 = data9.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data9.TVDSS = data9['TVDSS']*-1 data9.TWT = data9['TWT']*-1 file10 = r'./sonic_logs/7220_10-1_TDR_CalTDR_AC_CSDea_EW' data10 = pd.read_csv(file10, sep='\s+', skiprows=14, header=None, na_values='-999') data10 = data10.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data10.TVDSS = data10['TVDSS']*-1 data10.TWT = data10['TWT']*-1 file11 = r'./sonic_logs/7220_11-1_TDR_CalTDR_AC-RHOB_CSDea' data11 = pd.read_csv(file11, sep='\s+', skiprows=14, header=None, na_values='-999') data11 = data11.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data11.TVDSS = 
data11['TVDSS']*-1 data11.TWT = data11['TWT']*-1 file12 = r'./sonic_logs/7220_11-2_TDR_CalTDR_AC-DEN_LN17_CS' data12 = pd.read_csv(file12, sep='\s+', skiprows=14, header=None, na_values='-999') data12 = data12.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data12.TVDSS = data12['TVDSS']*-1 data12.TWT = data12['TWT']*-1 file13 = r'./sonic_logs/7220_11-3AR_TDR_CalTDR_AC-DEN_LN17_BS0ms' data13 = pd.read_csv(file13, sep='\s+', skiprows=14, header=None, na_values='-999') data13 = data13.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data13.TVDSS = data13['TVDSS']*-1 data13.TWT = data13['TWT']*-1 file14 = r'./sonic_logs/7220_11-4_TDR_CalTDR_AC-DEN_LN17_CSDea' data14 = pd.read_csv(file14, sep='\s+', skiprows=14, header=None, na_values='-999') data14 = data14.rename(columns=({0:'X', 1:'Y', 2:'TVDSS', 3:'TWT', 4:'MD', 5:'Well', 6:'Vavg', 7:'Vint'})) data14.TVDSS = data14['TVDSS']*-1 data14.TWT = data14['TWT']*-1 colors = ["red", "blue", "green", "orange", "black", "purple", "pink", "teal"] def velocity_logs(top_depth, bottom_depth): logs=data1[(data1.TVDSS >= top_depth) & (data1.TVDSS <= bottom_depth)] fig, ax = plt.subplots(nrows=1, ncols=8, figsize=(15,15), sharey=True) fig.suptitle("Alta", fontsize=22) fig.subplots_adjust(top=0.9,wspace=0.1) #General setting for all axis for axes in ax: axes.set_ylim (top_depth,bottom_depth) axes.invert_yaxis() axes.yaxis.grid(True) axes.get_xaxis().set_visible(False) # for (i,j) in zip(W7120_1_1_R2['TVDSS'], W7120_1_1_R2['Surface']): # if ((i>=top_depth) and (i<=bottom_depth)): # axes.axhline(y=i, linewidth=0.5, color='black') # axes.text(0.1, i ,j, horizontalalignment='center',verticalalignment='center') ax[0]=ax[0].twiny() ax[0].grid(True) ax[0].set_xlim(0,7000) ax[0].spines['top'].set_position(('outward',0)) ax[0].set_xlabel('Vint[m/s]') ax[0].plot(data1.Vint, data1.TVDSS, label='Vint[m/s]', color=colors[0]) ax[0].set_xlabel('Vint[m/s]', color=colors[0]) ax[0].tick_params(axis='x', colors=colors[0]) ax[0].set_xlabel('Vavg[m/s]') ax[0].plot(data1.Vavg, data1.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[0].set_xlabel('Vavg[m/s]', color=colors[1]) ax[1]=ax[1].twiny() ax[1].grid(True) ax[1].set_xlim(0,7000) ax[1].spines['top'].set_position(('outward',0)) ax[1].set_xlabel('Vint[m/s]') ax[1].plot(data2.Vint, data2.TVDSS, label='Vint[m/s]', color=colors[0]) ax[1].set_xlabel('Vint[m/s]', color=colors[0]) ax[1].tick_params(axis='x', colors=colors[0]) ax[1].set_xlabel('Vavg[m/s]') ax[1].plot(data2.Vavg, data2.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[1].set_xlabel('Vavg[m/s]', color=colors[1]) ax[2]=ax[2].twiny() ax[2].grid(True) ax[2].set_xlim(0,7000) ax[2].spines['top'].set_position(('outward',0)) ax[2].set_xlabel('Vint[m/s]') ax[2].plot(data3.Vint, data3.TVDSS, label='Vint[m/s]', color=colors[0]) ax[2].set_xlabel('Vint[m/s]', color=colors[0]) ax[2].tick_params(axis='x', colors=colors[0]) ax[2].set_xlabel('Vavg[m/s]') ax[2].plot(data3.Vavg, data3.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[2].set_xlabel('Vavg[m/s]', color=colors[1]) ax[3]=ax[3].twiny() ax[3].grid(True) ax[3].set_xlim(0,7000) ax[3].spines['top'].set_position(('outward',0)) ax[3].set_xlabel('Vint[m/s]') ax[3].plot(data4.Vint, data4.TVDSS, label='Vint[m/s]', color=colors[0]) ax[3].set_xlabel('Vint[m/s]', color=colors[0]) ax[3].tick_params(axis='x', colors=colors[0]) ax[3].set_xlabel('Vavg[m/s]') ax[3].plot(data4.Vavg, data4.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[3].set_xlabel('Vavg[m/s]', 
color=colors[1]) ax[4]=ax[4].twiny() ax[4].grid(True) ax[4].set_xlim(0,7000) ax[4].spines['top'].set_position(('outward',0)) ax[4].set_xlabel('Vint[m/s]') ax[4].plot(data5.Vint, data5.TVDSS, label='Vint[m/s]', color=colors[0]) ax[4].set_xlabel('Vint[m/s]', color=colors[0]) ax[4].tick_params(axis='x', colors=colors[0]) ax[4].set_xlabel('Vavg[m/s]') ax[4].plot(data5.Vavg, data5.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[4].set_xlabel('Vavg[m/s]', color=colors[1]) ax[5]=ax[5].twiny() ax[5].grid(True) ax[5].set_xlim(0,7000) ax[5].spines['top'].set_position(('outward',0)) ax[5].set_xlabel('Vint[m/s]') ax[5].plot(data6.Vint, data6.TVDSS, label='Vint[m/s]', color=colors[0]) ax[5].set_xlabel('Vint[m/s]', color=colors[0]) ax[5].tick_params(axis='x', colors=colors[0]) ax[5].set_xlabel('Vavg[m/s]') ax[5].plot(data6.Vavg, data6.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[5].set_xlabel('Vavg[m/s]', color=colors[1]) ax[6]=ax[6].twiny() ax[6].grid(True) ax[6].set_xlim(0,7000) ax[6].spines['top'].set_position(('outward',0)) ax[6].set_xlabel('Vint[m/s]') ax[6].plot(data7.Vint, data7.TVDSS, label='Vint[m/s]', color=colors[0]) ax[6].set_xlabel('Vint[m/s]', color=colors[0]) ax[6].tick_params(axis='x', colors=colors[0]) ax[6].set_xlabel('Vavg[m/s]') ax[6].plot(data7.Vavg, data7.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[6].set_xlabel('Vavg[m/s]', color=colors[1]) ax[7]=ax[7].twiny() ax[7].grid(True) ax[7].set_xlim(0,7000) ax[7].spines['top'].set_position(('outward',0)) ax[7].set_xlabel('Vint[m/s]') ax[7].plot(data8.Vint, data8.TVDSS, label='Vint[m/s]', color=colors[0]) ax[7].set_xlabel('Vint[m/s]', color=colors[0]) ax[7].tick_params(axis='x', colors=colors[0]) ax[7].set_xlabel('Vavg[m/s]') ax[7].plot(data8.Vavg, data8.TVDSS, label='Vavg[m/s]', color=colors[1]) ax[7].set_xlabel('Vavg[m/s]', color=colors[1])
class Array(object):
    def __init__(self, n):
        self.__n = n
        self.__array = [0 for x in range(self.__n)]

    def to_string(self):
        print("----")
        print(self.__array)

    def len(self):
        return self.__n

    def set_item(self, index, value):
        if self.__check_limits__(index):
            self.__array[index] = value
        else:
            raise Exception('Index out of bounds')

    def get_item(self, index):
        return self.__array[index]

    def clear(self, value):
        self.__array = [value for x in range(self.__n)]

    def __check_limits__(self, index):
        return index >= 0 and index < self.__n

class Array2D:
    def __init__(self, rows, cols, value):
        self.__cols = cols
        self.__rows = rows
        self.__array = [[value for x in range(self.__cols)] for y in range(self.__rows)]

    def to_string(self):
        [print("---", end="") for x in range(self.__cols)]
        print("")
        for i in self.__array:
            print(i)
        [print("---", end="") for x in range(self.__cols)]
        print("")

    def get_num_rows(self):
        return self.__rows

    def get_num_cols(self):
        return self.__cols

    def get_item(self, row, col):
        return self.__array[row][col]

    def set_item(self, row, col, value):
        self.__array[row][col] = value

    def clearing(self, value=0):
        for i in range(self.__rows):
            for j in range(self.__cols):
                self.__array[i][j] = value

class Array3D:
    def __init__(self, depth, rows, cols, value):
        self.__depth = depth
        self.__cols = cols
        self.__rows = rows
        self.__array = [[[value for x in range(self.__cols)] for y in range(self.__rows)] for z in range(self.__depth)]

    def to_string(self):
        [print("---", end="") for x in range(self.__cols)]
        print("")
        dim = 1
        for d in self.__array:
            print(f"========= Dim {dim} ===========")
            for row in d:
                print(row)
            dim += 1
        [print("---", end="") for x in range(self.__cols)]
        print("")

    def get_num_rows(self):
        return self.__rows

    def get_num_cols(self):
        return self.__cols

    def get_depth(self):
        return self.__depth

    def get_item(self, depth, row, col):
        return self.__array[depth][row][col]

    def set_item(self, depth, row, col, value):
        self.__array[depth][row][col] = value

    def clear(self, value=0):
        for i in range(self.__depth):
            for j in range(self.__rows):
                for k in range(self.__cols):
                    self.set_item(i, j, k, value)
Zero-Noise Extrapolation*Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.* Outline**Note: The number of credit points in the Quantum Hub account and the time spent running this tutorial program will vary based on the parameters users input. Users need 28 points to obtain the results for the default parameters in this tutorial. If you want to get more points, please contact us on [Quantum Hub](https://quantum-hub.baidu.com). First, you should log into [Quantum Hub](https://quantum-hub.baidu.com), then enter the "Feedback" page, choose "Get Credit Point", and input the necessary information. Submit your feedback and wait for a reply.**This tutorial introduces an efficient and general method for Quantum Error Mitigation: Zero-Noise Extrapolation (ZNE), covering its theory and implementation in Quanlse. We use the single-qubit random Clifford sequence as a benchmark to illustrate how to use the ZNE method in Quanlse step by step. The outline of this tutorial is as follows:- ZNE: Theory - Introduction - Noise rescaling - Extrapolation- ZNE: Practice - Computation task description - Quanlse implementation- Summary- Reference ZNE: Theory IntroductionZero-Noise Extrapolation (ZNE) is a powerful technique for mitigating quantum errors in quantum computing. Notice that ZNE does not directly reduce the inherent noise in the quantum computing process, but instead infers the ideal computation result by repeating the same quantum computing process many times with different levels of noise \[1, 2\]. The advantage of ZNE is that we need to know neither the exact form of the noise nor how to control the noise source.The implementation process of this method is shown in the figure below. The figure shows that the ZNE method is composed of two steps: rescaling noise and extrapolating. Among various noise rescaling techniques, time-variant rescaling is a robust and promising one. This technique stretches the system Hamiltonian in the time domain according to some rescaling coefficient to obtain an equivalently noise-rescaled final quantum state. For simplicity, we use the Richardson extrapolation in our Quanlse implementation, a mature numerical algorithm that can, in principle, eliminate errors of any order. We remark that there are many other extrapolation methods, such as polynomial and exponential extrapolation \[3\].![zne-profile](figures/zne-profile.png) Noise rescalingOn the physical level, a quantum computing process with noise can be described by the Lindblad master equation:$$\frac{\partial}{\partial t}\rho(t) = -i[K,\rho]+\lambda\mathcal{L}(\rho),$$for time $t\in[0,T]$. In this formulation, the Hamiltonian $K$ (which might be time-dependent) represents the ideal coherent evolution we aim to implement, while the Lindblad operator $\mathcal{L}$ represents the noisy process we hope to mitigate.
We emphasize that there is no need to know the exact form of the generator $\mathcal{L}$. We only require that it is *time-invariant* and its effect is dominated by a scalar noise parameter $\lambda$. Let $\rho_\lambda(T)$ be the final state after evolution time $T$. Given a positive coefficient $c$, can we obtain a noise-rescaled final state $\rho_{c\lambda}(T)$? Surprisingly, this is possible whenever the Lindblad operator is time-invariant. Consider the following procedure. We implement a time-stretching and amplitude-contracting version of the system Hamiltonian via:$$K(t)\to K'(t) = \frac{K(t/c)}{c}.$$What's more, we stretch the system evolution time to $cT$. It has been proven that this rescaled Hamiltonian $K'(t)$ will lead to a new evaluation whose final state is exactly $\rho_{c\lambda}(T)$ numerically \[1\].Experimentally, stretching the evolution time ($T\to cT$) is easy to implement. Now let's analyze how to obtain the rescaled Hamiltonian $K'(t)$. In general, the systematic Hamiltonian is composed of time-independent drift items and time-dependent control ones, and the latter act on quantum states in the form of driving pulses. As an example, we learn from the [Single-Qubit Gate Tutorial](https://quanlse.baidu.com//doc/tutorial-single-qubit) in Quanlse that the driving pulses of the Hadamard gate$$H=\frac{1}{\sqrt{2}}\begin{pmatrix}1&1\\1&-1\end{pmatrix}$$are optimized as one $X$-channel pulse and one $Y$-channel pulse. As so, to implement the rescaled Hamiltonian is to stretch the corresponding driving pulses. In the following, we show by case the rescaled driving pulses of the optimized Hadamard gate with rescaling coefficients $1$ (does not rescale), $1.25$, and $1.5$.![zne-profile](figures/zne-pulse-rescale-h.png)To close this section, we comment that the noise parameter $\lambda$ might also be other physical-relevant quantities, such as infidelity, temperature, error probability, variational parameter, etc. For example, we implement this ZNE method in Quanlse by treating the infidelity of the quantum circuit as the noise parameter $\lambda$. ExtrapolationIn numeric analysis, Richardson extrapolation is an efficient numerical method commonly used to eliminate low-order estimation errors. This method assumes that the estimated value $E(\lambda)$ could be expressed as a power series of $\lambda$ with respect to the ideal value $E^{\ast}\equiv E(\lambda=0)$: $$E(\lambda) = \sum_{k=0}^{d} a_k\lambda^k + O(\lambda^{d+1}),$$where $E^{\ast} = a_0$, $\{a_k\}$ is a set of coefficients to be determined, and $d$ is the order we aim to extrapolate. If we can obtain a set of estimators $\left\{E(\lambda_j)\right\}_{j=1}^{d+1}$ with different parameters, we can construct a new estimator $E^d(\lambda)$ from this set. In comparison with the original noisy estimator $E(\lambda)$, this new estimator has a higher-precision estimation error (to $d$-order) \[4\].![extrapolation](figures/zne-extrapolation.png)In the above figure, we demonstrate the Richardson extrapolation by setting $d=2$. From the figure, we can see that the data points are linearly fitted, and the ideal value $E^{\ast}$ can be inferred via extrapolation. It is worth noting that the Richardson extrapolation is just one of many extrapolation methods. It works well only when the power series assumption is valid. Luckily, this assumption holds naturally within the above Lindblad master equation framework, as justified in \[1\]. 
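Before moving to the practice section, it may help to see the extrapolation step in isolation. The following is a minimal NumPy sketch of Richardson extrapolation as described above: fit the noisy estimates $E(\lambda_j)$ with a degree-$d$ polynomial and read off its value at $\lambda=0$. The quadratic toy noise model, the sampled noise levels, and the `richardson_extrapolate` helper are illustrative assumptions, not Quanlse code.

```python
import numpy as np

def richardson_extrapolate(lambdas, values, order):
    """Fit E(lambda) with a degree-`order` polynomial and return its extrapolated value at lambda = 0."""
    coeffs = np.polyfit(lambdas, values, deg=order)
    return np.polyval(coeffs, 0.0)

# Toy noise model (an assumption for illustration): E(lambda) = 1 - 0.8*lambda + 0.3*lambda^2, ideal value 1
lambdas = np.array([0.02, 0.04, 0.06])             # rescaled noise levels lambda_j
noisy = 1.0 - 0.8 * lambdas + 0.3 * lambdas ** 2   # "measured" expectation values E(lambda_j)

print("least-noisy raw value  :", noisy[0])
print("1st-order extrapolation:", richardson_extrapolate(lambdas[:2], noisy[:2], order=1))
print("2nd-order extrapolation:", richardson_extrapolate(lambdas, noisy, order=2))
```

With this exactly quadratic toy model, the second-order estimate recovers the ideal value of $1$ up to floating-point error, while even the least-noisy raw value does not.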
ZNE: Practice Computation task description**Random Clifford circuit**A random Clifford circuit is a quantum circuit composed of randomly generated Clifford unitary gates, which has been intensively applied to benchmark the average error rates of quantum circuits. Here we consider the identity-equivalent single-qubit Clifford circuit composed of $n$ sequential random Clifford gates with the corresponding inverse gate attached to the end. As shown in the figure below, each $C_j$ is a randomly generated Clifford unitary gate while $C_{\rm inv}$ is the inverse gate of all the preceding $n$ Clifford gates, that is,$$C_{\rm inv}C_n C_{n-1}\cdots C_1=I.$$**Computation task**Consider the following quantum computation task. The initial state is $|0\rangle = \begin{pmatrix} 1\\0\end{pmatrix}$, the evolution circuit is an identity-equivalent Clifford circuit of size $n+1$, and the quantum observable is $A=|0\rangle\langle 0|=\begin{pmatrix}1&0\\0&0 \end{pmatrix}$. ![zne-clifford-circuit](figures/zne-clifford-circuit.png)Ideally, the final output quantum state will be $|0\rangle$ since the evolution circuit is identity-equivalent. As so, the expectation value of $A$ will be $\langle A\rangle_{\rm ideal}=1$, no matter how long the Clifford circuit is. However, due to the inevitable quantum noise when implementing the quantum circuit, the output state is no longer $|0\rangle$, resulting in an incorrect expectation value $\langle A\rangle_{\rm noisy}$. What's worse, the deeper the identity-equivalent quantum circuit is, the more that $\langle A\rangle_{\rm noisy}$ deviates from the ideal value $1$. Notice that we compute the expectation value numerically after we obtain the final output state.In the following, we show that using the ZNE method offered by the Quanlse Cloud Service, we can mitigate the quantum noise dramatically, and the mitigated expectation value $\langle A\rangle_{\rm miti}$ approaches the ideal value $\langle A\rangle_{\rm ideal}$ for deep Clifford circuits. **Data processing procedure**We describe the data processing procedure in detail to fully reveal the power of the ZNE method implemented in Quanlse. For each $k=1,2,\cdots,n$, we select the first $k$ gates of length $n$ Clifford sequence, compute the corresponding inverse gate, and construct the identity-equivalent circuit of length $k+1$. Then, for this circuit, we calculate the expectation value with the input state being $|0\rangle$ and the quantum observable being $A$. We set the maximal extrapolation order to $d$ and compute the error-mitigated values of orders ranging from $1$ to $d$. Finally, we obtain $n\times d$ extrapolated values and $n\times (d+1)$ rescaling values. Quanlse implementation **Import necessary modules and functions**To run the program below, you need to install [Quanlse](https://quanlse.baidu.com//doc/install) first. 
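As an aside before the Quanlse implementation, the identity-equivalent construction $C_{\rm inv}C_n\cdots C_1=I$ and the resulting ideal expectation $\langle A\rangle_{\rm ideal}=1$ can be sanity-checked with plain NumPy. The sketch below builds random single-qubit Cliffords as short words in the generators $H$ and $S$; the `random_clifford` helper is a hypothetical stand-in for illustration, not the `randomCircuit` function used later.

```python
import numpy as np

# Generators of the single-qubit Clifford group (up to global phase)
H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)
S = np.array([[1, 0], [0, 1j]], dtype=complex)

rng = np.random.default_rng(123)

def random_clifford():
    """A random single-qubit Clifford built as a short random word in H and S (illustrative only)."""
    U = np.eye(2, dtype=complex)
    for g in rng.choice(["H", "S"], size=rng.integers(1, 6)):
        U = (H if g == "H" else S) @ U
    return U

n = 5
gates = [random_clifford() for _ in range(n)]
product = np.linalg.multi_dot(gates[::-1])  # C_n ... C_2 C_1
C_inv = product.conj().T                    # inverse of the whole preceding sequence

state = np.array([1, 0], dtype=complex)     # |0>
A = np.diag([1, 0]).astype(complex)         # observable |0><0|
final = C_inv @ product @ state
print("ideal <A> =", np.real(final.conj() @ A @ final).round(12))  # -> 1.0
```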
Then you need to import the following packages from Quanlse and some supporting Python libraries:from Quanlse.remoteZNE import remoteZNEMitigation as zneMitigation from Quanlse.ErrorMitigation.ZNE.Extrapolation import extrapolate from Quanlse.ErrorMitigation.Utils.Utils import computeIdealExpectationValue, \ computeIdealEvolutionOperator, fromCircuitToHamiltonian, randomCircuit, \ computeInverseGate from Quanlse.ErrorMitigation.Utils.Visualization import plotZNESequences from Quanlse.Utils.Functions import project, expect from Quanlse.Utils.Infidelity import unitaryInfidelity from Quanlse.remoteSimulator import remoteSimulatorRunHamiltonian import numpy as np from copy import deepcopyUsually, the zero-noise extrapolation method are computationally expensive. To deal with this issue, we provide our cloud service that could speed up this process significantly. To use the Quanlse Cloud Service, the users need to acquire a token from the [Quantum Leaf](http://quantum-hub.baidu.com) platform.from Quanlse import Define Define.hubToken = ''**Construct random Clifford circuit**We use the built-in `randomCircuit` function to create a random Clifford sequence of length `numSeq`, whose data type is a `List` including a series of `CircuitLine` objects. Each `CircuitLine` describes a layer of the target quantum circuit. In this example, each layer consists of only one single-qubit gate.# Set the maximal length of the random Clifford circuit numSeq = 5 numQubits = 1 # Set the input state as |0> and the quantum observable as |0><0| state = np.diag([1, 0]).astype(complex) A = np.diag([1, 0]).astype(complex) # Set the maximal extrapolation order order = 2 # Considering the reproducibility of our calculation result, we may as well set the "random seed" as a fixed value (e.g. 123) circuit = randomCircuit(qubits=1, numSeq=numSeq, seed=123)**Compute the ideal and noisy expectation values**For a quantum circuit of length $n$, we could use the built-in `computeInverseGate` function to calculate its inverse gate and then attach it to the end of the original quantum circuit. In this way, we construct an identity-equivalent quantum circuit totally including $n+1$ gates.Based on this quantum circuit and other initial parameters, we could compute both the ideal expectation value (via numerical simulation) and the noisy expectation value suffering from implementation error. 
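For clarity, the "ideal expectation value (via numerical simulation)" amounts to evaluating $\langle A\rangle = \mathrm{Tr}(A\,U\rho U^\dagger)$ for the circuit unitary $U$. Here is a minimal sketch of that calculation; the `expectation_value` helper is only illustrative and is not the Quanlse `computeIdealExpectationValue` implementation.

```python
import numpy as np

def expectation_value(A, U, rho):
    """<A> = Tr(A U rho U^dagger): observable A, circuit unitary U, input density matrix rho."""
    return float(np.real(np.trace(A @ U @ rho @ U.conj().T)))

rho0 = np.diag([1, 0]).astype(complex)   # input state |0><0|
A = np.diag([1, 0]).astype(complex)      # observable |0><0|
U_identity = np.eye(2, dtype=complex)    # an identity-equivalent circuit

print(expectation_value(A, U_identity, rho0))  # 1.0, the <A>_ideal quoted in the text
```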
For reference, we compute the infidelity between the ideal evolutionary operator and the noisy evolutionary operator.# Construct the identity-equivalent quantum circuit by appending an inverse gate to the end circuitIdentity = circuit + [computeInverseGate(circuit)] # Compute the ideal expectation value (should be 1.0) and the ideal evolution operator (should be an identity operator) valueIdeal = computeIdealExpectationValue(state, circuitIdentity, A) unitaryIdeal = computeIdealEvolutionOperator(circuitIdentity) # Compute the optimized Hamiltonian for implementing the quantum circuit # The built-in Quanlse Scheduler will be called ham = fromCircuitToHamiltonian(circuitIdentity) # Use the given Hamiltonian to compute the implemented evolution unitary, the infidelity, and the noisy expectation value result = remoteSimulatorRunHamiltonian(ham) unitaryNoisy = project(result.result[0]["unitary"], ham.subSysNum, ham.sysLevel, 2) infid = unitaryInfidelity(unitaryIdeal, unitaryNoisy, numQubits) noisyValue = expect(A, unitaryNoisy @ state @ unitaryNoisy.conj().T) # Print the ideal and noisy expectation values print("The ideal expectation value: {}; The noisy expectation: {}".format(valueIdeal, noisyValue)) print("The ideal evolutionary operator:") print(unitaryIdeal.round(3)) print('The noisy evolutionary operator:') print(unitaryNoisy.round(3)) print("The implemented evolution unitary has infidelity: ", infid)**Error mitigation via ZNE**There exists a deviation between the ideal expectation value and the noisy expectation value. As we have explained in the Theory section, ZNE is a feasible and efficient method to mitigate this kind of deviation.Using the built-in `extrapolate` function, we could calculate the mitigated expectation value from a set of rescaling coefficients and corresponding noise-rescaling values. In comparison with the original noisy expectation value, The mitigated expectation value has a higher estimation precision. In Quanlse, the ZNE method is implemented and is available via the `zneMitigation` interface. It includes both the noise-rescaling and the extrapolating procedures. `zneMitigation` returns a mitigated expectation value (to the $d$-th order), a set of infidelities (a list of $d+1$ real numbers), and a set of noisy expectation values of different noise levels (a list of $d+1$ real numbers).According to the data processing procedure described above, we need to execute the `zneMitigation` function for `numSeq` times. The process for optimizing the target Hamiltonian will perform `numSeq` times in total, which is computationally expensive. 
As so, we use the Quanlse Cloud Service to accelerate the optimizing process.EsRescaled = [] # EsRescaled size: [numSeq, order + 1] EsExtrapolated = [] # EsExtrapolated size: [numSeq, order] EsIdeal = [] # EsIdeal size: [numSeq,] Infidelities = [] # Infidelities size: [numSeq, order + 1] for length in range(1, numSeq + 1): print('==' * 20) print("Clifford circuit length:", length) # For each sequence, append the equivalent-inverse gate of all the preceding quantum gates # For each sequence, its length becomes: [1, 2, ..., numSeq] + 1 circuitPart = deepcopy(circuit[:length]) lastGate = computeInverseGate(circuitPart) circuitPart.append(lastGate) # Compute ideal expectations firstly for subsequent comparison in figure EsIdeal.append(computeIdealExpectationValue(state, circuitPart, A)) # Temporary extrapolated values of each order for each-length circuit mitigatedValues = [] # Use the Scheduler to compute the optimal Hamiltonian for this circuit ham = fromCircuitToHamiltonian(circuitPart) # Rescale order: [c_0, c_1, ..., c_d]; extrapolation order: d mitigatedValueHighest, infidelities, noisyValues = zneMitigation(state, circuitPart, A, ham=ham, order=order) # Rescale order: [c_0, c_1], [c_0, c_1, c_2], ...., [c_0, ..., c_{d-1}] # for d in [1, ..., d - 1]: for d in range(1, order): mitigatedValue = extrapolate(infidelities[:(d + 1)], noisyValues[:(d + 1)], type='richardson', order=d) mitigatedValues.append(mitigatedValue) mitigatedValues.append(mitigatedValueHighest) EsExtrapolated.append(mitigatedValues) EsRescaled.append(noisyValues) Infidelities.append(infidelities)**Result and discussion**# X-axis represents length of quantum circuit, Y-axis represents expectation values plotZNESequences(EsRescaled, EsExtrapolated, EsIdeal, fileName='zne-single-qubit-clifford')As we can tell from the figure, our noise-rescaling strategy *does* improve the precision of the estimated expectation value. What's more, the larger the rescaling coefficient is, the larger the resulting noisy expectation value bias. It anticipates that rescaling would lead to *worse* Hamiltonian for the quantum circuit implementation since the Hamiltonian optimized by Quanlse `Scheduler` is already the best. The power of extrapolation is self-evident as the precision of mitigated expectation values is improved significantly. Interestingly, just first-order or second-order extrapolation yield estimated expectation values could approach the ideal expectation to a great extent. One might notice that in the above extrapolation plot, the $1$-order rescaled expectation values, which are obtained via the optimized Hamiltonians without rescaling, are very close to the ideal expectation value. It is because Quanlse can generate the single-qubit driving Hamiltonian with extremely high fidelity. To better illustrate the extrapolation technique, we compute the error mitigated values using only the $2$ and $3$-order rescaled expectation values. 
Remarkably, the mitigated expectation values are pretty close to the ideal expectation value, witnessing the power of the Richardson extrapolation method.InfidelitiesPartial = np.array(Infidelities)[:, 1:] EsRescaledPartial = np.array(EsRescaled)[:, 1:] orderPartial = order - 1 EsExtrapolatedPartial = [] # size: [numSeq, order + 1] for i in range(numSeq): mitigatedValues = [] for d in range(1, orderPartial + 1): mitigatedValue = extrapolate(InfidelitiesPartial[i][:(d + 1)], EsRescaledPartial[i][:(d + 1)], type='richardson', order=d) mitigatedValues.append(mitigatedValue) EsExtrapolatedPartial.append(mitigatedValues) plotZNESequences(EsRescaledPartial, EsExtrapolatedPartial, EsIdeal, fileName='zne-single-qubit-clifford-2')1. SemAxis, pretrained embedding# define the functions to be used def polar_dict1(path): polar = pd.read_csv(path) polar = polar.dropna() # remove NaN values po = dict(list(zip(polar.Word, polar.Sim))) # convert the dataframe to key-value pairs return po # calculate the sentiment of the articles def calculate_polar1(path, text): po = polar_dict1(path) polar = 0 for t in text: try: polar += po.get(t) except TypeError: polar += 0 return polar # create the dataframe of the sentiment series def senti(path, Embedding, GType, BType, Method): df1 = pd.DataFrame(df['Date']) df1['Score'] = df['corpus'].apply(lambda x: calculate_polar1(path, x)) # get the sentiment of each article df1 = df1.groupby(by = ['Date'], as_index = False).mean() # calculate the daily sentiment df1['Embedding'] = Embedding df1['GType'] = GType df1['BType'] = BType df1['Method'] = Method return df1 df11 = senti('SemAxis11.csv', 'PT', 'F', 'F', 'SemAxis') df11.head() df12 = senti('SemAxis12.csv', 'PT', 'NF', 'F', 'SemAxis') df12.head() df13 = senti('SemAxis13.csv', 'PT', 'F', 'NF', 'SemAxis') df14 = senti('SemAxis14.csv', 'PT', 'NF', 'NF', 'SemAxis')2. SemAxis, Self-trained embeddingdf21 = senti('SemAxis21.csv', 'T', 'F', 'F', 'SemAxis') # F + F, self trained embedding df22 = senti('SemAxis22.csv', 'T', 'NF', 'F', 'SemAxis') # NF + F, self trained embedding df23 = senti('SemAxis23.csv', 'T', 'F', 'NF', 'SemAxis') # F + NF, self trained embedding df24 = senti('SemAxis24.csv', 'T', 'NF', 'NF', 'SemAxis') # NF + NF, self trained embedding3. SentiProp, pre-trained embedding# define the functions to be used def polar_dict2(path): polar = pd.read_csv(path) polar = polar.dropna() # remove NaN values po = dict(list(zip(polar.words, polar.polarity))) # convert the dataframe to key-value pairs return po # calculate the sentiment of the articles def calculate_polar2(path, text): po = polar_dict2(path) polar = 0 for t in text: try: polar += po.get(t) except TypeError: polar += 0 return polar # create the dataframe of the sentiment series def senti(path, Embedding, GType, BType, Method): df1 = pd.DataFrame(df['Date']) df1['Score'] = df['corpus'].apply(lambda x: calculate_polar2(path, x)) # get the sentiment of each article df1 = df1.groupby(by = ['Date'], as_index = False).mean() # calculate the daily sentiment df1['Embedding'] = Embedding df1['GType'] = GType df1['BType'] = BType df1['Method'] = Method return df1 df31 = senti('SentiProp11.csv', 'PT', 'F', 'F', 'SentiProp') df32 = senti('SentiProp12.csv', 'PT', 'NF', 'F', 'SentiProp') df33 = senti('SentiProp13.csv', 'PT', 'F', 'NF', 'SentiProp') df34 = senti('SentiProp14.csv', 'PT', 'NF', 'NF', 'SentiProp')4. 
SentiProp, self-trained embeddingdf41 = senti('SentiProp21.csv', 'T', 'F', 'F', 'SentiProp') df42 = senti('SentiProp22.csv', 'T', 'NF', 'F', 'SentiProp') df43 = senti('SentiProp23.csv', 'T', 'F', 'NF', 'SentiProp') df44 = senti('SentiProp24.csv', 'T', 'NF', 'NF', 'SentiProp') # concate all the dataframes dfs = [df11, df12, df13, df14, df21, df22, df23, df24, df31, df32, df33, df34, df41, df42, df43, df44] result = pd.concat(dfs) res = result.sort_values(by = ['Date', 'Method']) res.to_csv('WSJ_sentiment.csv', index = False)Using Variational Autoencoder and Deep Feature Loss to Generate Faces From the "Using Variational Autoencoder to Generate Faces" example, we see that using VAE, we can generate realistic human faces, but the generated image is a little blury. Though, you can continue to tuning the hyper paramters or using more data to get a better result, in this example, we adopted the approach in [this paper](https://arxiv.org/abs/1610.00291). That is, instead of using pixel-by-pixel loss of between the original images and the generated images, we use the feature map generated by a pre-trained CNN network to define a feature perceptual loss. As you will see, the generated images will become more vivid.import numpy as np from zoo.pipeline.api.keras.layers import * from zoo.pipeline.api.keras.models import Model,Sequential from zoo.pipeline.api.keras.utils import * from utils import * from glob import glob from zoo.pipeline.api.net import Net # Adjust to your own data_path DATA_PATH = os.getenv("ANALYTICS_ZOO_HOME") + "/apps/variational_autoencoder/img_align_celeba" # Adjust to your own model_path VGG_PATH = os.getenv("ANALYTICS_ZOO_HOME") + "/apps/variational_autoencoder/bigdl_vgg-16_imagenet_0.4.0.model" IMAGE_SIZE = 148 IMAGE_ROW = 64 IMAGE_COL = 64 Z_DIM = 100 ENCODER_FILTER_NUM = 32 IMAGE_CHANNELS = 3 init_engine()Define the Model We are using the same model as "Using Variational Autoencoder to Generate Faces" example.def conv_bn_lrelu(out_channles, in_channels, in_row, in_col,kw=4, kh=4, sw=2, sh=2): input0 = Input(shape=(in_channels, in_row, in_col)) conv1 = Convolution2D(out_channles, kw, kh, subsample=(sw, sh), border_mode='same')(input0) batch = BatchNormalization(out_channles)(conv1) relu = LeakyReLU(0.2)(batch) model = Model([input0], [relu]) return model def upsample_conv_bn_lrelu(out_channles, in_channels, in_row, in_col, out_width, out_height, kw=3, kh=3, sw=1, sh=1): input0 = Input(shape=(in_channels, in_col, in_row)) resize = ResizeBilinear(out_width, out_height)(input0) conv1 = Convolution2D(out_channles, kw, kh, subsample=(sw, sh), border_mode='same')(resize) batch1 = BatchNormalization(out_channles)(conv1) relu = LeakyReLU(0.2)(batch1) model = Model([input0], [relu]) return model def get_encoder_cnn(): input0 = Input(shape=(IMAGE_CHANNELS, IMAGE_ROW, IMAGE_COL)) #conv conv1 = conv_bn_lrelu(ENCODER_FILTER_NUM, IMAGE_CHANNELS, IMAGE_ROW, IMAGE_COL)(input0) # 32 * 32 * 32 conv2 = conv_bn_lrelu(ENCODER_FILTER_NUM * 2, ENCODER_FILTER_NUM, 32, 32)(conv1) # 16 * 16 * 64 conv3 = conv_bn_lrelu(ENCODER_FILTER_NUM * 4, ENCODER_FILTER_NUM * 2, 16, 16)(conv2) # 8 * 8 * 128 conv4 = conv_bn_lrelu(ENCODER_FILTER_NUM * 8, ENCODER_FILTER_NUM * 4, 8, 8)(conv3) # 4 * 4 * 256 flatten = Flatten()(conv4) inter = Dense(2048)(flatten) inter = Reshape((1, 1, 2048))(inter) inter = BatchNormalization()(inter) inter = Activation('relu')(inter) inter = Flatten()(inter) # fully connected to generate mean and log-variance mean = Dense(Z_DIM)(inter) log_variance = Dense(Z_DIM)(inter) model = 
Model([input0], [mean, log_variance]) return model def get_decoder_cnn(): input0 = Input(shape=(Z_DIM,)) linear = Dense(2048)(input0) linear = Dense(4 * 4 * ENCODER_FILTER_NUM * 8)(linear) reshape = Reshape((ENCODER_FILTER_NUM * 8, 4, 4))(linear) bn = BatchNormalization(ENCODER_FILTER_NUM * 8)(reshape) # upsampling up1 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM * 4, ENCODER_FILTER_NUM * 8, 4, 4, 8, 8)(bn) # 8 * 8 * 128 up2 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM * 2, ENCODER_FILTER_NUM * 4, 8, 8, 16, 16)(up1) # 16 * 16 * 64 up3 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM, ENCODER_FILTER_NUM * 2, 16, 16, 32, 32)(up2) # 32 * 32 * 32 up4 = upsample_conv_bn_lrelu(IMAGE_CHANNELS, ENCODER_FILTER_NUM, 32,32, 64, 64)(up3) # 64 * 64 * 3 output = Activation('sigmoid')(up4) model = Model([input0], [output]) return model def get_autoencoder_cnn(): input0 = Input(shape=(IMAGE_CHANNELS, IMAGE_ROW, IMAGE_COL)) encoder = get_encoder_cnn()(input0) sampler = GaussianSampler()(encoder) decoder_model = get_decoder_cnn() decoder = decoder_model(sampler) model = Model([input0], [encoder, decoder]) return model, decoder_modelLoad the pre-trained CNN modeldef get_vgg(): vgg_whole = Net.load_bigdl(VGG_PATH) vgg_light = vgg_whole.new_graph(["relu1_2"]) vgg_light.freeze() return vgg_light vgg = get_vgg() print("Get VGG Model") autoencoder, decoder_model = get_autoencoder_cnn()creating: createZooKerasInput creating: createZooKerasInput creating: createZooKerasInput creating: createZooKerasConvolution2D creating: createZooKerasBatchNormalization creating: createZooKerasLeakyReLU creating: createZooKerasModel creating: createZooKerasInput creating: createZooKerasConvolution2D creating: createZooKerasBatchNormalization creating: createZooKerasLeakyReLU creating: createZooKerasModel creating: createZooKerasInput creating: createZooKerasConvolution2D creating: createZooKerasBatchNormalization creating: createZooKerasLeakyReLU creating: createZooKerasModel creating: createZooKerasInput creating: createZooKerasConvolution2D creating: createZooKerasBatchNormalization creating: createZooKerasLeakyReLU creating: createZooKerasModel creating: createZooKerasFlatten creating: createZooKerasDense creating: createZooKerasReshape creating: createZooKerasBatchNormalization creating: createZooKerasActivation creating: createZooKerasFlatten creating: createZooKerasDense creati[...]Load the Datasetsdef get_data(): data_files = glob(os.path.join(DATA_PATH, "*.jpg")) rdd_train_images = sc.parallelize(data_files[:100000]) \ .map(lambda path: inverse_transform(get_image(path, IMAGE_SIZE)).transpose(2, 0, 1)) rdd_train_sample = rdd_train_images.map(lambda img: Sample.from_ndarray(img, [np.array(0.0), img])) return rdd_train_sample from pyspark import SparkContext sc =SparkContext.getOrCreate() train_data = get_data()Define the Training Objectivebatch_size = 64 criterion = ParallelCriterion() criterion.add(KLDCriterion(), 0.005) # You may want to twick this parameter criterion.add(TransformerCriterion(MSECriterion(), vgg, vgg), 1.0)creating: createParallelCriterion creating: createKLDCriterion creating: createMSECriterion creating: createTransformerCriterionCompile the Modelautoencoder.compile(optimizer=Adam(0.001), loss=criterion) import os import datetime as dt if not os.path.exists("./log"): os.makedirs("./log") app_name='vae-faces-deep-loss'+dt.datetime.now().strftime("%Y%m%d-%H%M%S") autoencoder.set_tensorboard(log_dir='./log/',app_name=app_name) print("Saving logs to ", app_name)creating: createAdam ('Saving logs to ', 
'vae-faces-deep-loss20180607-190859')Spin Up the Training This could take a while. It took about 6 hours on a desktop with an Intel i7-6700 CPU and 40GB of Java heap memory. You can reduce the training time by using less data (with some changes in the "Load the Dataset" section), but the performance may not be as good.autoencoder.fit(x=train_data, batch_size=batch_size, nb_epoch = 6) def gen_image_row(): return np.column_stack([decoder_model.forward(np.random.randn(1, Z_DIM)).reshape(3, 64, 64).transpose(1, 2, 0) for s in range(8)]) def gen_image(): return np.row_stack([gen_image_row() for i in range(8)]) import matplotlib matplotlib.use('Agg') %pylab inline from matplotlib.pyplot import imshow train_summary = TrainSummary('./log/', app_name) loss = np.array(train_summary.read_scalar("Loss")) plt.figure(figsize = (12,12)) plt.plot(loss[:,0], loss[:,1], label='loss') plt.xlim(0, loss.shape[0]+10) plt.grid(True) plt.title("loss")creating: createTrainSummaryRandom Sample Some Imagesimg = gen_image() imshow(img)[![imagenes/pythonista.png](imagenes/pythonista.png)](https://pythonista.io) It is possible to identify elements within an HTML document using XPath expressions from JavaScript. The XPath specification.[XPath](https://www.w3.org/TR/xpath/all/) is a W3C specification applicable to XML documents. It can identify elements within such documents through a language that navigates the structure by means of paths, starting from a "root node" from which the other nodes branch out, much like the branches of a tree from its trunk.Before HTML5, the HTML specification was defined as a derivative of XML, and even though HTML5 no longer adheres completely to the XML specification, it keeps many of its characteristics and features. Components of a structure according to XPath.The XPath specification can identify the following nodes in an XML document:* Elements.* Attributes.* Texts.* Comments.* Document nodes.* Namespaces.* Processing instructions. In the case of HTML, the following are primarily used:* Elements.* Attributes.* Texts.* Comments.* Document nodes. https://www.w3schools.com/xml/xpath_syntax.asp XPath path expressions.XPath search paths can be built from the following expressions:* ``````: searches for nodes with the specified name, starting from the current position.* ```/``` searches from the root. * ```//``` searches all nodes starting from the root. XPath in the Firefox console.%%javascript document.evaluate('//@id', document, null, XPathResult.ANY_TYPE, null);Deployment Pipeline NotebookThis notebook will exercise the drift detection MLOps `deployment pipeline` SetupRetrieve the project name from your build pipeline%store -r project_nameGet back the project id and regionimport sagemaker import json sess = sagemaker.session.Session() region_name = sess._region_name sm_client = sess.sagemaker_client project_id = sm_client.describe_project(ProjectName=project_name)["ProjectId"] artifact_bucket = f"sagemaker-project-{project_id}-{region_name}" print(f"Project: {project_name} ({project_id})")Your batch pipeline should now be running; click the link below to open the AWS CodePipeline in a new window.from IPython.core.display import HTML HTML( f'Open
Code Pipeline in a new window' )Data PrepDownload the test dataset output from the pre-processing job in our build pipeline, which we will use for input to batch scoring.import boto3 import pandas as pd import random from sagemaker.s3 import S3Downloader, S3Uploader def get_latest_processed_data(pipeline_name, step_name, output_name): execution_arn = sm_client.list_pipeline_executions( PipelineName=pipeline_name, SortBy="CreationTime" )["PipelineExecutionSummaries"][0]["PipelineExecutionArn"] steps = sm_client.list_pipeline_execution_steps( PipelineExecutionArn=execution_arn, SortOrder="Ascending" )["PipelineExecutionSteps"] preprocess_arn = next( item["Metadata"]["ProcessingJob"]["Arn"] for item in steps if item["StepName"] == step_name ) job_outputs = sm_client.describe_processing_job( ProcessingJobName=preprocess_arn.split("/")[1] )["ProcessingOutputConfig"]["Outputs"] return next( item["S3Output"]["S3Uri"] for item in job_outputs if item["OutputName"] == output_name ) pipeline_name = f"{project_name}-build" test_uri = get_latest_processed_data(pipeline_name, "PreprocessData", "test") S3Downloader().download(test_uri, "preprocessed") # Load the test scores into a dataframe test_df = pd.read_csv("preprocessed/test.csv") print(test_df.shape) test_df.head()Test StagingThe staging SageMaker endpoint is created by AWS CloudFormation in the `Batch_CFN_Staging` stage of the AWS CodePipelineOnce its created, run the next cell to wait for the staging endpoint to be in service.from botocore.exceptions import WaiterError from sagemaker.predictor import Predictor from sagemaker.serializers import CSVSerializer from sagemaker.deserializers import JSONDeserializer # Define the predictor for staging def wait_for_predictor(stage_name): try: endpoint_name = f"sagemaker-{project_name}-{stage_name}" predictor = Predictor( endpoint_name, serializer=CSVSerializer(), deserializer=JSONDeserializer() ) print( f"Waiting for {stage_name} endpoint: {predictor.endpoint_name} to be deployed..." ) sm_client.get_waiter("endpoint_in_service").wait( EndpointName=predictor.endpoint_name ) print("Ready") return predictor except WaiterError as err: error_message = err.last_response["Error"]["Message"] if error_message.startswith("Could not find endpoint"): err = Exception(f"Endpoint {endpoint_name} not found.") raise err predictor = wait_for_predictor("staging")Let's send some traffic to the staging endpoint with the following payload:| passenger_count | pickup_latitude | pickup_longitude | dropoff_latitude | dropoff_longitude | geo_distance | hour | weekday | month || -| - | - | - | - | - | - | - | - || 1 | -73.986114 | 40.685634 | -73.936794 | 40.715370 | 5.318025 | 7 | 0 | 2 |We expect approximately a $20 fare:payload = "1,-73.986114,40.685634,-73.936794,40.715370,5.318025,7,0,2" predictor.predict(data=payload)Approve Staging🛑 Head back to the AWS Code Pipeline and approve the staging deployment to kick off the production deployment. 
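As a reference point before testing production: the same prediction request can also be sent without the SageMaker Python SDK, using the low-level runtime client. The sketch below is an illustration; it reuses the `project_name` retrieved at the top of this notebook, assumes the `sagemaker-<project>-<stage>` endpoint naming pattern used above, and an endpoint that is already in service.

```python
import boto3

# Low-level alternative to the Predictor above: call the SageMaker runtime API directly.
runtime = boto3.client("sagemaker-runtime")
endpoint_name = f"sagemaker-{project_name}-staging"  # same naming pattern as wait_for_predictor

payload = "1,-73.986114,40.685634,-73.936794,40.715370,5.318025,7,0,2"
response = runtime.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType="text/csv",
    Body=payload,
)
print(response["Body"].read().decode("utf-8"))
```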
Test ProductionAfter a few minutes our production endpoint will start to be deployed.predictor = wait_for_predictor("prod")And confirm that data capture is enabled.data_capture = sm_client.describe_endpoint(EndpointName=predictor.endpoint_name)[ "DataCaptureConfig" ] print(f"Data capture is: {data_capture['CaptureStatus']}")Inspect Data CaptureLet's send some traffic to the producition endpoint, which our [Data Quality Monitor](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-data-quality.html) should detect as drifting from the baseline.%%time for n in range(100): predictor.predict(data=payload)Let's see if we have received some outputs to our data capturedata_capture_uri = data_capture["DestinationS3Uri"] data_capture_files = S3Downloader.list(data_capture_uri) print("Found {} files".format(len(data_capture_files))) if data_capture["EnableCapture"] and len(data_capture_files) > 0: # Get the first line of the most recent file event = json.loads(S3Downloader.read_file(data_capture_files[-1]).split("\n")[0]) print("\nLast file:\n{}".format(json.dumps(event, indent=2))) elif len(data_capture_files) == 0: print("No files yet, please rerun this cell in a few seconds")Before we test production, let's tweak some of the columns to change the distribution of the data. This represents a simulation of reality where the distribution of the incoming data has changed due to changes in the environment.test_df["passenger_count"] = random.choices( [1, 2, 3, 4, 5, 6], weights=[2, 1, 2, 5, 2, 1], k=test_df.shape[0] ) test_df["geo_distance"] = test_df["passenger_count"].apply( lambda x: 70 * random.betavariate(2.5, 2) ) tweaked_rows = ( test_df.drop("fare_amount", axis=1).to_csv(header=False, index=False).split("\n") )Then make a series of prediction requests in the background every 10 minutes with this data to cause an artificial model monitoring alarm to be triggered.from threading import Thread import time def invoke_endpoint_forever(): while True: for i in range(10000): predictor.predict(data=tweaked_rows[i % len(tweaked_rows)]) time.sleep(10 * 60) Thread(target=invoke_endpoint_forever).start()In the above code can change the sleep time, the number of requests per batch, or the randomly generated data, to different values. This will allow you to test your endpoint with more requests per second or to see how different changes in data would affect your monitoring. You can even completely remove the sleep time so that the kernel will be hitting the endpoint as fast as it can. However, this will cause the endpoint to work harder and trigger the automatic scaling to increase the underlying infrastructure used by the endpoint, which might incur higher costs. 
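To get an intuition for why this tweaked traffic should register as drift, you can compare the tweaked columns against the original test distribution locally. The sketch below uses a simple Population Stability Index (PSI); this is only an illustration of distribution shift, not the statistic that SageMaker Model Monitor computes, and the `psi` helper and bin count are assumptions.

```python
import numpy as np
import pandas as pd

def psi(expected, actual, bins=10):
    """Population Stability Index between two samples; larger values indicate a bigger distribution shift."""
    edges = np.histogram_bin_edges(expected, bins=bins)
    e_counts, _ = np.histogram(expected, bins=edges)
    a_counts, _ = np.histogram(actual, bins=edges)
    e_frac = np.clip(e_counts / e_counts.sum(), 1e-6, None)
    a_frac = np.clip(a_counts / a_counts.sum(), 1e-6, None)
    return float(np.sum((a_frac - e_frac) * np.log(a_frac / e_frac)))

baseline_df = pd.read_csv("preprocessed/test.csv")  # the un-tweaked test set downloaded earlier
print("PSI passenger_count:", round(psi(baseline_df["passenger_count"], test_df["passenger_count"]), 3))
print("PSI geo_distance   :", round(psi(baseline_df["geo_distance"], test_df["geo_distance"]), 3))
```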
MonitorLet's check that we have a monitor configured and that its schedule.from datetime import datetime, timedelta from dateutil.tz import tzlocal model_monitor = predictor.list_monitors()[0] model_monitor_status = model_monitor.describe_schedule()["MonitoringScheduleStatus"] print(f"Model Monitoring: {model_monitor_status}") now = datetime.now(tzlocal()) next_hour = (now + timedelta(hours=1)).replace(minute=0) scheduled_diff = (next_hour - now).seconds // 60 print("Next schedule in {} minutes".format(scheduled_diff))List the latest execution and output the statusmonitor_executions = model_monitor.list_executions() if len(monitor_executions) == 0: raise (Exception("Please wait, no monitor executions available yet")) # Get the latest monitor status monitor_status = monitor_executions[0].describe()["ProcessingJobStatus"] if monitor_status == "Completed": monitor_message = monitor_executions[0].describe()["ExitMessage"] print(f"Latest execution: {monitor_message}") else: print(f"Latest execution: {monitor_status}")Inspect Model Monitor report🛑 Browse to the model monitoring results in SageMaker Studio to download and run a report RetrainWhen the model monitoring schedule runs it will publish Amazon [CloudWatch Metrics](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-interpreting-cloudwatch.html). If drift is detected for a metric above the threshold defined in the `prod-config.json` in the deployment pipeline, then the Amazon CloudWatch will Alarm resulting in the SageMaker pipeline to be re-trained.You can simulate drift by putting a metric value above the threshold of `0.5` directly into CloudWatch.import boto3 from datetime import datetime, timedelta from dateutil.tz import tzlocal import random cloudwatch = boto3.client("cloudwatch") # Define the metric name and threshold endpoint_name = predictor.endpoint_name schedule_name = f"{endpoint_name}-threshold" metric_name = "feature_baseline_drift_fare_amount" metric_threshold = 0.5 # Put a new metric to trigger an alaram def put_drift_metric(value): print("Putting metric: {}".format(value)) response = cloudwatch.put_metric_data( Namespace="aws/sagemaker/Endpoints/data-metrics", MetricData=[ { "MetricName": metric_name, "Dimensions": [ {"Name": "MonitoringSchedule", "Value": schedule_name}, {"Name": "Endpoint", "Value": endpoint_name}, ], "Timestamp": datetime.now(), "Value": value, "Unit": "None", }, ], ) def get_drift_stats(): response = cloudwatch.get_metric_statistics( Namespace="aws/sagemaker/Endpoints/data-metrics", MetricName=metric_name, Dimensions=[ {"Name": "MonitoringSchedule", "Value": schedule_name}, {"Name": "Endpoint", "Value": endpoint_name}, ], StartTime=datetime.now() - timedelta(minutes=2), EndTime=datetime.now(), Period=1, Statistics=["Average"], Unit="None", ) if "Datapoints" in response and len(response["Datapoints"]) > 0: return response["Datapoints"][0]["Average"] return 0 print("Simluate drift on endpoint: {}".format(endpoint_name)) while True: put_drift_metric(round(random.uniform(metric_threshold, 1.0), 4)) drift_stats = get_drift_stats() print("Average drift amount: {}".format(get_drift_stats())) if drift_stats > metric_threshold: break time.sleep(1)To see the CloudWatch metric Alarm click on the link below.HTML( f'Open CloudWatch Alarm in new window' )This will result in a new SageMaker pipeline execution starting.latest_pipeline_execution = sm_client.list_pipeline_executions( PipelineName=pipeline_name, )["PipelineExecutionSummaries"][0] latest_execution_status = 
latest_pipeline_execution["PipelineExecutionStatus"] time_ago = datetime.now(tzlocal()) - latest_pipeline_execution["StartTime"] print( f"Latest pipeline: {pipeline_name} execution: {latest_execution_status} started {time_ago.total_seconds()/60:0.2f} mins ago" )We can verify that this was triggered by Drift by inspecting the InputSource:params = sm_client.list_pipeline_parameters_for_execution( PipelineExecutionArn=latest_pipeline_execution["PipelineExecutionArn"], ) input_source = [ p["Value"] for p in params["PipelineParameters"] if p["Name"] == "InputSource" ][0] print(f"Pipeline execution started with InputSource: {input_source}")And let's list the steps of that execution.execution_steps = sm_client.list_pipeline_execution_steps( PipelineExecutionArn=latest_pipeline_execution["PipelineExecutionArn"], )["PipelineExecutionSteps"] for step in execution_steps: print("Step: {}, Status: {}".format(step["StepName"], step["StepStatus"]))✅ Great now you have completed all the steps. Clean upExecute the following cell to delete cloudformation stacks1. SageMaker prod endpoint2. SageMaker staging endpointimport boto3 cfn = boto3.client("cloudformation") for stack_name in [ f"sagemaker-{project_name}-deploy-prod", f"sagemaker-{project_name}-deploy-staging", ]: print("Deleting stack: {}".format(stack_name)) cfn.delete_stack(StackName=stack_name) cfn.get_waiter("stack_delete_complete").wait(StackName=stack_name)Manifold**Manifold** is a tool for model-agnostic evaluation with visual support developed by Uber — you can find its repo [here](https://github.com/uber/manifold).In this quick demo, the [California housing dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html) (regression) and part of the setup available in this [example](https://scikit-learn.org/stable/auto_examples/inspection/plot_partial_dependence.html) on the [scikit-learn](https://scikit-learn.org/stable/index.html) website are used.The main idea for this demo is to test **Manifold**'s [integration with Jupyter Notebook](https://eng.uber.com/manifold-open-source/) and the [Geo Feature View](https://github.com/uber/manifoldgeo-feature-view) map.from sklearn.datasets import fetch_california_housing import pandas as pd import numpy as np # Installation: https://github.com/uber/manifold/tree/master/bindings/jupyter from mlvis import Manifold from sklearn.model_selection import train_test_split from sklearn.preprocessing import QuantileTransformer from sklearn.pipeline import make_pipeline from sklearn.neural_network import MLPRegressor california_housing = fetch_california_housing() california_housing.data.shape california_housing.feature_names california_housing.target.shape california_housing.keys() X = pd.DataFrame(california_housing.data, columns=california_housing.feature_names) y = california_housing.target # The Latitude feature must be called "lat". # The Longitude feature must be called "lng". X.rename(columns={'Latitude':'lat', 'Longitude': 'lng'}, inplace=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0) est = make_pipeline(QuantileTransformer(), MLPRegressor(hidden_layer_sizes=(50, 50), learning_rate_init=0.01, early_stopping=True)) est.fit(X_train, y_train) print("Test R2 score: {:.2f}".format(est.score(X_test, y_test))) yPred = est.predict(X_test) # Mapbox access token: https://docs.mapbox.com/help/how-mapbox-works/access-tokens/ # It must be replaced with a valid token. 
TOKEN = "" # Props: https://github.com/uber/manifold/blob/master/bindings/jupyter-modules/jupyter-manifold/src/manifold.js # Classification: https://github.com/uber/manifold/blob/master/bindings/jupyter/notebooks/manifold.ipynb Manifold(props={'data': { 'x': X_test[['lat', 'lng']], 'yPred': [pd.DataFrame(yPred, columns=["Target"])], # Each element in this list contains the predictions for a model. 'yTrue': pd.DataFrame(y_test, columns=["Target"]) }, 'mapboxAccessToken': TOKEN, 'width': 1000, 'height': 700 })Load DatasetDownload 4 files from:http://yann.lecun.com/exdb/mnist/import os path = "/content/drive/My Drive/Colab Notebooks/mnist_from_scratch" os.listdir(path) # locate files train_image_source = os.path.join(path, "train-images-idx3-ubyte") train_label_source = os.path.join(path, "train-labels-idx1-ubyte") test_image_source = os.path.join(path, "t10k-images-idx3-ubyte") test_label_source = os.path.join(path, "t10k-labels-idx1-ubyte") def get_file_stat(source, is_img, desc = None): print("==", desc, "==") total = os.path.getsize(source) header = 16 if is_img else 8 data_size = total - header data_cnt = data_size // 784 if is_img else data_size print("TOTAL SIZE: ", total) print("HEADER: ", header) print("DATA SIZE: ", data_size) print("DATA COUNT: ", data_cnt) return data_cnt tr_im_cnt = get_file_stat(train_image_source, 1, "TRAIN_IMAGE") tr_lb_cnt = get_file_stat(train_label_source, 0, "TRAIN_LABEL") assert(tr_im_cnt == tr_lb_cnt) te_im_cnt = get_file_stat(test_image_source, 1, "TEST_IMAGE") te_lb_cnt = get_file_stat(test_label_source, 0, "TEST_LABEL") assert(te_im_cnt == te_lb_cnt) import numpy as np import struct from array import array import matplotlib.pyplot as plt class MnistDataloader(object): def __init__(self, training_images_filepath,training_labels_filepath, test_images_filepath, test_labels_filepath): self.training_images_filepath = training_images_filepath self.training_labels_filepath = training_labels_filepath self.test_images_filepath = test_images_filepath self.test_labels_filepath = test_labels_filepath def read_images_labels(self, images_filepath, labels_filepath): ''' Read images, labels ''' # labels labels = [] ''' http://yann.lecun.com/exdb/mnist/, file formats for mnist [offset] [type] [value] [description] 0000 32 bit integer 0x00000801(2049) magic number (MSB first) 0004 32 bit integer 60000 number of items 0008 unsigned byte ?? label 0009 unsigned byte ?? label ........ xxxx unsigned byte ?? label ''' with open(labels_filepath, 'rb') as file: magic, size = struct.unpack(">II", file.read(8)) # in Big Endian, read two unsigned ints if magic != 2049: raise ValueError('Expected 2049, got {}'.format(magic)) labels = array("B", file.read()) #images ''' http://yann.lecun.com/exdb/mnist/, file formats for mnist [offset] [type] [value] [description] 0000 32 bit integer 0x00000803(2051) magic number 0004 32 bit integer 60000 number of images 0008 32 bit integer 28 number of rows 0012 32 bit integer 28 number of columns 0016 unsigned byte ?? pixel 0017 unsigned byte ?? pixel ........ xxxx unsigned byte ?? 
pixel ''' with open(images_filepath, 'rb') as file: magic, size, rows, cols = struct.unpack(">IIII", file.read(16)) # in Big Endian, read four unsigned ints if magic != 2051: raise ValueError('Expected 2051, got {}'.format(magic)) image_data = array("B", file.read()) images = [] for i in range(size): imsize = rows * cols img = np.array(image_data[i * imsize : (i + 1) * imsize]) img = img.reshape(28, 28, 1) images.append(img) ''' To numpy ''' images = np.array(images) labels = np.eye(10)[np.array(labels).reshape(-1)] # one hot return images, labels def load_data(self): x_train, y_train = self.read_images_labels(self.training_images_filepath, self.training_labels_filepath) x_test, y_test = self.read_images_labels(self.test_images_filepath, self.test_labels_filepath) return (x_train, y_train),(x_test, y_test) mnist_dataloader = MnistDataloader(train_image_source, train_label_source, test_image_source, test_label_source) (x_train, y_train), (x_test, y_test) = mnist_dataloader.load_data() plt.imshow(x_train[0].reshape(28, 28)) plt.show() print(y_train[0]) print(np.shape(x_train)) print(np.shape(y_train)) print(np.shape(x_test)) print(np.shape(y_test))(60000, 28, 28, 1) (60000, 10) (10000, 28, 28, 1) (10000, 10)Split Train/Validation SetVALIDATION_RATIO = 0.2 val_length = int(len(x_train) * VALIDATION_RATIO) x_validation = x_train[:val_length] x_train = x_train[val_length:] y_validation = y_train[:val_length] y_train = y_train[val_length:] print(np.shape(x_train)) print(np.shape(y_train)) print(np.shape(x_validation)) print(np.shape(y_validation)) print(np.shape(x_test)) print(np.shape(y_test))(48000, 28, 28, 1) (48000, 10) (12000, 28, 28, 1) (12000, 10) (10000, 28, 28, 1) (10000, 10)Modeling with Keras Model Setupsimport tensorflow as tf model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(tf.keras.layers.MaxPooling2D((2, 2))) model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D((2, 2))) model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu')) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.summary() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) history = model.fit( x_train, y_train, epochs = 100, validation_data = (x_validation, y_validation), )Epoch 1/100 1500/1500 [==============================] - 43s 29ms/step - loss: 0.2234 - accuracy: 0.9429 - val_loss: 0.0807 - val_accuracy: 0.9737 Epoch 2/100 1500/1500 [==============================] - 42s 28ms/step - loss: 0.0641 - accuracy: 0.9806 - val_loss: 0.0661 - val_accuracy: 0.9796 Epoch 3/100 1500/1500 [==============================] - 40s 27ms/step - loss: 0.0518 - accuracy: 0.9838 - val_loss: 0.0838 - val_accuracy: 0.9769 Epoch 4/100 1500/1500 [==============================] - 41s 27ms/step - loss: 0.0429 - accuracy: 0.9872 - val_loss: 0.0635 - val_accuracy: 0.9840 Epoch 5/100 1500/1500 [==============================] - 40s 27ms/step - loss: 0.0396 - accuracy: 0.9875 - val_loss: 0.1008 - val_accuracy: 0.9744 Epoch 6/100 1500/1500 [==============================] - 40s 27ms/step - loss: 0.0338 - accuracy: 0.9895 - val_loss: 0.0669 - val_accuracy: 0.9841 Epoch 7/100 1500/1500 [==============================] - 41s 27ms/step - loss: 0.0313 - accuracy: 0.9905 - val_loss: 0[...]Visualize Training Historyplt.plot(history.history['accuracy']) 
plt.plot(history.history['val_accuracy']) plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['training', 'validation']) plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['training', 'validation']) plt.show()Model Evaluation with Test Datasetmodel.evaluate(x_test, y_test) TCNUM = 20 test_image = x_test[TCNUM].reshape(28, 28) answer = np.argmax(y_test[TCNUM]) plt.imshow(test_image) plt.show() test_image = np.reshape(test_image, (-1, 28, 28, 1)) print("answer: ", answer) print("prediction: ", np.argmax(model.predict(test_image)))Modeling with Torch Model Setups# -*- coding: utf-8 -*- import torch # code from torch tutorial class Net(torch.nn.Module): def __init__(self): super(Net, self).__init__() self.layer1 = torch.nn.Sequential( torch.nn.Conv2d(1, 32, kernel_size = 3, stride = 1, padding = 1), torch.nn.ReLU(), torch.nn.MaxPool2d(kernel_size = 2) ) self.layer2 = torch.nn.Sequential( torch.nn.Conv2d(32, 64, kernel_size = 3, stride = 1, padding = 1), torch.nn.ReLU(), torch.nn.MaxPool2d(kernel_size = 2) ) self.layer3 = torch.nn.Sequential( torch.nn.Conv2d(64, 64, kernel_size = 3, stride = 1, padding = 1) ) self.fc1 = torch.nn.Linear(7 * 7 * 64, 64) self.fc2 = torch.nn.Linear(64, 10) def forward(self, x): x = x.permute(0,3,1,2) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = x.view(x.size(0), -1) x = self.fc1(x) x = self.fc2(x) return x # modeling model = Net().to('cpu') loss_fn = torch.nn.CrossEntropyLoss()Trainingdef get_accuracy(t, v): # t : true # v : prediction assert (len(t) == len(v)) total = len(t) correct = torch.sum(t==v).item() accuracy = correct / total return accuracy # training learning_rate = 1e-3 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) EPOCHS = 100 BATCH_SIZE = 100 TRAIN_STEPS = int(len(x_train) * (1-VALIDATION_RATIO)) // BATCH_SIZE VALIDATION_STEPS = int(len(x_train) * (VALIDATION_RATIO)) // BATCH_SIZE for e in range(1,EPOCHS+1): #train train_acc = 0 train_loss = 0 for i in range(TRAIN_STEPS): X = torch.as_tensor(x_train[BATCH_SIZE * i: BATCH_SIZE * (i+1)]).float() Y = torch.as_tensor(y_train[BATCH_SIZE * i: BATCH_SIZE * (i+1)]).float() # predict y_pred = model(X) acc = get_accuracy(torch.argmax(Y, dim = 1), torch.argmax(y_pred, dim = 1)) train_acc += acc # loss loss = loss_fn(y_pred, torch.argmax(Y, dim = 1)) train_loss += loss.item() # back propagation optimizer.zero_grad() loss.backward() optimizer.step() print("EPOCH {}: accuracy {}% loss {}".format(e, train_acc / TRAIN_STEPS, train_loss / TRAIN_STEPS), end = ' | ') # validation validation_acc = 0 validation_loss = 0 with torch.no_grad(): for i in range(VALIDATION_STEPS): X_v = torch.as_tensor(x_validation[BATCH_SIZE * i: BATCH_SIZE * (i+1)]).float() Y_v = torch.as_tensor(y_validation[BATCH_SIZE * i: BATCH_SIZE * (i+1)]).float() # predict y_pred = model(X_v) acc = get_accuracy(torch.argmax(Y_v, dim = 1), torch.argmax(y_pred, dim = 1)) validation_acc += acc # loss loss = loss_fn(y_pred, torch.argmax(Y_v, dim = 1)) validation_loss += loss.item() # report print("val_acc {}% val_loss {} ".format(validation_acc/VALIDATION_STEPS, validation_loss/VALIDATION_STEPS))EPOCH 1: accuracy 0.9546614583333349% loss 0.16209788745375894 | val_acc 0.9656250000000002% val_loss 0.1128886064204077 EPOCH 2: accuracy 0.9720052083333348% loss 0.09147307617725649 | val_acc 0.9645833333333331% val_loss 0.12978297203759817 EPOCH 3: accuracy 0.97940104166667% loss 0.067380384951169 | val_acc 0.9692708333333332% val_loss 
0.1135420647939706 EPOCH 4: accuracy 0.980937500000003% loss 0.06153265781722439 | val_acc 0.9686458333333327% val_loss 0.12122099376089561 EPOCH 5: accuracy 0.9841145833333367% loss 0.05216857497953242 | val_acc 0.972708333333333% val_loss 0.1218388251606181 EPOCH 6: accuracy 0.9825781250000034% loss 0.056457175660852954 | val_acc 0.9719791666666663% val_loss 0.12229683584640345 EPOCH 7: accuracy 0.9854166666666693% loss 0.04717592918298882 | val_acc 0.9760416666666664% val_loss 0.10767958875112527 EPOCH 8: accuracy 0.9858072916666698% loss 0.04675776284136646 | val_acc 0.9766666666666662% val_loss 0.10007874861912569 EPOCH 9: accurac[...]Prediction# test test_acc = 0 test_loss = 0 BATCH_SIZE = 100 TEST_STEPS = int(len(x_test)) // BATCH_SIZE with torch.no_grad(): for i in range(TEST_STEPS): X_t = torch.as_tensor(x_test[BATCH_SIZE * i: BATCH_SIZE * (i+1)]).float() Y_t = torch.as_tensor(y_test[BATCH_SIZE * i: BATCH_SIZE * (i+1)]).float() # predict y_pred = model(X_t) acc = get_accuracy(torch.argmax(Y_t, dim = 1), torch.argmax(y_pred, dim = 1)) test_acc += acc # loss loss = loss_fn(y_pred, torch.argmax(Y_t, dim = 1)) test_loss += loss.item() # report print("accuracy {}% loss {}".format(test_acc/TEST_STEPS, test_loss / TEST_STEPS))accuracy 0.9861999999999995% loss 0.7211516889933214Crawler for Amazon Bestsellers of 2020 written by Import libraries required for scrapingimport pandas as pd import numpy as np from urllib.request import urlopen from bs4 import BeautifulSoup import requestsScraping Amazon Bestsellers list of 2020 from websiteno_pages = 2 def get_data(pageNo): headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"} r = requests.get('https://www.amazon.com/gp/bestsellers/2020/books/ref=zg_bsar_cal_ye'+str(pageNo)+'?ie=UTF8&pg='+str(pageNo), headers=headers)#, proxies=proxies) content = r.content soup = BeautifulSoup(content) #print(soup) alls = [] for d in soup.findAll('div', attrs={'class':'a-section a-spacing-none aok-relative'}): #print(d) name = d.find('span', attrs={'class':'zg-text-center-align'}) n = name.find_all('img', alt=True) #print(n[0]['alt']) author = d.find('a', attrs={'class':'a-size-small a-link-child'}) rating = d.find('span', attrs={'class':'a-icon-alt'}) users_rated = d.find('a', attrs={'class':'a-size-small a-link-normal'}) price = d.find('span', attrs={'class':'p13n-sc-price'}) all1=[] if name is not None: #print(n[0]['alt']) all1.append(n[0]['alt']) else: all1.append("unknown-product") if author is not None: #print(author.text) all1.append(author.text) elif author is None: author = d.find('span', attrs={'class':'a-size-small a-color-base'}) if author is not None: all1.append(author.text) else: all1.append('0') if rating is not None: #print(rating.text) all1.append(rating.text) else: all1.append('-1') if users_rated is not None: #print(price.text) all1.append(users_rated.text) else: all1.append('0') if price is not None: #print(price.text) all1.append(price.text) else: all1.append('0') alls.append(all1) return allsSave the scraped result as csvresults = [] for i in range(1, no_pages+1): results.append(get_data(i)) flatten = lambda l: [item for sublist in l for item in sublist] df2 = pd.DataFrame(flatten(results),columns=['Book Title','Author','Rating','Num_Customers_Rated', 'Price($)']) df2.to_csv('amazon_bestsellers_2020.csv', 
index=False, encoding='utf-8')Open csv filedf2 = pd.read_csv("amazon_bestsellers_2020.csv") df2.shape df2.head(100)Data Preprocessingdf2.insert(0, 'Rank', df2.index + 1) #Adding the Best seller rank using index df2.insert(0, 'Year', '2020') #Adding Year column #Getting rid of all the 'out of 5 stars' phrase from Rating column values df2['Rating'] = df2['Rating'].apply(lambda x: x.split()[0]) df2['Rating'] = pd.to_numeric(df2['Rating']) #change Rating's data type into numeric #Getting rid of all the dollar sign '$' from Price column values df2['Price($)'] = df2['Price($)'].str.replace('$', '') df2['Price($)'] = df2['Price($)'].astype(float) #Getting rid of comma from Customers_Rated column value df2["Num_Customers_Rated"] = df2["Num_Customers_Rated"].str.replace(',', '') df2['Num_Customers_Rated'] = pd.to_numeric(df2['Num_Customers_Rated']) df2.head() df2.to_csv("amazon_bestsellers_2020.csv", sep=",", index = False)Requirment (1/1)import random import re import math %load_ext autoreload %autoreload 2 def string2func(string): replacements = { '^': '**', 'e': str(math.exp(1)), 'PI': str(math.pi) } allowedWords = [ 'x', 'e' ] for word in re.findall('[a-zA-Z_]+', string): if word not in allowedWords: raise ValueError( '"{}" is forbidden to use in math expression'.format(word) ) for old, new in replacements.items(): string = string.replace(old, new) def func(x): return eval(string) return func def monte_carlo(n, a, b, fx): sum_of_samples = 0 for i in range(n): x = (b - a) * random.uniform(0, 1) + a sum_of_samples += fx(x) return (b - a) * float(sum_of_samples/n) n = int(input("Enter the number of samples(n): ")) a = float(input("Enter the lower range(a): ")) b = float(input("Enter the upper range(b): ")) fx = string2func(input("Enter the function (fx): ")) print(monte_carlo(n, a, b, fx)) #(15*x^3 + 21*x^2 + 41*x +3)**(1/4.0) * e^(-0.5*x)2.718281828459045 4.05053655919342How to input the functions1- run the cell above.2- enter first n than a than b than the function. Note: the function in the pptx file you can copy paste it from the comment at the end of the cell 2- enter first n than a than b than the function. 
note: the function in the pptx file you can copy paste it from the comment at the end of the cell Requirment(1/2)import scipy.integrate as integrate import matplotlib.pyplot as pltBest value for nwe will choose 6500 so it will no do much calculation and the noise is acceptable on itintegral = integrate.quad(fx, a, b) x = [] y_exact = [] y_approx = [] for n in range (100, 10000, 100): y_exact.append(integral[0]) y_approx.append(monte_carlo(n, a, b, fx)) x.append(n) plt.plot(x, y_exact, 'r') plt.plot(x, y_approx, 'b')Requirment 2monte_carlo_res = [] a = 1 b = 4 n = 1000 for i in range(0,500): monte_carlo_res.append(monte_carlo(n, a, b, fx)) plt.hist(monte_carlo_res) length = len(monte_carlo_res) mean = sum(monte_carlo_res) / length deviations = [(x - mean) ** 2 for x in monte_carlo_res] variance = sum(deviations) / length std_dev = math.sqrt(variance)Does it follow a normal distribution?Yes as the graph saysprint("The standard deviation is equal to " + str(std_dev))The standard deviation is equal to 0.02976273674308516Testing api for curingimport requests get_user=requests.get("http://localhost:5000/api/users") users=get_user.json()['users'] print(users) get_bus=requests.get("http://localhost:5000/api/bu") bus=get_bus.json()['bus'] print(bus) get_comps=requests.get("http://localhost:5000/api/compounds") compounds=get_comps.json()['comps'] print(compounds) get_comps.content uparam={'id':3,'name':'ABCDE'} user_response=requests.post("http://localhost:5000/api/users", json=uparam) user_response uparam={'id':5,'name':'John5'} user_response=requests.put("http://localhost:5000/api/users/"+str(uparam['id']), json=uparam) user_response udel= User.query.filter_by(name='ABCDE').first() print(udel) user_response=requests.delete("http://localhost:5000/api/users/"+str(udel.id)) user_response get_response=requests.get("http://localhost:5000/api/cures")#, params=parameters) cures=get_response.json()['cures'] cures import json parameters={'user_id':2,'name':'ABC_BG','a':1.0,'b':3.0,'description':'my comp name entry', 'bu_id':3,'comp_id':2} #pstr='user_id=1 name="ABCD" a=1.0 b=2.0 description="no description"' #xx=json.dumps(parameters) post_response=requests.post("http://localhost:5000/api/cures", json=parameters) post_response.content print(dir(post_response)) post_response.status_code get_response.content import pandas as pd # get as pandas dataFrame def get_list_of_compounds(): #cures=get_response.json()['cures'] get_response=requests.get("http://localhost:5000/api/cures") #df=pd.DataFrame(data=cures,index='id') df=pd.DataFrame(get_response.json()['cures']) return df #return [ (c['id'],c['name'],c['description']) for c in cures] lcc=get_list_of_compounds() lcc # using filters in query def get_list_of_compounds_perUser(user=None): if user: uname = User.query.filter_by(name=user).first() cure=Curedb.query.filter_by(user_id=uname.id) return cure.all() else: cure=Curedb.query.all() return cure lcca=get_list_of_compounds_perUser() #print(dir(lccu)) print(lcca) lccu=get_list_of_compounds_perUser('Kai') #print(dir(lccu)) for lc in lccu: print(lc.id,lc.name,lc.description) # alternative request filter in url get_2=requests.get("http://localhost:5000/api/cures?user_id=2&name=ABCDE") cures=get_2.json()['cures'] cures # filter by name, needs objects imported (no flexible solution here) uname=User.query.filter_by(name='Kai').first() cure=Curedb.query.filter_by(user_id=uname.id) print('%d Cure(s) found in db for name: %s'%(cure.count(),uname.name)) for x in cure: print(x.id,x.name,x.a) parameter={'cure_id':1} 
get_byid=requests.get("http://localhost:5000/api/cures",json=parameter) get_byid.content get_byid.json() del_byid=requests.delete("http://localhost:5000/api/cures/7",json=parameter) del_byid.content測試資料# 表格資料title data = [ {"route_id": "0001", "route_desc": "路線1", "num_of_people": 100, "origin_amt": 1000, "act_amt": 600, "subsidy_amt": 400, "avg_subsidy_amt_by_people": 4}, {"route_id": "0002", "route_desc": "路線2", "num_of_people": 100, "origin_amt": 1000, "act_amt": 600, "subsidy_amt": 400, "avg_subsidy_amt_by_people": 4}, {"route_id": "0003", "route_desc": "路線3", "num_of_people": 100, "origin_amt": 1000, "act_amt": 600, "subsidy_amt": 400, "avg_subsidy_amt_by_people": 4}, ] df = pd.DataFrame(data, columns=["route_id", "route_desc", "num_of_people", "origin_amt", "act_amt", "subsidy_amt", "avg_subsidy_amt_by_people"]) df df.columns=["路線\n編號", "路線\n名稱", "使用轉乘優惠\n人數", "原始票收金額", "實際交易金額", "優惠補貼金額", "平均每人\n優惠金額"]EXCEL輸出格式產生wb = Workbook() ws = wb.active # 設定print area # https://openpyxl.readthedocs.io/en/stable/print_settings.html ws.print_options.horizontalCentered = True # ws.print_options.verticalCentered = True ws.print_area = 'A1:G10' # 設定輸出表格字體參數 table_title_ft = Font(name='標楷體', color='000000', size=14, bold=True) table_ft = Font(name='標楷體', color='000000', size=14) # 表格格線設定 table_border = Border(left=Side(border_style='thin', color='000000'), right=Side(border_style='thin', color='000000'), top=Side(border_style='thin', color='000000'), bottom=Side(border_style='thin', color='000000')) ws.insert_rows(1) # 在第一行插入一行 ws.merge_cells('A1:G1') # 欄位 ws["A1"] = '表1 ○年○月○○客運公司○○○公車轉乘第一段票免費補貼金額申請表' ws["A1"].font = table_title_ft ws["A1"].alignment = Alignment(horizontal='center') # DataFrame資料填入sheet row for row in dataframe_to_rows(df, index=False, header=True): ws.append(row) # Table加入格線 rows = ws["A3:G6"] for row in rows: for cell in row: cell.border = table_border cell.font = table_ft # 自動指定欄位寬度 # https://stackoverflow.com/questions/13197574/openpyxl-adjust-column-width-size # from openpyxl.utils import get_column_letter # column_widths = [] # for row in data: # for i, cell in enumerate(row): # if len(column_widths) > i: # if len(cell) > column_widths[i]: # column_widths[i] = len(cell) # else: # column_widths += [len(cell)] # for i, column_width in enumerate(column_widths): # ws.column_dimensions[get_column_letter(i+1)].width = column_width # 直接指定欄位寬度 # https://stackoverflow.com/questions/53906532/is-it-possible-to-change-the-column-width-using-openpyxl/53906585 ws.column_dimensions['A'].width = 10 ws.column_dimensions['B'].width = 10 ws.column_dimensions['C'].width = 23 ws.column_dimensions['D'].width = 23 ws.column_dimensions['E'].width = 23 ws.column_dimensions['F'].width = 23 ws.column_dimensions['G'].width = 23 # Table欄位title style設定 table_align_style = Alignment(wrapText=True, horizontal='center', vertical='center') for rows in ws['A3':'G3']: for cell in rows: cell.alignment = table_align_style # ws["A":"G"].alignment = Alignment(wrapText=True, horizontal='center') # ws['A1'].alignment = Alignment(wrapText=True) # rows = sheet["A1:C3"] # for row in rows: # for cell in row: # cell.border = border wb.save("report_01_output.xlsx")Area Deprivation Index Found [here](https://www.neighborhoodatlas.medicine.wisc.edu/).From the Neighborhood Atlas site:"The Area Deprivation Index (ADI) is based on a measure created by the Health Resources & Services Administration (HRSA) over two decades ago for primarily county-level use, but refined, adapted, and validated to the Census block 
group/neighborhood level by , MD, PhD and her research team at the University of Wisconsin-Madison. It allows for rankings of neighborhoods by socioeconomic status disadvantage in a region of interest (e.g. at the state or national level). It includes factors for the theoretical domains of income, education, employment, and housing quality. It can be used to inform health delivery and policy, especially for the most disadvantaged neighborhood groups."wi_adi = pd.read_csv(os.path.join("../../wi_bg_v1.5.txt"),delimiter=",") wi_adi['FIPS'] = wi_adi['FIPS'].astype(str) wi_adi.head() sns.distplot(wi_adi['ADI_NATRANK'],kde=False,label='National Percentile') sns.distplot(wi_adi['ADI_STATERNK']*10.0,kde=False,label='State Decile') plt.legend()It looks like we'll get more detail out of the National ADI percentiles than the State-level deciles. Census ShapefilesLink the FIPS codes in the ADI data to their corresponding spatial boundaries so the shapes and the data are in the same dataframe.wi_shp = gpd.read_file(os.path.join('../../gz_2010_55_150_00_500k/gz_2010_55_150_00_500k.shp')) wi_shp.head()It looks like the column 'GEO_ID' has the 12-digit FIPS codes following the letters 'US'. Create a new column with just the 12-digit FIPS codes.wi_shp['FIPS'] = wi_shp['GEO_ID'].apply(lambda x: x.split("US")[-1]) # Just subset to Dane County: dane = wi_shp[wi_shp['COUNTY']=='025'] # Join ADI to the Dane county dataframe: adi_dane = dane[['FIPS','CENSUSAREA','geometry','TRACT']].merge(wi_adi,how='left',on='FIPS') adi_dane.head() # THere are two tracts without ADI data - let's drop those from the dataframe print("Was",len(adi_dane)) adi_dane = adi_dane[adi_dane['CENSUSAREA']>0.0] print("Is",len(adi_dane)) sns.distplot(adi_dane['ADI_NATRANK']) geoplot.choropleth(adi_dane,hue='ADI_NATRANK',cmap='Blues',k=None,legend=True,figsize=(11,8))Table of Contents 1  arbitrary number of arguments2  arbitrary keyward arguments3  functions that return multiple values4  python function argument expansiondef function_name(arg0, arg1): """Function Documentation, aka docstring""" function_stuff = arg0 + arg1 # return is optional # if no, return it will return None # python functions always return something return(function_stuff) function_name? 
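The trailing question mark is IPython/Jupyter help syntax: `function_name?` pops up the signature and docstring. Outside a notebook the same information is available through `help()` or the `__doc__` attribute; a minimal illustration using the function defined above:
help(function_name)           # prints the signature followed by the docstring
print(function_name.__doc__)  # -> Function Documentation, aka docstring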
# no type declaration # because python is a dynamically typed language def my_add(x, y): """add values""" return(x + y) my_add(1, 4) my_add('hello', 'world') my_add([1, 2, 3], [4, 5, 6]) my_add('123', 123) my_add(1) # we just delt with positional arguments my_add(x='hello', y='world') my_add(y='world', x='hello') def my_func_default(x, y, z='foo'): return(x + y + z) my_func_default('hello', 'world') my_func_default('hello', 'world', 'again') my_func_default('hello', 'world', z='again') my_func_default('hello', 'world', a='again')arbitrary number of argumentsdef my_add_args(x, *args): total = x for arg in args: total += arg return(total) my_add_args(5) my_add_args(1, 2, 3, 4, 5)arbitrary keyward argumentsmy_dict = {'name':'daniel'} my_dict my_dict.items() for thing1, thing2 in my_dict.items(): print(thing1, thing2) def my_add_kwargs(x, **kwargs): total = x for arg, value in kwargs.items(): print(arg, value) total += value return(total) my_add_kwargs(1, b=2, c=3)b 2 c 3functions that return multiple values# return a tuple # and use multiple assignment to catch the return values def func_mult_returns(x, y): """returns the square of x and the square of y""" square_x = x**2 square_y = y **2 return square_x, square_y func_mult_returns(2, 3) type(func_mult_returns(2, 3)) x, y = func_mult_returns(2, 3) print(x, y) a, b, c = [1, 2, 3] print(a, b, c)1 2 3python function argument expansiondef my_add(x, y): return x + y tup = (1, 2) tup my_add(*z) my_dict = {'x':1, 'y':2} my_dict my_add(**my_dict)Why use the spatially enabled dataframe In Memory Fast computation On the fly indexing Multi-platform All the benefits of pandas and moreimport pandas as pd from arcgis.features import GeoAccessor, GeoSeriesAccessor from arcgis import GIS gis = GIS('https://arcgis.com', 'bhammersley_tech')Enter password: ········Data Wranglingdf = pd.read_csv(r'C:\Users\bhammersley\OneDrive - ESRI (UK) Ltd\Documents\Presentations\Berlin_2019\data\HH.csv') df.head() def f(): pass df["full_address"] = df.apply( lambda row: '%s %s %s %s %s' % (row['Name'],row['Street'],row['Town'],row['County'],row['Postcode']) if str(row["Name"]) != 'nan' else ('%s %s %s %s %s' % (int(row['Number']),row['Street'],row['Town'],row['County'],row['Postcode']) if str(row["Number"]) != 'nan' else f()), axis=1) df = df.drop(['Name', 'Number', 'Street', 'Town', 'County', 'Postcode'], axis=1).dropna().reset_index(drop=True) df.head() df.type.value_counts() df.type = df.type.str.lower() q = df.type == 'semi-detached' df.loc[q, 'type'] = 'semi detached' df.type.value_counts() sdf = pd.DataFrame.spatial.from_df(df, 'full_address') sdf.head()Visualisation Matplotlib syntaxm = gis.map() m.center = {'spatialReference': {'latestWkid': 3857, 'wkid': 102100},'x': 17740.16697718523,'y': 6833320.7425390035} m.zoom = 11 m sdf.spatial.plot(map_widget=m, renderer_type='u', # specify the unique value renderer using its notation 'u' col='type') # column to get unique values from bbox = sdf.spatial.bbox m.draw(shape = bbox) sdf.spatial.to_featurelayer('Berlin dev summit houses', gis=gis)Geoenrichmenthousing_item = gis.content.get('b92ce1f0a169498f8243056a88564125') housing_lyr = housing_item.layers[0] sdf = pd.DataFrame.spatial.from_layer(housing_lyr) sdf.head() sdf.spatial.sr sdf.spatial.project(4326) analysis_variables = [ 'EDUC01_CY', # 2017 POP age 16+ by Education: No qualifications 'EDUC02_CY', # 2017 POP age 16+ by Education: Level 1 qualifications 'EDUC03_CY', # 2017 POP age 16+ by Education: Level 2 qualifications 'EDUC04_CY', # 2017 POP age 16+ by Education: 
Level 3 qualifications 'EDUC05_CY', # 2017 POP age 16+ by Education: Level 4 qualifications and above 'UNEMP_CY', # 2016 unemployed population 'POPDENS_CY', # 2017 population density 'PPPC_CY', # 2017 Purchasing power by capita 'HINC01_CY', # Total households in lowest quintile (below £19,158) 'HINC01_CY', # Total households in 2nd quintile (£19,158 £28,123) 'HINC01_CY', # Total households in 3rd quintile (£28,124 to £38,084) 'HINC01_CY', # Total households in 4th quintile (£38,085 to £54,646) 'HINC01_CY', # Total households in 5th quintile (£54,646 to £19,158) 'HTYP01A_CY', # Households by type: Single person 'HTYP02A_CY', # Households by type: Married couple with dependent children 'HTYP03A_CY', # Households by type: Married couple with no children 'HTYP04A_CY', # Households by type: Cohabiting couple with dependent children 'HTYP05A_CY', # Households by type: Cohabiting couple with no dependent children 'HTYP06A_CY', # Households by type: Single parent with dependent children 'HTYP07A_CY', # Households by type: Single parent with no dependent children 'HTYP08A_CY' # Households by type: Other household types ] from arcgis.geoenrichment import enrich enriched_sdf = enrich(sdf, analysis_variables=analysis_variables) enriched_sdf.head() enriched_sdf.spatial.to_featurelayer('enriched houses')Gradient Matching Attack on a TF Classifier In this notebook, we will learn how to use ART to run a clean-label gradient matching poisoning attack on a neural network trained with Tensorflow. We will be training our data on a subset of the CIFAR-10 dataset. The methods described are derived from [this paper](https://arxiv.org/abs/2009.02276) by Geiping, et. al. 2020. Train a model to attackIn this example, we use a RESNET50 model on the CIFAR dataset.import numpy as np import os, sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) module_path = os.path.abspath(os.path.join('.')) if module_path not in sys.path: sys.path.append(module_path) from art.estimators.classification import TensorFlowV2Classifier from art.utils import load_cifar10 (x_train, y_train), (x_test, y_test), min_, max_ = load_cifar10() mean = np.mean(x_train,axis=(0,1,2,3)) std = np.std(x_train,axis=(0,1,2,3)) x_train = (x_train-mean)/(std+1e-7) x_test = (x_test-mean)/(std+1e-7) min_ = (min_-mean)/(std+1e-7) max_ = (max_-mean)/(std+1e-7) import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten from tensorflow.keras.preprocessing.image import ImageDataGenerator from tqdm.keras import TqdmCallback tf.get_logger().setLevel('ERROR') # Tweaked the model from https://github.com/calmisential/TensorFlow2.0_ResNet # MIT License def basic_block(x, filter_num, stride=1): conv1 = tf.keras.layers.Conv2D(filters=filter_num, kernel_size=(3, 3), strides=stride, padding="same") bn1 = tf.keras.layers.BatchNormalization() conv2 = tf.keras.layers.Conv2D(filters=filter_num, kernel_size=(3, 3), strides=1, padding="same") bn2 = tf.keras.layers.BatchNormalization() if stride != 1: downsample = tf.keras.Sequential() downsample.add(tf.keras.layers.Conv2D(filters=filter_num, kernel_size=(1, 1), strides=stride)) downsample.add(tf.keras.layers.BatchNormalization()) else: downsample = tf.keras.layers.Lambda(lambda x: x) residual = downsample(x) x = conv1(x) x = bn1(x) x = tf.nn.relu(x) x = conv2(x) x = bn2(x) output = tf.nn.relu(tf.keras.layers.add([residual, x])) return output def basic_block_layer(x, filter_num, blocks, stride=1): x = 
basic_block(x, filter_num, stride=stride) for _ in range(1, blocks): x = basic_block(x, filter_num, stride=1) return x def resnet(x, num_classes, layer_params): pad1 = tf.keras.layers.ZeroPadding2D(padding=1) conv1 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same") bn1 = tf.keras.layers.BatchNormalization() avgpool = tf.keras.layers.GlobalAveragePooling2D() fc = tf.keras.layers.Dense(units=num_classes, activation=tf.keras.activations.softmax) x = pad1(x) x = conv1(x) x = bn1(x) x = tf.nn.relu(x) x = basic_block_layer(x, filter_num=64, blocks=layer_params[0]) x = basic_block_layer(x, filter_num=128, blocks=layer_params[1], stride=2) x = basic_block_layer(x, filter_num=256, blocks=layer_params[2], stride=2) x = basic_block_layer(x, filter_num=512, blocks=layer_params[3], stride=2) x = avgpool(x) output = fc(x) return output def resnet_18(x, num_classes): return resnet(x, num_classes, layer_params=[2, 2, 2, 2]) def create_model(x_train, y_train, num_classes=10, batch_size=64, epochs=25, callbacks=[]): inputs = tf.keras.layers.Input(shape=x_train.shape[1:]) # Specify the dimensions outputs = resnet_18(inputs, num_classes) model = tf.keras.models.Model(inputs, outputs) opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9, nesterov=True) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False ) datagen.fit(x_train) callbacks = callbacks + [TqdmCallback(verbose=0)] model.fit(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size,epochs=epochs,verbose=0,callbacks=callbacks) return model model_path = "../../../models/cifar10-resnet18-notebook.h5" if not os.path.exists(model_path): model = create_model(x_train, y_train, epochs=80) model.save(model_path) else: model = tf.keras.models.load_model(model_path) model.evaluate(x_test, y_test) model_art = TensorFlowV2Classifier(model, nb_classes=10, input_shape=model.input_shape) print("Model and data preparation done.")313/313 [==============================] - 3s 8ms/step - loss: 0.4160 - accuracy: 0.9146 Model and data preparation done.Choose Target Image from Test Setfrom tensorflow.keras.utils import to_categorical # A trigger from class 0 will be classified into class 1. 
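# Note: gradient matching is a clean-label attack -- the labels of the poisoned
# training images are left untouched. Only a small fraction of the training images
# (percent_poison below) receives a bounded perturbation (epsilon) so that, after
# retraining, the trigger image drawn from class_source is predicted as class_target.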
class_source = 0 class_target = 1 index_target = np.where(y_test.argmax(axis=1)==class_source)[0][5] # Trigger sample x_trigger = x_test[index_target:index_target+1] y_trigger = to_categorical([class_target], num_classes=10)Poison Training Images to Misclassify the Trigger Imagefrom art.attacks.poisoning.gradient_matching_attack import GradientMatchingAttack epsilson = 0.01/(std+1e-7) attack = GradientMatchingAttack(model_art, percent_poison=0.05, max_trials=1, max_epochs=500, clip_values=(min_,max_), epsilon=epsilson, verbose=1) x_poison, y_poison = attack.poison(x_trigger, y_trigger, x_train, y_train)Examples of the trigger, an original sample, and the poisoned sampleimport matplotlib.pyplot as plt plt.imshow(x_trigger[0]*(std+1e-7)+mean) plt.title('Trigger image') plt.show() index_poisoned_example = np.where([np.any(p!=o) for (p,o) in zip(x_poison,x_train)])[0] plt.imshow(x_train[index_poisoned_example[0]]*(std+1e-7)+mean) plt.title('Original image') plt.show() plt.imshow(x_poison[index_poisoned_example[0]]*(std+1e-7)+mean) plt.title('Poisoned image') plt.show()Training with Poison Images These attacks allow adversaries who can poison your dataset the ability to mislabel any particular target instance of their choosing without manipulating labels.class TriggerTestCallback(tf.keras.callbacks.Callback): def __init__(self, x_trigger, y_trigger): super().__init__() self.x_trigger = x_trigger self.y_trigger = y_trigger def on_epoch_end(self, epoch, logs=None): y_ = self.model.predict(x_trigger) print(y_[0][np.argmax(y_trigger)]) logs["Trigger Prediction Score"] = y_[0][np.argmax(y_trigger)] model_poisoned = create_model(x_poison, y_poison, epochs=80, callbacks=[TriggerTestCallback(x_trigger, y_trigger)]) y_ = model_poisoned.predict(x_trigger) print("y_trigger:", y_trigger) print("y_:", y_) if np.argmax(y_trigger) == np.argmax(y_): print("Poisoning was successful.") else: print("Poisoning failed.")This cell takes 4-5 mins to load all data!git clone https://github.com/SayWhat69/Plutus.git %cd Plutus !pip3 install -r requirements.txt ! 
python3 plutus.py !pip install vanity-address from vanity_address.vanity_address import VanityAddressGenerator from pprint import pprint def callback(address): return address.startswith(b'111') # Generate an address address = VanityAddressGenerator.generate_one(callback=callback) address = 3FpYfDGJSdkMAvZvCrwPHDqdmGqUkTsJys print("Address:\t{address.address}\nPrivate key:\t{address.private_key}".format(address=address)) !sudo apt install snapd !sudo snap install vanitygen !ls !make !cp vanitygen /Feature Engineering# import updated data data = pd.read_csv("./outputs/updated_data_02.csv")#, header=None, names=["timestamp","chan0_raw","chan1_raw","chan0_hp","chan1_hp","quat_x","quat_y","quat_z","quat_w","gyro_x","gyro_y","gyro_z","acc_x","acc_y","acc_z","label","rep","trial"]) data data = data[["timestamp","chan0_hp","chan1_hp","gyro_x","gyro_y","gyro_z","acc_x","acc_y","acc_z","label","rep","trial"]]Sliding Window Segmentation# function to segment data into overlapping windows def windowed_view(arr, window, overlap): arr = np.asarray(arr) window_step = window - overlap new_shape = arr.shape[:-1] + ((arr.shape[-1] - overlap) // window_step, window) new_strides = (arr.strides[:-1] + (window_step * arr.strides[-1],) + arr.strides[-1:]) return as_strided(arr, shape=new_shape, strides=new_strides) # create overlapping segments for each feature # separate by trial so that windows don't contain discontinous data acc_x_all_windows = [] acc_y_all_windows = [] acc_z_all_windows = [] gyro_x_all_windows = [] gyro_y_all_windows = [] gyro_z_all_windows = [] chan0_hp_all_windows = [] chan1_hp_all_windows = [] for trial_num in sorted(data["trial"].unique()): mask = (data["trial"] == trial_num) acc_x_windows = windowed_view(data[mask].acc_x,500,250) acc_y_windows = windowed_view(data[mask].acc_y,500,250) acc_z_windows = windowed_view(data[mask].acc_z,500,250) gyro_x_windows = windowed_view(data[mask].gyro_x,500,250) gyro_y_windows = windowed_view(data[mask].gyro_y,500,250) gyro_z_windows = windowed_view(data[mask].gyro_z,500,250) chan0_hp_windows = windowed_view(data[mask].chan0_hp,500,250) chan1_hp_windows = windowed_view(data[mask].chan1_hp,500,250) acc_x_all_windows.append(acc_x_windows.tolist()) acc_y_all_windows.append(acc_y_windows.tolist()) acc_z_all_windows.append(acc_z_windows.tolist()) gyro_x_all_windows.append(gyro_x_windows.tolist()) gyro_y_all_windows.append(gyro_y_windows.tolist()) gyro_z_all_windows.append(gyro_z_windows.tolist()) chan0_hp_all_windows.append(chan0_hp_windows.tolist()) chan1_hp_all_windows.append(chan1_hp_windows.tolist()) # flatten nested lists acc_x_all_windows = [item for sublist in acc_x_all_windows for item in sublist] acc_y_all_windows = [item for sublist in acc_y_all_windows for item in sublist] acc_z_all_windows = [item for sublist in acc_z_all_windows for item in sublist] gyro_x_all_windows = [item for sublist in gyro_x_all_windows for item in sublist] gyro_y_all_windows = [item for sublist in gyro_y_all_windows for item in sublist] gyro_z_all_windows = [item for sublist in gyro_z_all_windows for item in sublist] chan0_hp_all_windows = [item for sublist in chan0_hp_all_windows for item in sublist] chan1_hp_all_windows = [item for sublist in chan1_hp_all_windows for item in sublist]Calculate features for each window# zero_crossings function def zero_crossings(x): ZC=0; for pos in range(len(x)-1): if ((x[pos] > 0 and x[pos+1] < 0) or (x[pos] < 0 and x[pos+1] > 0)) and (np.abs(x[pos]-x[pos+1]) >= 0): ZC=ZC+1; return ZC # Integral of window acc_x_integrals = 
scipy.integrate.simps(acc_x_all_windows) acc_y_integrals = scipy.integrate.simps(acc_y_all_windows) acc_z_integrals = scipy.integrate.simps(acc_z_all_windows) gyro_x_integrals = scipy.integrate.simps(gyro_x_all_windows) gyro_y_integrals = scipy.integrate.simps(gyro_y_all_windows) gyro_z_integrals = scipy.integrate.simps(gyro_z_all_windows) chan0_integrals = scipy.integrate.simps(chan0_hp_all_windows) chan1_integrals = scipy.integrate.simps(chan0_hp_all_windows) # Variance of window acc_x_variance = [statistics.variance(x) for x in acc_x_all_windows] acc_y_variance = [statistics.variance(x) for x in acc_y_all_windows] acc_z_variance = [statistics.variance(x) for x in acc_z_all_windows] gyro_x_variance = [statistics.variance(x) for x in gyro_x_all_windows] gyro_y_variance = [statistics.variance(x) for x in gyro_y_all_windows] gyro_z_variance = [statistics.variance(x) for x in gyro_z_all_windows] chan0_ln_variance = [statistics.variance(x) for x in chan0_hp_all_windows] chan1_ln_variance = [statistics.variance(x) for x in chan1_hp_all_windows] # Absolute value of mean of window acc_x_mean = [np.abs(np.mean(x)) for x in acc_x_all_windows] acc_y_mean = [np.abs(np.mean(x)) for x in acc_y_all_windows] acc_z_mean = [np.abs(np.mean(x)) for x in acc_z_all_windows] gyro_x_mean = [np.abs(np.mean(x)) for x in gyro_x_all_windows] gyro_y_mean = [np.abs(np.mean(x)) for x in gyro_y_all_windows] gyro_z_mean = [np.abs(np.mean(x)) for x in gyro_z_all_windows] chan0_mean = [np.abs(np.mean(x)) for x in chan0_hp_all_windows] chan1_mean = [np.abs(np.mean(x)) for x in chan1_hp_all_windows] # Zero-crossings count per window acc_x_zc = [zero_crossings(x) for x in acc_x_all_windows] acc_y_zc = [zero_crossings(x) for x in acc_y_all_windows] acc_z_zc = [zero_crossings(x) for x in acc_z_all_windows] gyro_x_zc = [zero_crossings(x) for x in gyro_x_all_windows] gyro_y_zc = [zero_crossings(x) for x in gyro_y_all_windows] gyro_z_zc = [zero_crossings(x) for x in gyro_z_all_windows] # create dataframe of all window features data_features = pd.DataFrame({"acc_x_integrals": acc_x_integrals, "acc_x_integrals": acc_y_integrals, "acc_x_integrals": acc_z_integrals, "gyro_x_integrals": gyro_x_integrals, "gyro_y_integrals": gyro_y_integrals, "gyro_z_integrals": gyro_z_integrals, "acc_x_variance": acc_x_variance, "acc_y_variance": acc_y_variance, "acc_z_variance": acc_z_variance, "gyro_x_variance": gyro_x_variance, "gyro_y_variance": gyro_y_variance, "gyro_z_variance": gyro_z_variance, "acc_x_mean": acc_x_mean, "acc_y_mean": acc_y_mean, "acc_z_mean": acc_z_mean, "gyro_x_mean": gyro_x_mean, "gyro_y_mean": gyro_y_mean, "gyro_z_mean": gyro_z_mean, "acc_x_zc": acc_x_zc, "acc_y_zc": acc_y_zc, "acc_z_zc": acc_z_zc, "gyro_x_zc": gyro_x_zc, "gyro_y_zc": gyro_y_zc, "gyro_z_zc": gyro_z_zc, "chan0_integrals": chan0_integrals, "chan1_integrals": chan1_integrals, "chan0_ln_variance": chan0_ln_variance, "chan1_ln_variance": chan1_ln_variance, "chan0_mean": chan0_mean, "chan1_mean": chan1_mean} )Principal Component Analysis# Instantiate our StandardScaler ss = StandardScaler() # Standardize feature data data_scaled = ss.fit_transform(data_features) # Instantiate PCA. 
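# n_components is left at its default (None), so PCA keeps every component; the
# explained-variance ratios printed further down can then guide how many components
# to retain. Standardizing the features first keeps large-scale columns (e.g. the
# window integrals) from dominating the principal components.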
pca = PCA(random_state = 42) # Fit PCA pca.fit(data_scaled) PCA(copy=True, iterated_power='auto', n_components=None, random_state=42, svd_solver='auto', tol=0.0, whiten=False) # Transform PCA data_pca = pca.transform(data_scaled)Variance explained by componentvar_exp = pca.explained_variance_ratio_ print(f'Explained variance: {np.round(var_exp,3)}') # Generate the cumulative explained variance. cum_var_exp = np.cumsum(var_exp) print(f'Cumulative explained variance: {np.round(cum_var_exp,3)}')Explained variance: [0.26 0.178 0.121 0.073 0.065 0.053 0.045 0.042 0.032 0.028 0.024 0.017 0.012 0.011 0.009 0.008 0.007 0.005 0.003 0.002 0.002 0.001 0.001 0.001 0. 0. 0. 0. ] Cumulative explained variance: [0.26 0.438 0.559 0.632 0.697 0.75 0.795 0.837 0.869 0.897 0.921 0.938 0.95 0.961 0.97 0.978 0.984 0.989 0.992 0.994 0.996 0.998 0.998 0.999 1. 1. 1. 1. ]Save out pca dataframe as csv# pd.DataFrame(data_pca).to_csv(f'./outputs/data_pca_04.csv', index=False)3.2 statsmodels의 전처리 기능 StatsModels 패키지는 통계분석과 관련된 R의 기능을 파이썬으로 옮겨오기 위한 패키지이다. R에는 데이터프레임과 문자열 기호를 이용하여 회귀모형을 정의하는 방법이 존재한다. StatsModels 패키지도 이러한 R 스타일 모형 정의 방법을 지원한다. 이러한 지원을 가능하게 하는 것은 patsy라는 패키지 덕분이다. 여기에서는 patsy 패키지의 간단한 사용법과 이를 이용하여 StatsModels에서 회귀모형을 정의하는 방법을 설명한다. patsy 패키지 소개 patsy 패키지는 회귀분석 전처리를 위한 패키지로 데이터프레임을 가공하여 인코딩, 변환 등을 쉽게 해주는 기능을 제공한다.from patsy import * import statsmodels.api as sm from patsy import *patsy 패키지의 `dmatrix`라는 명령을 사용하면 실험 설계 행렬(experiment design matrix)을 간단히 만들수 있다. `dmatrix`에 다음과 같이 모형 정의 문자열 `formula`와 원 데이터를 담은 데이터프레임 `data`을 입력하면 `formula`에서 지정한 대로 변환된 데이터 `data_transformed`를 출력한다. ```data_transformed = dmatrix(formula, data)``` patsy 패키지가 제공하는 `demo_data` 명령으로 다음과 같이 예제 데이터 `x1`, `x2`, `y`를 만들자. `dmatrix`의 첫번째 기능은 자동 상수항 결합 기능이다. 대상이 되는 데이터에 자동으로 Intecept라는 이름의 데이터 열을 추가한다. 다음 예제에서 스타일 문자열은 단순히 `"x1"`이다. 스타일 문자열은 데이터와 연산자로 이루어지는데 데이터는 변수명 혹은 데이터프레임 열 이름으로 지정한다. 변수명으로 지정하는 경우에는 현재의 이름 공간(name space)에서 변수를 찾고 데이터프레임 열 이름을 지정하는 경우에는 `data`라는 인수에 데이터프레임을 넣어주어야 한다.df = pd.DataFrame(demo_data("x1", "x2", "y")) df df = pd.DataFrame(demo_data("x1", "x2", "y")) df dmatrix("x1", df) dmatrix("x1",df)R-style formula 연산자모형정의 연산자 `formula`에 복수의 데이터를 지정하는 경우에는 다음과 같은 연산자를 포함해야 한다.| 기호 | 설명 ||-|-||1, 0| 바이어스(bias, intercept) 추가 및 제거 ||`+`| 설명 변수 추가 ||`-`| 설명 변수 제거 ||`:`| 상호작용(interaction) ||`*`| `a*b = a + b + a:b` ||`/`| `a/b = a + a:b` | 상수항을 제외하고자 하는 경우에는 `- 1` 또는 `+ 0`을 써주어야 한다.dmatrix("x1 - 1", df) dmatrix("x1 - 1", df) dmatrix("x1 + 0", df)데이터를 추가하는 경우에는 `+` 연산자를 사용한다.dmatrix("x1+x2",df) dmatrix("x1 + x2", df)마찬가지로 `-1` 또는 `+0`이 있으면 상수항이 없어진다.dmatrix("x1 + x2 - 1", df)두 변수의 곱을 새로운 변수로 추가하려면 상호작용(interaction) 연산자 `:`를 사용한다.dmatrix("x1 + x2 + x1:x2", df) dmatrix("x1+x2+x1:x2",df)위 식은 다음과 같이 `*` 연산자로 간단하게 나타낼 수도 있다.dmatrix("x1 * x2", df) dmatrix("x1 * x2", df)`/` 연산자는 두번째 데이터를 빼고 출력한다.dmatrix("x1/x2",df) dmatrix("x1 / x2", df)수학 변환 `dmatrix`에서는 일반적인 수학 변환(transform)도 가능하다. numpy 함수 뿐 아니라 사용자 정의 함수도 사용할 수 있다.dmatrix("x1+np.log(np.abs(x2))",df) dmatrix("x1+np.log(np.abs(x2))",df) def doubleit(x): return 2*x dmatrix("doubleit(x1)",df) def doubleit(x): return 2 * x dmatrix("doubleit(x1)", df)상태 보존 변환 patsy의 가장 강력한 기능 중의 하나는 상태 보존 변환(stateful trasform)이 가능하다는 점이다. 
예를 들어 다음 변환 함수는 평균을 계산하여 빼주거나 및 표준편차를 계산하여 나누어주는데 이 때 계산한 평균과 표준편차를 내부에 상태변수로 저장한다.* `center(x)`: 평균 제거* `standardize(x)`: 평균 제거 및 표준편차로 스케일링* `scale(x)`: `standardize(x)` 과 같음 예를 들어 x1 데이터의 평균을 제거하는 변환은 다음과 같다.dm = dmatrix("center(x1)", df) dm dm = dmatrix("center(x1)",df) dm이 변환 연산은 다음과 같이 x1 데이터에서 x1의 평균을 빼는 것이다.df.x1 - np.mean(df.x1) df.x1 - np.mean(df.x1)그런데 이 때 사용한 평균값은 `design_info`라는 속성에 상태변수(state variable)로서 저장된다.dm type(dm.design_info)이 값을 상태변수로 저장하는 이유는 다음과 같다.어떤 데이터 $X_{train}$을 학습용 데이터로 사용하여 예측모형으로 만든다고 하자. 이 때 학습성능을 좋게 하기 위해 $X_{train}$에서 $X_{train}$의 평균 $\bar{X}_{train}$(예를 들어 100)을 뺀 평균 제거 데이터 $X_{train} - 100$를 원래의 데이터 대신 학습용 데이터로 사용하여 모형을 만드는 경우가 있다. 이를 전처리 단계라고 한다.학습이 끝난 후 이 모형을 사용하여 실제 예측을 하자. 새로운 검증용 데이터 $X_{test}$를 이 모형에 넣으려면 모형을 학습할 때 사용한 것과 같은 전처리를 해야 한다. 즉, $X_{test}$에서 $X_{train}$의 평균인 $100$을 뺀 $X_{test}-100$을 입력으로 넣어서 출력을 계산해야 한다. 이 때 $X_{test}$의 평균이 아니라 $X_{train}$의 평균을 사용한다는 점에 주의한다. 이렇게 하기 위해서는 전처리 과정에서 계산한 $X_{train}$의 평균값 $100$을 기억하고 있어야 한다. patsy 패키지에서는 `center` 변환을 했을 때 사용한 평균값을 내부에 저장하고 있기 때문에 이러한 일을 할 수 있다. 예를 들어 다음처럼 검증용의 새로운 데이터가 있을 때,df_new = df.copy() df_new["x1"] = df_new["x1"] * 10 df_new df_new=df.copy() df_new["x1"]=df_new["x1"]*10 df_new`build_design_matrices` 명령을 사용하면 이미 저장된 x1의 평균을 이용하여 같은 변환을 한다.build_design_matrices([dm.design_info], df_new)이 값은 다음 계산 결과와 같다.df_new.x1 - np.mean(df.x1)평균값을 다시 새롭게 구해서 계산한 것과 다르다는 것을 알 수있다.dmatrix('center(x1)',df_new) dmatrix("center(x1)", df_new) df_new.x1 - np.mean(df_new.x1)변수 보호 함수를 사용한 변수 변환 이외에도 모형 정의 문자열 자체내에 연산기호를 넣어 연산한 값을 만드는 것도 가능하다. 이 때에는 모형정의 연산자와 혼동되지 않도록 `I()` 연산자를 추가해야 한다.dmatrix("I(x1 + x2)", df) dmatrix("I(x1+x2)",df)이 값을 다음 식과 비교하면 `I()`의 기능을 확실히 알 수 있다.dmatrix("x1 + x2", df) dmatrix("x1+x2",df)다항회귀 `I()` 연산자를 활용하면 다항회귀(polynomial regression)도 할 수 있다.dmatrix("x1+I(x1*x1)+I(x1**3)+I(x1**4)",df) dmatrix("x1+I(x1*x1)+I(x1**3)+I(x1**4)",df) dmatrix("x1 + I(x1*x1) + I(x1**3) + I(x1**4)", df)`OLS.from_formula` 메서드 선형회귀분석을 위한 `OLS` 클래스에는 모형 정의 문자열을 사용할 수 있는 `from_formula`라는 클래스 메서드가 있다. 이 메서드를 쓰면 사용자가 데이터 행렬을 직접 정의하지 않고 모형 정의 문자열만으로 선형회귀모형을 만드는 것이 가능하다. 선형 회귀모형을 formula로 정의할 때는 `~` 연산자를 사용한다. `~` 연산자의 왼쪽에는 종속 변수, 오른쪽에는 독립 변수를 넣어서 정의한다. 예를 들어 다음과 같은 데이터가 있을 때,np.random.seed(0) x1 = np.random.rand(20) + 10 x2 = np.random.rand(20) * 10 y = x1 + 2 * x2 + np.random.randn(20) df4 = pd.DataFrame(np.array([x1, x2, y]).T, columns=["x1", "x2", "y"]) np.random.seed(0) x1 = np.random.rand(20) + 10 x2 = np.random.rand(20) * 10 y = x1 + 2 * x2 + np.random.randn(20) df4 = pd.DataFrame(np.array([x1, x2, y]).T, columns=["x1", "x2", "y"])다음 두가지 방법으로 만든 모형은 동일하다.# 직접 데이터 행렬을 만드는 경우 dfy = df4.iloc[:, -1] dfX = sm.add_constant(df4.iloc[:, :-1]) model1 = sm.OLS(dfy, dfX) print(model1.fit().summary()) # 직접 데이터 행렬을 만드는 경우 dfy = df4.iloc[:, -1] dfX = sm.add_constant(df4.iloc[:, :-1]) model1 = sm.OLS(dfy, dfX) print(model1.fit().summary()) # 모형 정의 문자열을 사용하는 경우 model2 = sm.OLS.from_formula("y ~ x1 + x2", data=df4) print(model2.fit().summary())OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.967 Model: OLS Adj. R-squared: 0.963 Method: Least Squares F-statistic: 246.8 Date: Mon, 17 Jun 2019 Prob (F-statistic): 2.75e-13 Time: 15:59:58 Log-Likelihood: -29.000 No. 
Observations: 20 AIC: 64.00 Df Residuals: 17 BIC: 66.99 Df Model: 2 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]插值from sympy import *拉格朗日插值def lagrange_interpolate(points: list, simplify_result=True, verbose=False): """拉格朗日插值 Args: points: list, [(x1, y1), (x2, y2), ..., (xn, yn)] simplify_result: bool, 化简最终结果, default True verbose: bool, 输出每一步的结果, default False Returns: L: sympy object of Symbol('x'), 插值多项式 $L(x)$ """ x = Symbol('x') L = 0 # 插值多项式 for i, point in enumerate(points): xi, yi = point li = 1 for j in range(len(points)): if j == i: continue xj, yj = points[j] li *= (x - xj) / (xi - xj) L += yi * li if verbose: print(f"l_{i}(x) = ", simplify(expand(yi * li))) if simplify_result: L = simplify(L) return L points = [(11, 0.190809), (12, 0.207912), (13, 0.224951)] L = lagrange_interpolate(points) print('插值多项式:', L) print('Hypothesis: ', L.subs(Symbol('x'), 11.5)) print('Actual vaule:', sin(rad(11.5)).evalf())插值多项式: -3.20000000000042e-5*x**2 + 0.0178390000000004*x - 0.00154799999999966 Hypothesis: 0.199368500000004 Actual vaule: 0.199367934417197牛顿插值# 差商缓存 __difference_quotient_cache = {} def difference_quotient(f, xs: list, verbose=False): """求差商 $f[xs...]$ Args: f: function, 函数 xs: list, 要计算的差商的 $f[x0, x1, ...]$ 的参数值:[x0, x1, ...] verbose: bool, 打印出每一步计算差商的值, default False Returns: dq: sympy object, 差商值 """ # 尝试从缓存读取 __key = str([(x, f(x)) for x in xs]) if __key in __difference_quotient_cache: if verbose: print(f"cached: f{xs}: {__difference_quotient_cache[__key]}") return __difference_quotient_cache[__key] if len(xs) == 1: # 0阶 dq = sympify(f(xs[0])) else: # n 阶 dq_h = difference_quotient(f, xs[:-1], verbose) dq_l = difference_quotient(f, xs[1: ], verbose) dq = (dq_l - dq_h) / (xs[-1] - xs[0]) if verbose: print(f"f{xs}: {dq}") # 写入缓存 __difference_quotient_cache[__key] = dq return dq def newton_interpolate(points: list, N_start=0, points_start=[], simplify_result=True, verbose=False): """牛顿插值 Args: points: [(x1, y1), (x2, y2), ..., (xn, yn)]: 插值点 N_start: a sympy object of Symbol('x'), 起始插值多项式。 points_start: [(x1, y1), (x2, y2), ..., (xn, yn)]: 计算 N_start 用的插值点 该函数中做的插值会「承袭」这个用 points_start 计算的 N_start,在其基础上用新增插值点 points 去改进。 default 0 (从头开始构建). 
simplify_result: bool, 化简最终结果, default True verbose: 打印出每一步计算差商的值, default False Returns: N: a sympy object of Symbol('x'), 插值多项式 $N(x)$ """ x = Symbol('x') # sympify points # for i in range(len(points)): # points[i] = sympify(points[i][0]), sympify(points[i][1]) f = lambda x: dict(points + points_start)[x] xs = [p[0] for p in points_start] # 承袭的插值点 N = N_start # 承袭的插值多项式 for point in points: xs.append(point[0]) N += difference_quotient(f, xs, verbose) * prod([x - xi for xi in xs[:-1]]) if simplify_result: N = simplify(N) return N points = [(11, 0.190809), (12, 0.207912), (13, 0.224951)] N = newton_interpolate(points) print('插值多项式:', N) print('Hypothesis: ', N.subs(Symbol('x'), 11.5)) print('Actual vaule:', sin(rad(11.5)).evalf()) # 承袭之前的 N: print('承袭:') new_points = [(11.2, sin(rad(11.2)).evalf()), (11.7, sin(rad(11.7)).evalf())] N1 = newton_interpolate(new_points, N_start=N, points_start=points, verbose=True) print('插值多项式:', N1) print('Hypothesis: ', N1.subs(Symbol('x'), 11.5).evalf()) print('Actual vaule:', sin(rad(11.5)).evalf()) __difference_quotient_cache三弯矩法import numpy as np def spline3_interpolate(points: list, simplify_result=True): """三弯矩法插值 三弯矩法进行三次样条插值。使用自由边界条件。 Args: points: list, [(x1, y1), (x2, y2), ..., (xn, yn)] simplify_result: bool, 化简最终结果, default True Returns: L: sympy Piecewise object of Symbol('x'), 插值多项式 $L(x)$ """ # 排序给定的点 ps = sorted(points, key=lambda p: p[0]) n = len(points) # points to a function _f_dict = dict(ps) def f(x): return _f_dict[x] # $h_k = x_{k+1} - x_k$ def h(k): return ps[k+1][0] - ps[k][0] hks = [h(0)] # 用方程 D * M = d 解出 M D = np.zeros((n, n)) d = np.zeros(n) for k in range(1, n-1): # $h_k$, $h_{k-1}$ hks.append(h(k)) hk, hks1 = hks[k], hks[k-1] # $\mu_k$ -> mu, $\lambda_k$ -> ld _fra = hks1 + hk mu = hks1 / _fra ld = hk / _fra # $\mu_kM_{k-1}+2M_k+\lambda_kM_{k+1} = d_k$ D[k, k-1] = mu D[k, k] = 2 D[k, k+1] = ld d[k] = 6 * difference_quotient(f, [ps[k-1][0], ps[k][0], ps[k+1][0]]) # 边界条件 # Natural Boundary D[0, 0] = 1 D[n-1, n-1] = 1 d[0] = 0 d[n-1] = 0 # 解出 M M = np.linalg.solve(D, d) # 插值函数 piecewises = [] for k in range(n-1): s = M[k] * (ps[k+1][0] - _x) ** 3 / (6 * hks[k]) s += M[k+1] * (_x - ps[k][0]) ** 3 / (6 * hks[k]) s += (ps[k][1] - M[k] * hks[k]**2 / 6) * (ps[k+1][0] - _x) / hks[k] s += (ps[k+1][1] - M[k+1] * hks[k]**2 / 6) * (_x - ps[k][0]) / hks[k] if simplify_result: s = simplify(s) piecewises.append((s, And(_x >= ps[k][0], _x <= ps[k+1][0]))) return Piecewise(*piecewises)题目2.区间 $[-5, 5]$ 作等距划分:...def f(x): return 1 / (1 + x ** 2) def get_points(s, n, f): """在闭区间 s 作 n 等距划分, 输出 n+1 个插值点 [(x0, f(x0)), (x1, f(x1)), ..., (xn, f(xn))] e.g. 
get_points([-5, 5], 2, lambda x: 1 / (1 + x ** 2)) output: [(-5.0, 0.038461538461538464), (0.0, 1.0), (5.0, 0.038461538461538464)] """ h = (s[1] - s[0]) / n xs = [s[0] + k * h for k in range(n+1)] points = [(x, f(x)) for x in xs] return points hps = {} # 多项式插值 hss = {} # 三次样条插值 for n in [5, 10, 20]: ps = get_points([-5, 5], n, f) # 多项式插值 # h = lagrange_interpolate(ps) h = newton_interpolate(ps) hps[n] = h # 三次样条插值 s = spline3_interpolate(ps) hss[n] = s print('多项式插值:') for k in hps: print(f'n = {k}:\t{hps[k]}') print('\n三次样条插值:') for k in hss: print(f'n = {k}:\t{hss[k]}') # 画图 def show_plot_range(r, hs, title): """画图比较插值结果 r:区间 hs: 插值结果组 {n: h(x)} title: 图标题 """ x = Symbol('x') colors = (c for c in [0, 'b', 'g', 'r']) plt = plot(f(x), (x, *r), show=False, line_color=next(colors), label='f(x)', legend=True, title=title) for n in hs: p = plot(hs[n], (x, *r), show=False, line_color=next(colors), label=f'n={n}', legend=True) plt.extend(p) plt.show() # 画图:多项式插值 show_plot_range((-5, 5), hps, 'Polynomial Interpolation') show_plot_range((-5, -3), hps, 'Polynomial Interpolation') show_plot_range((-1, 1), hps, 'Polynomial Interpolation') # 画图:三次样条插值 show_plot_range((-5, 5), hss, 'Cubic Spline Interpolation') show_plot_range((-5, -3), hss, 'Cubic Spline Interpolation') show_plot_range((-1, 1), hss, 'Cubic Spline Interpolation')拟合def normalEquationFit(X, y): """最小二乘拟合 Compute the normal equations: $\theta = (X^TX)^{-1}X^Ty$ Args: X: np.array, $$ X = \left[\begin{array}{c} - (x^{(1)})^T - \\ - (x^{(2)})^T - \\ \vdots \\ - (x^{(m)})^T - \\ \end{array}\right] $$ y: np.array, $y = [y^{(1)},y^{(2)},\dots,y^{(m)}]^T$ return theta. """ return np.dot(np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T), y)题目:3.4.2 数据拟合 1.x = np.array([1, 2, 3, 4, 5]) y = np.array([4, 4.5, 6, 8, 8.5]) X = np.c_[np.ones((len(x), 1)), x] theta = normalEquationFit(X, y) print(theta) pred_f = lambda t: theta[0] + theta[1] * t # 用梯度下降检查 a, b = 0, 0 num_epoch = 10000 learning_rate = 5e-4 for e in range(num_epoch): y_pred = a * x + b grad_a = 2 * (y_pred - y).dot(x) grad_b = 2 * (y_pred - y).sum() a = a - learning_rate * grad_a b = b - learning_rate * grad_b print(a, b) # 画图 import matplotlib.pyplot as plt pred_x = [k/10 for k in range(50)] pred_y = [pred_f(x) for x in pred_x] plt.plot(x, y, 'bo', label='date') plt.plot(pred_x, pred_y, 'r-', label='pred') plt.legend() plt.show()题目:3.4.2 数据拟合 2.x = np.array([2, 3, 4, 7, 8, 10, 11, 14, 16, 18, 19]) y = np.array([106.42, 108.2, 109.5, 110, 109.93, 110.49, 110.59, 110.6, 110.76, 111, 111.2])(1) $y(x) = c_0 + c_1x + c_2x^2$数据预处理:# X: [1, x, x**2] X = np.c_[np.ones((len(x), 1)), x, x ** 2] print(X)[[ 1. 2. 4.] [ 1. 3. 9.] [ 1. 4. 16.] [ 1. 7. 49.] [ 1. 8. 64.] [ 1. 10. 100.] [ 1. 11. 121.] [ 1. 14. 196.] [ 1. 16. 256.] [ 1. 18. 324.] [ 1. 19. 361.]]定义预测函数、代价函数:def hypothesis(x, theta): return np.dot(theta, np.array([1, x, x**2])) def cost(x, y, hypothesis, theta): return np.sum( (hypothesis(x, theta) - y) ** 2 )最小二乘正规方程拟合:# theta: [c0, c1, c2] theta = normalEquationFit(X, y) theta计算代价:cost(x, y, hypothesis, theta)可视化拟合:x_test = np.linspace(0, 25, num=100, endpoint=True) y_pred = hypothesis(x_test, theta) plt.plot(x, y, 'bo', label='date') plt.plot(x_test, y_pred, 'r-', label='pred') plt.legend() plt.show()(2) $y(x) = ae^{\frac{b}{x}}$取对数:$\ln(y) = \ln(a) + \frac{b}{x}$数据预处理:# X: [1, 1/x] X = np.c_[np.ones((len(x), 1)), 1 / x] print(X)[[1. 0.5 ] [1. 0.33333333] [1. 0.25 ] [1. 0.14285714] [1. 0.125 ] [1. 0.1 ] [1. 0.09090909] [1. 0.07142857] [1. 0.0625 ] [1. 0.05555556] [1. 
0.05263158]]新的预测函数:def hypothesis(x, theta): a, b = theta h = np.exp(a) * np.exp(b / x) return h拟合:# theta: [ln(a), b] theta = normalEquationFit(X, np.log(y)) theta代价计算:cost(x, y, hypothesis, theta) x_test = np.linspace(0, 25, num=100, endpoint=True) y_pred = hypothesis(x_test, theta) plt.plot(x, y, 'bo', label='date') plt.plot(x_test, y_pred, 'r-', label='pred') plt.ylim(90, 120) plt.legend() plt.show()Only Positive feedback connections: With the matrix [[1, 1], [1, 1]], the left and right signals both rise steeply, with the left slightly ahead of the right. They then reach a horizontal asymptote. Left signal encounters a discontinuity before its slope begins to decrease. The left approaches a value slightly above 1. The right approaches a value slightly below 1.![Alt text](positiveFunct.png)Feedback connections set to zero: With the matrix [[0, 0], [0, 0]], left spikes then returns to near zero. Right dips briefly then returns to near zero.![Alt text](negativeFunct.png)# YOUR CODE HERE # plot the raster plot of the spiking neurons, see #https://www.nengo.ai/nengo-extras/examples/plot_spikes.html plt.figure() plot_spikes(sim.trange(), sim.data[spikes_probe]) plt.title("Neuron Sike Raster") plt.xlabel("Time(s)") plt.ylabel("Neuron ID")Only Positive feedback connections: There seem to be more neurons that do not ever spike. Neurons which do spike spike more frequently.![Alt text](positiveSpike.png)Feedback connections set to zero: Similar to only positive. Seems to have less spikes overall.![Alt text](negativeSpike.png)# YOUR CODE HERE # plot the Phase plot for your neuron population, using the decoded_output of the neurons. This is a [n_time,2] vector # Please look at #https://www.nengo.ai/nengo/examples/dynamics/oscillator.html phaseData = sim.data[neuron_probe] plt.figure() plt.plot(phaseData[:, 0], phaseData[:, 1]) plt.title("Signal Intensity as a Coordinate of Left and Right Intensity") plt.xlabel("Left Intensity") plt.ylabel("Right Intensity")Building your own Chatbot Why should I build the service again? Related: Why can't I use FB/MSFT/some other cloud service? 
Word Vectors + Heuristic - Fancy Stuff = Quick Working Codeimport numpy as np import gensim print(f"Gensim version: {gensim.__version__}") from tqdm import tqdm class TqdmUpTo(tqdm): def update_to(self, b=1, bsize=1, tsize=None): if tsize is not None: self.total = tsize self.update(b * bsize - self.n) def get_data(url, filename): """ Download data if the filename does not exist already Uses Tqdm to show download progress """ import os from urllib.request import urlretrieve if not os.path.exists(filename): dirname = os.path.dirname(filename) if not os.path.exists(dirname): os.makedirs(dirname) with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t: urlretrieve(url, filename, reporthook=t.update_to) else: print("File already exists, please remove if you wish to download again") embedding_url = 'http://nlp.stanford.edu/data/glove.6B.zip' get_data(embedding_url, 'data/glove.6B.zip') # !unzip data/glove.6B.zip # !mv -v glove.6B.300d.txt data/glove.6B.300d.txt # !mv -v glove.6B.200d.txt data/glove.6B.200d.txt # !mv -v glove.6B.100d.txt data/glove.6B.100d.txt # !mv -v glove.6B.50d.txt data/glove.6B.50d.txt from gensim.scripts.glove2word2vec import glove2word2vec glove_input_file = 'data/glove.6B.300d.txt' word2vec_output_file = 'data/glove.6B.300d.txt.word2vec' import os if not os.path.exists(word2vec_output_file): glove2word2vec(glove_input_file, word2vec_output_file) %%time from gensim.models import KeyedVectors filename = word2vec_output_file embed = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False) assert embed['awesome'] is not None'awesome', this works! Use Case: Food Order Bot Do word vectors even work for this?cuisine_refs = ["mexican", "thai", "british", "american", "italian"] sample_sentence = "I’m looking for a cheap Indian or Chinese place in Indiranagar" tokens = sample_sentence.split() tokens = [x.lower().strip() for x in tokens] threshold = 18.3 found = [] for term in tokens: if term in embed.vocab: scores = [] for C in cuisine_refs: scores.append(np.dot(embed[C], embed[term].T)) # hint replace above above np.dot with: # scores.append(embed.cosine_similarities(, )) mean_score = np.mean(scores) print(f"{term}: {mean_score}") if mean_score > threshold: found.append(term) print(found)looking: 7.448504447937012 for: 10.627421379089355 a: 11.809560775756836 cheap: 7.09670877456665 indian: 18.64516258239746 or: 9.692893981933594 chinese: 19.09498405456543 place: 7.651237487792969 in: 10.085711479187012 ['indian', 'chinese']Next Stop: Classifying user intentdef sum_vecs(embed,text): tokens = text.split(' ') vec = np.zeros(embed.vector_size) for idx, term in enumerate(tokens): if term in embed.vocab: vec = vec + embed[term] return vec sentence_vector = sum_vecs(embed, sample_sentence) print(sentence_vector.shape) data={ "greet": { "examples" : ["hello","hey there","howdy","hello","hi","hey","hey ho"], "centroid" : None }, "inform": { "examples" : [ "i'd like something asian", "maybe korean", "what mexican options do i have", "what italian options do i have", "i want korean food", "i want german food", "i want vegetarian food", "i would like chinese food", "i would like indian food", "what japanese options do i have", "korean please", "what about indian", "i want some chicken", "maybe thai", "i'd like something vegetarian", "show me french restaurants", "show me a cool malaysian spot", "where can I get some spicy food" ], "centroid" : None }, "deny": { "examples" : [ "nah", "any other places ?", "anything else", "no thanks" "not that one", "i 
do not like that place", "something else please", "no please show other options" ], "centroid" : None }, "affirm":{ "examples":[ "yeah", "that works", "good, thanks", "this works", "sounds good", "thanks, this is perfect", "just what I wanted" ], "centroid": None } } def get_centroid(embed, examples): C = np.zeros((len(examples),embed.vector_size)) for idx, text in enumerate(examples): C[idx,:] = sum_vecs(embed,text) centroid = np.mean(C,axis=0) assert centroid.shape[0] == embed.vector_size return centroid # Adding Centroid to data dictionary for label in data.keys(): data[label]["centroid"] = get_centroid(embed,data[label]["examples"]) for label in data.keys(): print(f"{label}: {data[label]['examples']}") def get_intent(embed,data, text): intents = list(data.keys()) vec = sum_vecs(embed,text) scores = np.array([ np.linalg.norm(vec-data[label]["centroid"]) for label in intents]) return intents[np.argmin(scores)] for text in ["hey ","i am looking for chinese food","not for me", "ok, this is good"]: print(f"text : '{text}', predicted_label : '{get_intent(embed, data, text)}'")text : 'hey ', predicted_label : 'greet' text : 'i am looking for chinese food', predicted_label : 'inform' text : 'not for me', predicted_label : 'deny' text : 'ok, this is good', predicted_label : 'affirm'Bot Responsestemplates = { "utter_greet": ["hey there!", "Hey! How you doin'? "], "utter_options": ["ok, let me check some more"], "utter_goodbye": ["Great, I'll go now. Bye bye", "bye bye", "Goodbye!"], "utter_default": ["Sorry, I didn't quite follow"], "utter_confirm": ["Got it", "Gotcha", "Your order is confirmed now"] } response_map = { "greet": "utter_greet", "affirm": "utter_goodbye", "deny": "utter_options", "inform": "utter_confirm", "default": "utter_default", } import random def get_bot_response(bot_response_map, bot_templates, intent): if intent not in list(response_map): intent = "default" select_template = bot_response_map[intent] templates = bot_templates[select_template] return random.choice(templates) user_intent = get_intent(embed, data, "i want indian food") get_bot_response(response_map, templates, user_intent)**Better Response Personalisation?**:for text in ["hey","i am looking for italian food","not for me", "ok, this is good"]: user_intent = get_intent(embed, data, text) bot_reply = get_bot_response(response_map, templates, user_intent) print(f"text : '{text}', intent: {user_intent}, bot: {bot_reply}")text : 'hey', intent: greet, bot: Hey! How you doin'? text : 'i am looking for italian food', intent: inform, bot: Gotcha text : 'not for me', intent: deny, bot: ok, let me check some more text : 'ok, this is good', intent: affirm, bot: Goodbye!Lambda School Data Science*Unit 4, Sprint 3, Module 3*--- Autoencoders> An autoencoder is a type of artificial neural network used to learn efficient data codings in an unsupervised manner.[1][2] The aim of an autoencoder is to learn a representation (encoding) for a set of data, typically for dimensionality reduction, by training the network to ignore signal “noise”. Along with the reduction side, a reconstructing side is learnt, where the autoencoder tries to generate from the reduced encoding a representation as close as possible to its original input, hence its name. 
Learning Objectives*At the end of the lecture you should be to*:* Part 1: Describe the componenets of an autoencoder* Part 2: Train an autoencoder* Part 3: Apply an autoenocder to a basic information retrieval problem__Problem:__ Is it possible to automatically represent an image as a fixed-sized vector even if it isn’t labeled?__Solution:__ Use an autoencoderWhy do we need to represent an image as a fixed-sized vector do you ask? * __Information Retrieval__ - [Reverse Image Search](https://en.wikipedia.org/wiki/Reverse_image_search) - [Recommendation Systems - Content Based Filtering](https://en.wikipedia.org/wiki/Recommender_systemContent-based_filtering)* __Dimensionality Reduction__ - [Feature Extraction](https://www.kaggle.com/c/vsb-power-line-fault-detection/discussion/78285) - [Manifold Learning](https://en.wikipedia.org/wiki/Nonlinear_dimensionality_reduction)We've already seen *representation learning* when we talked about word embedding modelings during our NLP week. Today we're going to achieve a similiar goal on images using *autoencoders*. An autoencoder is a neural network that is trained to attempt to copy its input to its output. Usually they are restricted in ways that allow them to copy only approximately. The model often learns useful properties of the data, because it is forced to prioritize which aspecs of the input should be copied. The properties of autoencoders have made them an important part of modern generative modeling approaches. Consider autoencoders a special case of feed-forward networks (the kind we've been studying); backpropagation and gradient descent still work. Autoencoder Architecture (Learn) OverviewThe *encoder* compresses the input data and the *decoder* does the reverse to produce the uncompressed version of the data to create a reconstruction of the input as accurately as possible:The learning process gis described simply as minimizing a loss function: $ L(x, g(f(x))) $- $L$ is a loss function penalizing $g(f(x))$ for being dissimiliar from $x$ (such as mean squared error)- $f$ is the encoder function- $g$ is the decoder function Follow Along Extremely Simple Autoencoderfrom tensorflow.keras.layers import Input, Dense from tensorflow.keras.models import Model # import wandb # from wandb.keras import WandbCallback # this is the size of our encoded representations encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats # this is our input placeholder input_img = Input(shape=(784,)) # "encoded" is the encoded representation of the input encoded = Dense(encoding_dim, activation='sigmoid')(input_img) # "decoded" is the lossy reconstruction of the input decoded = Dense(784, activation = 'sigmoid')(encoded) # this model maps an input to its reconstruction autoencoder = Model(input_img, decoded) # this model maps an input to its encoded representation encoder = Model(input_img, encoded) # create a placeholder for an encoded (32-dimensional) input # retrieve the last layer of the autoencoder model # create the decoder model autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') from tensorflow.keras.datasets import mnist import numpy as np (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. 
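# --- Added sketch (not in the original notebook): one way to fill in the
# --- "create the decoder model" placeholders above, reusing names already defined.
encoded_input = Input(shape=(encoding_dim,))      # placeholder for a 32-dim code
decoder_layer = autoencoder.layers[-1]            # the Dense(784, sigmoid) reconstruction layer
decoder = Model(encoded_input, decoder_layer(encoded_input))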
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:]))) print(x_train.shape) print(x_test.shape) #wandb.init(project="mnist_autoencoder", entity="ds5") autoencoder.fit(x_train, x_train, epochs=10, batch_size=256, shuffle=True, validation_data=(x_test, x_test), verbose = True) # can stop running/training of model at any point and training will be preserved # encode and decode some digits # note that we take them from the *test* set # visualize the results #encoded_images = encoder.predict(x_test) decoded_imgs = autoencoder.predict(x_test) # use Matplotlib (don't ask) import matplotlib.pyplot as plt n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # poor results; highly dependent on training timeChallengeExpected to talk about the components of autoencoder and their purpose. Train an Autoencoder (Learn) OverviewAs long as our architecture maintains an hourglass shape, we can continue to add layers and create a deeper network. Follow Along Deep Autoencoderinput_img = Input(shape=(784,)) # first layer of the neural network encoded = Dense(128, activation= 'relu')(input_img) # input_img - the data getting pushed to the next layer encoded = Dense(64, activation= 'relu')(encoded) encoded = Dense(32, activation= 'relu')(encoded) # fully dehydrated layer decoded = Dense(64, activation= 'relu')(encoded) decoded = Dense(128, activation= 'relu')(decoded) decoded = Dense(784, activation= 'sigmoid')(decoded) # compile & fit model autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adam', loss='binary_crossentropy') autoencoder.fit(x_train, x_train, epochs=20, batch_size=784, shuffle=True, validation_data=(x_test,x_test), verbose= True) decoded_imgs = autoencoder.predict(x_test) # use Matplotlib (don't ask) import matplotlib.pyplot as plt n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show()Convolutional autoencoder> Since our inputs are images, it makes sense to use convolutional neural networks (convnets) as encoders and decoders. In practical settings, autoencoders applied to images are always convolutional autoencoders --they simply perform much better.> Let's implement one. 
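For orientation, here is the shape trace that the convolutional model below produces (an added note, worked out by hand from the layer definitions that follow):
# encoder: 28x28x1 -> 28x28x16 -> pool -> 14x14x16 -> 14x14x8 -> pool -> 7x7x8 -> 7x7x8 -> pool -> 4x4x8  (the 128-d code)
# decoder: 4x4x8 -> up -> 8x8x8 -> up -> 16x16x8 -> Conv2D(16, (3, 3)) with no padding -> 14x14x16 -> up -> 28x28x16 -> 28x28x1
# (the one unpadded conv is what brings 16x16 back down to 14x14 so the final upsample lands exactly on 28x28)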
The encoder will consist in a stack of Conv2D and MaxPooling2D layers (max pooling being used for spatial down-sampling), while the decoder will consist in a stack of Conv2D and UpSampling2D layers.# Working with upsampling example from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D from keras.models import Model from keras import backend as K # Create Model # Create Model input_img = Input(shape=(28,28,1)) x = Conv2D(16,(3,3), activation='relu', padding='same')(input_img) x = MaxPooling2D((2,2), padding='same')(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = MaxPooling2D((2, 2), padding='same')(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) encoded = MaxPooling2D((2, 2), padding='same')(x) # at this point the representation is (4, 4, 8) i.e. 128-dimensional representation x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded) x = UpSampling2D((2, 2))(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = UpSampling2D((2, 2))(x) x = Conv2D(16, (3, 3), activation='relu')(x) x = UpSampling2D((2, 2))(x) decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adam', loss='binary_crossentropy') autoencoder.summary() from keras.datasets import mnist import numpy as np (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format #wandb.init(project="mnist_autoencoder", entity="ds5") autoencoder.fit(x_train, x_train, epochs=10, batch_size=784, shuffle=True, validation_data=(x_test, x_test), verbose=True) decoded_imgs = autoencoder.predict(x_test) n = 10 plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show()Visualization of the Representationsencoder = Model(input_img, encoded) encoder.predict(x_train) n = 10 plt.figure(figsize=(20, 8)) for i in range(n): ax = plt.subplot(1, n, i) plt.imshow(encoded_imgs[i].reshape(4, 4 * 8).T) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show()ChallengeYou will train an autoencoder at some point in the near future. Information Retrieval with Autoencoders (Learn) OverviewA common usecase for autoencoders is for reverse image search. Let's try to draw an image and see what's most similiar in our dataset. To accomplish this we will need to slice our autoendoer in half to extract our reduced features. :) Follow Alongencoder = Model(input_img, encoded) encoded_imgs = encoder.predict(x_train) encoded_imgs[0].reshape((128,)) #shape before reshape: 4,4,8 from sklearn.neighbors import NearestNeighbors nn = NearestNeighbors(n_neighbors=10, algorithm='ball_tree') nn.fit(encoded_imgs) nn.kneighbors(...)Bubble SortBubble sort is one of the simplest sorting algorihtms to understand, however it is also one of the most inefficient. 
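Returning to the reverse-image-search cell above: the elided nn.kneighbors(...) call could, purely for illustration, be completed along these lines. This is a commented sketch, not the original notebook's code; note that the (4, 4, 8) feature maps have to be flattened to 128-d vectors before scikit-learn can fit or query them.
#   flat_train = encoded_imgs.reshape(len(encoded_imgs), -1)                      # (60000, 128)
#   nn = NearestNeighbors(n_neighbors=10, algorithm='ball_tree').fit(flat_train)
#   query = encoder.predict(x_test[:1]).reshape(1, -1)                            # encode one query image
#   distances, indices = nn.kneighbors(query)                                     # indices of the 10 closest training images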
In the worst case the time complexity is O(n²)import random def bubblesort(input_array): length = len(input_array) for i in range(length): for j in range(length - 1): if input_array[j] > input_array[j + 1]: input_array[j], input_array[j + 1] = input_array[j + 1], input_array[j] return input_array bubblesort([random.random() for i in range(10)])Show magic command%lsmagicExtract feature vectors from text corpus using universal-sentence-encoder-multilingualAuthor: !pip3 install tensorflow_text>=2.0.0rc0 import tensorflow_hub as hub import numpy as np import tensorflow_text import pandas as pd import altair as alt import seaborn as sns import matplotlib.pyplot as plt # The 16-language multilingual module is the default but feel free # to pick others from the list and compare the results. module_url = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3' #@param ['https://tfhub.dev/google/universal-sentence-encoder-multilingual/3', 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3'] model = hub.load(module_url) data = pd.read_csv("/content/export1305.csv") titles = data["titel"].fillna("") titles_embeds = model(titles) titles_embeds np.inner(titles_embeds, titles_embeds) def arr_to_csv(arr, filename, mult=10000): dataframe = (pd.DataFrame(arr) * mult).round(0).astype('int32') dataframe['id'] = data['id'] dataframe.to_csv(filename) return dataframe dataframe = arr_to_csv(titles_embeds, "use.csv") dataframe import umap def run_umap(data): umap_op = umap.UMAP(n_components=2) data_umap = umap_op.fit_transform(data) data_umap_df = pd.DataFrame(data=data_umap, columns=['x', 'y']) return data_umap_df umap_title = run_umap(titles_embeds) alt.data_transformers.disable_max_rows() alt.Chart(pd.concat([titles,umap_title], axis=1)).encode( x='x', y='y', tooltip=['titel'] ).mark_circle(size=60).properties( width=500, height=500 ).interactive()Machine Learning with FybrikTraining data set is synthetic bank transaction data: https://www.kaggle.com/ntnu-testimon/paysim1/dataNotebook: https://www.kaggle.com/arjunjoshua/predicting-fraud-in-financial-payment-services Install Python packages!python3 -m pip install --user pandas seaborn sklearn pyarrow import sys sys.path.append('/home/jovyan/.local/lib/python3.6/site-packages')Import ML dependenciesimport sys import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt import matplotlib.lines as mlines from mpl_toolkits.mplot3d import Axes3D import seaborn as sns from sklearn.model_selection import train_test_split, learning_curve from sklearn.metrics import average_precision_score from xgboost.sklearn import XGBClassifier from xgboost import plot_importance, to_graphvizGet Data Load Data from Fybrikimport pyarrow.flight as fl import json client = fl.connect("grpc://..svc.cluster.local:80") request = { "asset": "/.csv", "columns": ["step", "type", "amount", "nameOrig", "oldbalanceOrg", "newbalanceOrig", "nameDest", "oldbalanceDest", "newbalanceDest", "isFraud", "isFlaggedFraud"] } info: fl.FlightInfo = client.get_flight_info( fl.FlightDescriptor.for_command(json.dumps(request))) result = client.do_get(info.endpoints[0].ticket) df: pd.DataFrame = result.read_pandas() dfRead Datadf = df.rename(columns={'oldbalanceOrg':'oldBalanceOrig', 'newbalanceOrig':'newBalanceOrig', \ 'oldbalanceDest':'oldBalanceDest', 'newbalanceDest':'newBalanceDest'}) print(df.head()) # Check for missing values in DataFrame df.isnull().values.any()Exploratory Data Analysis Determine types of transactions that are 
fraudulentprint('\n The types of fraudulent transactions are {}'.format(\ list(df.loc[df.isFraud == 1].type.drop_duplicates().values))) # only 'CASH_OUT' # & 'TRANSFER' dfFraudTransfer = df.loc[(df.isFraud == 1) & (df.type == 'TRANSFER')] dfFraudCashout = df.loc[(df.isFraud == 1) & (df.type == 'CASH_OUT')] print ('\n The number of fraudulent TRANSFERs = {}'.\ format(len(dfFraudTransfer))) # 4097 print ('\n The number of fraudulent CASH_OUTs = {}'.\ format(len(dfFraudCashout))) # 4116Determine when isFlaggedFraud gets setprint('\nThe type of transactions in which isFlaggedFraud is set: \ {}'.format(list(df.loc[df.isFlaggedFraud == 1].type.drop_duplicates()))) # only 'TRANSFER' dfTransfer = df.loc[df.type == 'TRANSFER'] dfFlagged = df.loc[df.isFlaggedFraud == 1] dfNotFlagged = df.loc[df.isFlaggedFraud == 0] print('\nMin amount transacted when isFlaggedFraud is set= {}'\ .format(dfFlagged.amount.min())) # 353874.22 print('\nMax amount transacted in a TRANSFER where isFlaggedFraud is not set=\ {}'.format(dfTransfer.loc[dfTransfer.isFlaggedFraud == 0].amount.max())) # 9 print('\nThe number of TRANSFERs where isFlaggedFraud = 0, yet oldBalanceDest = 0 and\ newBalanceDest = 0: {}'.\ format(len(dfTransfer.loc[(dfTransfer.isFlaggedFraud == 0) & \ (dfTransfer.oldBalanceDest == 0) & (dfTransfer.newBalanceDest == 0)]))) # 4158 print('\nMin, Max of oldBalanceOrig for isFlaggedFraud = 1 TRANSFERs: {}'.\ format([round(dfFlagged.oldBalanceOrig.min()), round(dfFlagged.oldBalanceOrig.max())])) print('\nMin, Max of oldBalanceOrig for isFlaggedFraud = 0 TRANSFERs where \ oldBalanceOrig = \ newBalanceOrig: {}'.format(\ [dfTransfer.loc[(dfTransfer.isFlaggedFraud == 0) & (dfTransfer.oldBalanceOrig \ == dfTransfer.newBalanceOrig)].oldBalanceOrig.min(), \ round(dfTransfer.loc[(dfTransfer.isFlaggedFraud == 0) & (dfTransfer.oldBalanceOrig \ == dfTransfer.newBalanceOrig)].oldBalanceOrig.max())]))Conclusion: Although isFraud is always set when isFlaggedFraud is set, since isFlaggedFraud is set just 16 times in a seemingly meaningless way, we can treat this feature as insignificant and discard it in the dataset without loosing information. Data CleaningFrom the exploratory data analysis, we know that fraud only occurs in 'TRANSFER's and 'CASH_OUT's. 
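As a quick sanity check of that claim (an added aside, using the df already loaded):
df.loc[df.isFraud == 1, 'type'].unique()   # expected to show only 'TRANSFER' and 'CASH_OUT'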
So we assemble only the corresponding data in X for analysis.X = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')] randomState = 5 np.random.seed(randomState) #X = X.loc[np.random.choice(X.index, 100000, replace = False)] Y = X['isFraud'] del X['isFraud'] # Eliminate columns shown to be irrelevant for analysis in the EDA and columns with redacted data X = X.drop(['nameOrig', 'nameDest', 'isFlaggedFraud'], axis = 1) # Binary-encoding of labelled data in 'type' X.loc[X.type == 'TRANSFER', 'type'] = 0 X.loc[X.type == 'CASH_OUT', 'type'] = 1 X.type = X.type.astype(int) # convert dtype('O') to dtype(int)Imputation of Latent Missing ValuesXfraud = X.loc[Y == 1] XnonFraud = X.loc[Y == 0] print('\nThe fraction of fraudulent transactions with \'oldBalanceDest\' = \ \'newBalanceDest\' = 0 although the transacted \'amount\' is non-zero is: {}'.\ format(len(Xfraud.loc[(Xfraud.oldBalanceDest == 0) & \ (Xfraud.newBalanceDest == 0) & (Xfraud.amount)]) / (1.0 * len(Xfraud)))) print('\nThe fraction of genuine transactions with \'oldBalanceDest\' = \ newBalanceDest\' = 0 although the transacted \'amount\' is non-zero is: {}'.\ format(len(XnonFraud.loc[(XnonFraud.oldBalanceDest == 0) & \ (XnonFraud.newBalanceDest == 0) & (XnonFraud.amount)]) / (1.0 * len(XnonFraud)))) #Since the destination account balances being zero is a strong indicator of fraud, we replace the value of 0 with -1 X.loc[(X.oldBalanceDest == 0) & (X.newBalanceDest == 0) & (X.amount != 0), \ ['oldBalanceDest', 'newBalanceDest']] = - 1 X.loc[(X.oldBalanceOrig == 0) & (X.newBalanceOrig == 0) & (X.amount != 0), \ ['oldBalanceOrig', 'newBalanceOrig']] = np.nanFeature EngineeringCreate 2 new features (columns) recording errors in the originating and destination accounts for each transaction. These new features turn out to be important in obtaining the best performance from the ML algorithm that we will finally use.X['errorBalanceOrig'] = X.newBalanceOrig + X.amount - X.oldBalanceOrig X['errorBalanceDest'] = X.oldBalanceDest + X.amount - X.newBalanceDestData VisualizationVisualize the differences between fraudulent and genuine transactions to confirm that an ML algorithm can make strong predictionslimit = len(X) def plotStrip(x, y, hue, figsize = (14, 9)): fig = plt.figure(figsize = figsize) colours = plt.cm.tab10(np.linspace(0, 1, 9)) with sns.axes_style('ticks'): ax = sns.stripplot(x, y, \ hue = hue, jitter = 0.4, marker = '.', \ size = 4, palette = colours) ax.set_xlabel('') ax.set_xticklabels(['genuine', 'fraudulent'], size = 16) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) handles, labels = ax.get_legend_handles_labels() plt.legend(handles, ['Transfer', 'Cash out'], bbox_to_anchor=(1, 1), \ loc=2, borderaxespad=0, fontsize = 16); return axDispersion Over Time This plot shows how the fraudulent and genuine transactions yield different figerprints when their dispersion is views over timeax = plotStrip(Y[:limit], X.step[:limit], X.type[:limit]) ax.set_ylabel('time [hour]', size = 16) ax.set_title('Striped vs. 
homogenous fingerprints of genuine and fraudulent \ transactions over time', size = 20);Dispersion Over AmountThe new `errorBalanceDest` feature is more effective at making a distinction than the original `amount` featurelimit = len(X) ax = plotStrip(Y[:limit], X.amount[:limit], X.type[:limit], figsize = (14, 9)) ax.set_ylabel('amount', size = 16) ax.set_title('Same-signed fingerprints of genuine \ and fraudulent transactions over amount', size = 18);Dispersion Over Error in Balance in Destination Accountslimit = len(X) ax = plotStrip(Y[:limit], - X.errorBalanceDest[:limit], X.type[:limit], \ figsize = (14, 9)) ax.set_ylabel('- errorBalanceDest', size = 16) ax.set_title('Opposite polarity fingerprints over the error in \ destination account balances', size = 18);Separating Out Genuine From Fraudulent TransactionsThe 3D plot below distinguishes best between fraud and non-fraud data by using both of the engineered error-based features.# Long computation in this cell (~2.5 minutes) x = 'errorBalanceDest' y = 'step' z = 'errorBalanceOrig' zOffset = 0.02 limit = len(X) sns.reset_orig() # prevent seaborn from over-riding mplot3d defaults fig = plt.figure(figsize = (10, 12)) ax = fig.add_subplot(111, projection='3d') ax.scatter(X.loc[Y == 0, x][:limit], X.loc[Y == 0, y][:limit], \ -np.log10(X.loc[Y == 0, z][:limit] + zOffset), c = 'g', marker = '.', \ s = 1, label = 'genuine') ax.scatter(X.loc[Y == 1, x][:limit], X.loc[Y == 1, y][:limit], \ -np.log10(X.loc[Y == 1, z][:limit] + zOffset), c = 'r', marker = '.', \ s = 1, label = 'fraudulent') ax.set_xlabel(x, size = 16); ax.set_ylabel(y + ' [hour]', size = 16); ax.set_zlabel('- log$_{10}$ (' + z + ')', size = 16) ax.set_title('Error-based features separate out genuine and fraudulent \ transactions', size = 20) plt.axis('tight') ax.grid(1) noFraudMarker = mlines.Line2D([], [], linewidth = 0, color='g', marker='.', markersize = 10, label='genuine') fraudMarker = mlines.Line2D([], [], linewidth = 0, color='r', marker='.', markersize = 10, label='fraudulent') plt.legend(handles = [noFraudMarker, fraudMarker], \ bbox_to_anchor = (1.20, 0.38 ), frameon = False, prop={'size': 16});Fingerprints of Genuine and Fraudulent TransactionsSmoking gun and comprehensive evidence embedded in the dataset of the difference between fraudulent and genuine transactions is obtained by examining their respective correlations in the heatmaps below.Xfraud = X.loc[Y == 1] # update Xfraud & XnonFraud with cleaned data XnonFraud = X.loc[Y == 0] correlationNonFraud = XnonFraud.loc[:, X.columns != 'step'].corr() mask = np.zeros_like(correlationNonFraud) indices = np.triu_indices_from(correlationNonFraud) mask[indices] = True grid_kws = {"width_ratios": (.9, .9, .05), "wspace": 0.2} f, (ax1, ax2, cbar_ax) = plt.subplots(1, 3, gridspec_kw=grid_kws, \ figsize = (14, 9)) cmap = sns.diverging_palette(220, 8, as_cmap=True) ax1 =sns.heatmap(correlationNonFraud, ax = ax1, vmin = -1, vmax = 1, \ cmap = cmap, square = False, linewidths = 0.5, mask = mask, cbar = False) ax1.set_xticklabels(ax1.get_xticklabels(), size = 16); ax1.set_yticklabels(ax1.get_yticklabels(), size = 16); ax1.set_title('Genuine \n transactions', size = 20) correlationFraud = Xfraud.loc[:, X.columns != 'step'].corr() ax2 = sns.heatmap(correlationFraud, vmin = -1, vmax = 1, cmap = cmap, \ ax = ax2, square = False, linewidths = 0.5, mask = mask, yticklabels = False, \ cbar_ax = cbar_ax, cbar_kws={'orientation': 'vertical', \ 'ticks': [-1, -0.5, 0, 0.5, 1]}) ax2.set_xticklabels(ax2.get_xticklabels(), size = 16); 
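# (Added note.) The mask built from np.triu_indices_from above blanks out the upper
# triangle of each (symmetric) correlation matrix, so every pairwise correlation is
# drawn exactly once and the two heatmaps stay directly comparable.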
ax2.set_title('Fraudulent \n transactions', size = 20); cbar_ax.set_yticklabels(cbar_ax.get_yticklabels(), size = 14);Machine Learning to Detect Fraud in Skewed DataHaving obtained evidence from the plots above that the data now contains features that make fraudulent transactions clearly detectable, the remaining obstacle for training a robust ML model is the highly imbalanced nature of the data.print('skew = {}'.format( len(Xfraud) / float(len(X)) )) # Split the data into training and test sets in a 80:20 ratio trainX, testX, trainY, testY = train_test_split(X, Y, test_size = 0.2, \ random_state = randomState) # Long computation in this cell (~1.8 minutes) weights = (Y == 0).sum() / (1.0 * (Y == 1).sum()) clf = XGBClassifier(max_depth = 3, scale_pos_weight = weights, \ n_jobs = 4) probabilities = clf.fit(trainX, trainY).predict_proba(testX) print('AUPRC = {}'.format(average_precision_score(testY, \ probabilities[:, 1])))Important Features for the ML ModelThe figure below shows that the new feature errorBalanceOrig that we created is the most relevant feature for the model. The features are ordered based on the number of samples affected by splits on those features.fig = plt.figure(figsize = (14, 9)) ax = fig.add_subplot(111) colours = plt.cm.Set1(np.linspace(0, 1, 9)) ax = plot_importance(clf, height = 1, color = colours, grid = False, \ show_values = False, importance_type = 'cover', ax = ax); for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) ax.set_xlabel('importance score', size = 16); ax.set_ylabel('features', size = 16); ax.set_yticklabels(ax.get_yticklabels(), size = 12); ax.set_title('Ordering of features by importance to the model learnt', size = 20);Visualization of ML ModelThe root node in the decision tree visualized below is indeed the feature errorBalanceOrig, as would be expected from its high significance to the model.to_graphviz(clf)Bias-Variance TradeoffThe model we have learnt has a degree of bias and is slighly underfit. This is indicated by the levelling in AUPRC as the size of the training set is increased in the cross-validation curve below. The easiest way to improve the performance of the model still further is to increase the max_depth parameter of the XGBClassifier at the expense of the longer time spent learning the model. 
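For instance, a deeper variant might look like this (an added sketch with a hypothetical depth, not a tuned configuration):
clf_deeper = XGBClassifier(max_depth = 6, scale_pos_weight = weights, n_jobs = 4)  # deeper trees trade bias for longer training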
Other parameters of the classifier that can be adjusted to correct for the effect of the modest underfitting include decreasing min_child_weight and decreasing reg_lambda.# Long computation in this cell (~6 minutes) trainSizes, trainScores, crossValScores = learning_curve(\ XGBClassifier(max_depth = 3, scale_pos_weight = weights, n_jobs = 4), trainX,\ trainY, scoring = 'average_precision') trainScoresMean = np.mean(trainScores, axis=1) trainScoresStd = np.std(trainScores, axis=1) crossValScoresMean = np.mean(crossValScores, axis=1) crossValScoresStd = np.std(crossValScores, axis=1) colours = plt.cm.tab10(np.linspace(0, 1, 9)) fig = plt.figure(figsize = (14, 9)) plt.fill_between(trainSizes, trainScoresMean - trainScoresStd, trainScoresMean + trainScoresStd, alpha=0.1, color=colours[0]) plt.fill_between(trainSizes, crossValScoresMean - crossValScoresStd, crossValScoresMean + crossValScoresStd, alpha=0.1, color=colours[1]) plt.plot(trainSizes, trainScores.mean(axis = 1), 'o-', label = 'train', \ color = colours[0]) plt.plot(trainSizes, crossValScores.mean(axis = 1), 'o-', label = 'cross-val', \ color = colours[1]) ax = plt.gca() for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) handles, labels = ax.get_legend_handles_labels() plt.legend(handles, ['train', 'cross-val'], bbox_to_anchor=(0.8, 0.15), \ loc=2, borderaxespad=0, fontsize = 16); plt.xlabel('training set size', size = 16); plt.ylabel('AUPRC', size = 16) plt.title('Learning curves indicate slightly underfit model', size = 20);Import necessary packagesimport json import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequencesSet the hyper-parametresvocab_size = 10000 embedding_dim = 16 max_length = 100 trunc_type='post' padding_type='post' oov_tok = "" training_size = 20000Download the dataset!wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sarcasm.json \ -O /tmp/sarcasm.json--2019-09-27 17:18:03-- https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sarcasm.json Resolving storage.googleapis.com (storage.googleapis.com)... 192.168.3.11, fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b Connecting to storage.googleapis.com (storage.googleapis.com)|192.168.3.11|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 5643545 (5.4M) [application/json] Saving to: ‘/tmp/sarcasm.json’ /tmp/sarcasm.json 0%[ ] 0 --.-KB/s /tmp/sarcasm.json 100%[===================>] 5.38M --.-KB/s in 0.07s 2019-09-27 17:18:03 (72.7 MB/s) - ‘/tmp/sarcasm.json’ saved [5643545/5643545]Open the json file and extract sentences and labels in the form of listswith open("/tmp/sarcasm.json", 'r') as f: datastore = json.load(f) sentences = [] labels = [] for item in datastore: sentences.append(item['headline']) labels.append(item['is_sarcastic']) training_sentences = sentences[0:training_size] testing_sentences = sentences[training_size:] training_labels = labels[0:training_size] testing_labels = labels[training_size:]Do the tokenization and pad the sequencestokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok) tokenizer.fit_on_texts(training_sentences) word_index = tokenizer.word_index training_sequences = tokenizer.texts_to_sequences(training_sentences) training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type) testing_sequences = tokenizer.texts_to_sequences(testing_sentences) testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)Our Keras Modelmodel = tf.keras.Sequential([ tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length), tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Dense(24, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])Print the summary of our modelmodel.summary()Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_1 (Embedding) (None, 100, 16) 160000 _________________________________________________________________ global_average_pooling1d_1 ( (None, 16) 0 _________________________________________________________________ dense_2 (Dense) (None, 24) 408 _________________________________________________________________ dense_3 (Dense) (None, 1) 25 ================================================================= Total params: 160,433 Trainable params: 160,433 Non-trainable params: 0 _________________________________________________________________Set the number of epochs and fit the modelnum_epochs = 30 history = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2)Train on 20000 samples, validate on 6709 samples Epoch 1/30 20000/20000 - 3s - loss: 0.6703 - acc: 0.5712 - val_loss: 0.5937 - val_acc: 0.6667 Epoch 2/30 20000/20000 - 3s - loss: 0.4293 - acc: 0.8278 - val_loss: 0.3795 - val_acc: 0.8435 Epoch 3/30 20000/20000 - 3s - loss: 0.3033 - acc: 0.8789 - val_loss: 0.3549 - val_acc: 0.8483 Epoch 4/30 20000/20000 - 3s - loss: 0.2514 - acc: 0.9033 - val_loss: 0.3436 - val_acc: 0.8565 Epoch 5/30 20000/20000 - 2s - loss: 0.2147 - acc: 0.9172 - val_loss: 0.3476 - val_acc: 0.8557 Epoch 6/30 20000/20000 - 2s - loss: 0.1879 - acc: 0.9280 - val_loss: 0.3610 - val_acc: 0.8526 Epoch 7/30 20000/20000 - 2s - loss: 0.1681 - acc: 0.9363 - val_loss: 0.3727 - val_acc: 0.8550 Epoch 8/30 20000/20000 - 2s - loss: 0.1469 - acc: 0.9474 - val_loss: 0.4311 - val_acc: 0.8375 Epoch 9/30 20000/20000 - 2s - loss: 0.1330 - acc: 0.9521 - val_loss: 0.4159 - val_acc: 0.8501 Epoch 10/30 20000/20000 - 2s - loss: 0.1191 - acc: 0.9582 - val_loss: 0.4539 - val_acc: 0.8438 Epoch 
11/3[...]Plot the loss using matplotlibimport matplotlib.pyplot as plt def plot_graphs(history, string): plt.plot(history.history[string]) plt.plot(history.history['val_'+string]) plt.xlabel("Epochs") plt.ylabel(string) plt.legend([string, 'val_'+string]) plt.show() plot_graphs(history, "loss")Decoding our training sentences after tokenizing and padding themreverse_word_index = dict([(value, key) for (key, value) in word_index.items()]) def decode_sentence(text): return ' '.join([reverse_word_index.get(i, '?') for i in text]) print(decode_sentence(training_padded[0])) print(training_sentences[2]) print(labels[2])former store clerk sues over secret 'black for minority shoppers ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? mom starting to fear son's web series closest thing she will have to grandchild 1Get the number of words and also the dimensions of our embeddingse = model.layers[0] weights = e.get_weights()[0] print(weights.shape) # shape: (vocab_size, embedding_dim)(10000, 16)For Visualization on Tensorboardimport io out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for word_num in range(1, vocab_size): word = reverse_word_index[word_num] embeddings = weights[word_num] out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in embeddings]) + "\n") out_v.close() out_m.close() try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv')Predictionsentence = ["granny starting to fear spiders in the garden might be real", "game of thrones season finale showing this sunday night"] sequences = tokenizer.texts_to_sequences(sentence) padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type) print(model.predict(padded))[[0.49796104] [0.49761578]]![Noteable.ac.uk Banner](https://github.com/jstix/mr-noteable/blob/master/Banner%20image/1500x500.jfif?raw=true) Introduction to programming Legend for colors In blue, the instructions and goals are highlighted. In green, the information is highlighted. In yellow, the exercises are highlighted. In red, the error and alert messages are highlighted. Instructions Click "Run" on each cell to go through the code in each cell. This will take you through the cell and print out the results. If you wish to see all the outputs at once in the whole notebook, just click Cell and then Run All. Goal After this workshop, the student should get more familiar with the following topics: printing basic statements and commands in Jupyter Notebook performing basic arithmetic calculations in Python improving an existent model of the code These objectives are in agreement with the National 3 Scottish Curriculum for high-school students. Note: For most of the workshop, the student will be given some coding examples. In some cases, the student will have to code himself/herself. The coding part is optional, but highly recommended to approach. Explore Some printing statements... Let us begin with a very simple exercise:print("Hello!! Welcome to your first programming workshop. I am here to help you")This is an example of what a computer does: given some input , some data from the keyboard, it produces a specific output. In this case, the output is as simple as it gets. We just print out a statement, this is the main usage of print function. 
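For example, you can try printing a message of your own:
print("Programming is fun!")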
Do not remove the "" sign when printing statments, otherwise the code will not work. What happens if you remove the sign though? Before the end of the next workshop, we will discuss this. However, for the moment, let us do some practice. Exercise: Investigate the following code:input("Put in your name: ")What happens when you type in the above command? Discuss this in pairs if you find it more helpful. Exercise: Modify the above line to the following - "Put in your whole name" and type the name accordingly.# Type your whole name using the input function hereLet us try now another small code together: we will print, at a very basic level, a Tic-Tac-Toe board.print("x" "o" "o") print("o" "o" "x") print("x" "o" "x")We have just printed out what was supposed to be the design of a tic tac toe! Exercise: Can you print out a similar design for tic-tac-toe, but with four lines and four columns?# Try printing a 4-by-4 Tic Tac Toe boardYou can also use your creativity to create whatever you want (tree, smiley face, go for it). Let us try one more example togetherprint(" . " " o " " . " " o " " . ") print(" . " " . " " | " " . " " . ") print(" o " " . " " . " " . " " o ") print(" . " " o " " o " " o " " . ") print(" . " " . " " . " " . " " . "). o . o . . . | . . o . . . o . o o o . . . . . .This is how a smiley face can look! Of course, examples are plenty so try more to make it better! Exercise: For example, can you add to this smiley face a beard? Use this as hint if you find it too hard: add a v-character under the middle circle. Talk about it in pairs, suggest improvements to the sketch above, try drawing everything on a paper before doing it on your notebook. Question: Did I proceed well by keeping the dots? What if I remove them? First think it through your head, then do the adjustments: Note: There is no correct answer, this is up to your imagination.# Draw your new smiley face hereAnd some arithmetic operations Let us see how the computer plays with numbers as wellprint(2+3)Alright, so the computer does know how to play with numbers! Let us try with bigger onesprint(89275*4354) print(552643/324)How to raise a number to the power of another number?2**5Exercise: Compute the following operation: $ 3^{4} + \frac{21}{35} + (4 \cdot 4) $# Perform the arithmetic operation here:Notice how print is the command line for showing us what the computer has done. This is what we call output. For Jupyter Notebooks, however, you do not even need to use this command. Let us try one more command:print(3<5)So we can also do comparisons between different numbersprint(4>5)Take-away This is it for today, and well done for managing to go through the material!! After this session, you should be more familiar with how simple sentences, numbers and conditional statements can be printed in Python. Also, feel free to work more on this notebook using any commands you would like. Note: Always keep a back-up of the notebook, in case the original one is altered. For today's session, this should be enough! See you later!!print("Bye bye! 
:D")Convolution Neural Network# Optional to surpress warning import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.datasets import cifar10 # Set physical device to GPU physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Data Shape print('Data shape: ',x_train.shape) x_train = x_train.astype('float32') / 255.0 x_test = x_test.astype('float32') / 255.0 # Model Initialization model = keras.Sequential( [ layers.InputLayer(input_shape=(32, 32, 3)), layers.Conv2D(32, 3, padding='valid', activation='relu'), layers.MaxPool2D(pool_size=(2,2)), layers.Conv2D(64, 3, activation='relu'), layers.MaxPool2D(), layers.Conv2D(128, 3, activation='relu'), layers.Flatten(), layers.Dense(64, activation='relu'), layers.Dense(10) ] ) model.summary() model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.Adam(learning_rate=3e-4), metrics=['accuracy'] ) model.fit(x_train, y_train, batch_size=64, epochs=10, verbose=2) model.evaluate(x_test, y_test, batch_size=64, verbose=True)Epoch 1/10 782/782 - 9s - loss: 1.7068 - accuracy: 0.3774 Epoch 2/10 782/782 - 4s - loss: 1.3580 - accuracy: 0.5135 Epoch 3/10 782/782 - 4s - loss: 1.2270 - accuracy: 0.5659 Epoch 4/10 782/782 - 4s - loss: 1.1278 - accuracy: 0.6035 Epoch 5/10 782/782 - 4s - loss: 1.0488 - accuracy: 0.6328 Epoch 6/10 782/782 - 4s - loss: 0.9923 - accuracy: 0.6538 Epoch 7/10 782/782 - 4s - loss: 0.9491 - accuracy: 0.6699 Epoch 8/10 782/782 - 4s - loss: 0.9000 - accuracy: 0.6888 Epoch 9/10 782/782 - 4s - loss: 0.8611 - accuracy: 0.7020 Epoch 10/10 782/782 - 4s - loss: 0.8264 - accuracy: 0.7147 157/157 [==============================] - 1s 3ms/step - loss: 0.9054 - accuracy: 0.6859Functional APIdef my_model(): inputs = keras.layers.Input(shape=(32, 32, 3)) x = layers.Conv2D(32, 3)(inputs) x = layers.BatchNormalization()(x) x = keras.activations.relu(x) x = layers.MaxPool2D()(x) x = layers.Conv2D(64, 5, padding='same')(x) x = layers.BatchNormalization()(x) x = keras.activations.relu(x) x = layers.Conv2D(128, 3)(x) x = layers.BatchNormalization()(x) x = keras.activations.relu(x) x = layers.Flatten()(x) x = layers.Dense(64, activation='relu')(x) outputs = layers.Dense(10)(x) model = keras.Model(inputs=inputs, outputs=outputs) return model model = my_model() model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.Adam(learning_rate=3e-4), metrics=['accuracy'] ) model.fit(x_train, y_train, batch_size=64, epochs=10, verbose=2) model.evaluate(x_test, y_test, batch_size=64, verbose=True)Epoch 1/10 782/782 - 6s - loss: 1.2912 - accuracy: 0.5447 Epoch 2/10 782/782 - 5s - loss: 0.8926 - accuracy: 0.6867 Epoch 3/10 782/782 - 5s - loss: 0.7311 - accuracy: 0.7435 Epoch 4/10 782/782 - 5s - loss: 0.6168 - accuracy: 0.7836 Epoch 5/10 782/782 - 5s - loss: 0.5288 - accuracy: 0.8152 Epoch 6/10 782/782 - 5s - loss: 0.4407 - accuracy: 0.8468 Epoch 7/10 782/782 - 5s - loss: 0.3702 - accuracy: 0.8722 Epoch 8/10 782/782 - 5s - loss: 0.3001 - accuracy: 0.8983 Epoch 9/10 782/782 - 5s - loss: 0.2435 - accuracy: 0.9197 Epoch 10/10 782/782 - 5s - loss: 0.1935 - accuracy: 0.9373 157/157 [==============================] - 1s 4ms/step - loss: 0.9196 - accuracy: 0.7298Text Generationimport string import numpy as np import pandas as pd import 
matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Embedding, LSTM, Dense, BidirectionalHelping Functionsdef create_lyrics_corpus(dataset, field): # Remove all other punctuation dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '') # Make it lowercase dataset[field] = dataset[field].str.lower() # Make it one long string to split by line lyrics = dataset[field].str.cat() corpus = lyrics.split('\n') # Remove any trailing whitespace for l in range(len(corpus)): corpus[l] = corpus[l].rstrip() # Remove any empty lines corpus = [l for l in corpus if l != ''] return corpus def tokenize_corpus(corpus, num_words=-1): # Fit a Tokenizer on the corpus if num_words > -1: tokenizer = Tokenizer(num_words=num_words) else: tokenizer = Tokenizer() tokenizer.fit_on_texts(corpus) return tokenizerStep 1 : Get the Corpus# Read the dataset from csv - just first 10 songs for now path = tf.keras.utils.get_file('songdata.csv', 'https://drive.google.com/uc?id=1LiJFZd41ofrWoBtW-pMYsfz1w8Ny0Bj8') print (path) dataset = pd.read_csv(path, dtype=str)[:10] corpus = create_lyrics_corpus(dataset, 'text')Step 2 : Tokenize the Corpus# Tokenize the corpus tokenizer = tokenize_corpus(corpus) total_words = len(tokenizer.word_index) + 19 # why 19? #print(tokenizer.word_index) print(total_words) dataset.head()Step 3 : Create n-Gramsequences = [] for line in corpus: token_list = tokenizer.texts_to_sequences([line])[0] for i in range(1, len(token_list)): n_gram_sequence = token_list[:i+1] sequences.append(n_gram_sequence)Step 4 : Pad sequences# Pad sequences for equal input length max_sequence_len = max([len(seq) for seq in sequences]) sequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))Step 5 : X and y - Values# Split sequences between the "input" sequence and "output" predicted word X = sequences[:,:-1] y_label = sequences[:,-1] # One-hot encode the labels y = tf.keras.utils.to_categorical(y_label, num_classes = total_words)Explore and Trace# Check out how some of our data is being stored # The Tokenizer has just a single index per word print(tokenizer.word_index['know']) print(tokenizer.word_index['feeling']) # Input sequences will have multiple indexes print(X[5]) print(X[6]) # And the one hot labels will be as long as the full spread of tokenized words print(y[5]) print(y[6])32 97 [ 0 0 0 0 0 0 0 0 0 0 0 0 0 81 82 142 197 29 4] [ 0 0 0 0 0 0 0 0 0 0 0 0 81 82 142 197 29 4 287] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 
0.[...]Step 6 : Create Modelmodel = Sequential() model.add(Embedding(total_words, 64, input_length=max_sequence_len-1)) model.add(Bidirectional(LSTM(20))) model.add(Dense(total_words, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() history = model.fit(X, y, epochs=200, verbose=1) def plot_graphs(history, string): plt.plot(history.history[string]) plt.xlabel("Epochs") plt.ylabel(string) plt.show() plot_graphs(history, 'accuracy')Step 7 : Generate Textseed_text = "im feeling chills" next_words = 100 for _ in range(next_words): token_list = tokenizer.texts_to_sequences([seed_text])[0] token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre') predicted = np.argmax(model.predict(token_list), axis=-1) output_word = "" for word, index in tokenizer.word_index.items(): if index == predicted: output_word = word break seed_text += " " + output_word print(seed_text)im feeling chills your music and i do what a walk in my stuff tomorrow talking walk am am am am am more more more more more more more slack seems but no blown thought thought morning thought thought morning closed would weave realized its new new found new love found true crazy wonderful plan shoulder hour am am am am more more more more more more slack more more so seems figure quiet here i realized realized realized its blue think do crazy wonderful didnt do may just advice dreamed realized dreamed chiquitita true shoulder new to dont thought would realized do**Bag of Words (BOW) and Tf-idf unstemmed**def transform_BOW(training, testing, column_name): vect = CountVectorizer(max_features=100, ngram_range=(1,3), stop_words=ENGLISH_STOP_WORDS) vectFit = vect.fit(training[column_name]) BOW_training = vectFit.transform(training[column_name]) BOW_training_df = pd.DataFrame(BOW_training.toarray(), columns=vect.get_feature_names()) BOW_testing = vectFit.transform(testing[column_name]) BOW_testing_Df = pd.DataFrame(BOW_testing.toarray(), columns=vect.get_feature_names()) return vectFit, BOW_training_df, BOW_testing_Df def transform_tfidf(training, testing, column_name): Tfidf = TfidfVectorizer(ngram_range=(1,3), max_features=100, stop_words=ENGLISH_STOP_WORDS) Tfidf_fit = Tfidf.fit(training[column_name]) Tfidf_training = Tfidf_fit.transform(training[column_name]) Tfidf_training_df = pd.DataFrame(Tfidf_training.toarray(), columns=Tfidf.get_feature_names()) Tfidf_testing = Tfidf_fit.transform(testing[column_name]) Tfidf_testing_df = pd.DataFrame(Tfidf_testing.toarray(), columns=Tfidf.get_feature_names()) return Tfidf_fit, Tfidf_training_df, Tfidf_testing_df from sklearn import svm from sklearn.metrics import classification_report from sklearn.calibration import CalibratedClassifierCV # def build_model(X_train, y_train, X_test, y_test, name_of_test): # # log_reg = LogisticRegression(C=30, max_iter=200).fit(X_train, y_train) # linear_svm = svm.SVC(kernel='linear',C=1.0,probability=True) # linear_svm.fit(X_train, y_train) # # linearn_svm = LinearSVC(C=1) # y_pred = linear_svm.predict(X_test) # print('Training accuracy of '+name_of_test+': ', linear_svm.score(X_train, y_train)) # print('Testing accuracy of '+name_of_test+': ', linear_svm.score(X_test, y_test)) # print(classification_report(y_test, y_pred)) # return linear_svm def build_model(X_train, y_train, X_test, y_test, name_of_test): # log_reg = LogisticRegression(C=30, max_iter=200).fit(X_train, y_train) lin_svm = LinearSVC(C=1) linear_svm = CalibratedClassifierCV(lin_svm) 
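    # (Added note.) CalibratedClassifierCV wraps the LinearSVC so the fitted model exposes
    # predict_proba; the TextAttack SklearnModelWrapper used later expects class probabilities
    # when scoring candidate adversarial examples.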
linear_svm.fit(X_train, y_train) # linearn_svm = LinearSVC(C=1) y_pred = linear_svm.predict(X_test) print('Training accuracy of '+name_of_test+': ', linear_svm.score(X_train, y_train)) print('Testing accuracy of '+name_of_test+': ', linear_svm.score(X_test, y_test)) print(classification_report(y_test, y_pred)) return linear_svm**loading training and testing dataset**df_train = load_data('train') df_test = load_data('test') df_trainUsing custom data configuration default Reusing dataset rotten_tomatoes_movie_review (/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46)Bag of Words(BOW)# Create unstemmed BOW features for training set unstemmed_BOW_vect_fit, df_train_bow_unstem, df_test_bow_unstem = transform_BOW(df_train, df_test, 'Review')/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead. warnings.warn(msg, category=FutureWarning)TF-idfunstemmed_tfidf_vect_fit, df_train_tfidf_unstem, df_test_tfidf_unstem = transform_tfidf(df_train, df_test, 'Review')/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead. warnings.warn(msg, category=FutureWarning) /usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead. warnings.warn(msg, category=FutureWarning)Training linear svm model on BOW(unstemmed) features and tf-idf(unstemmed) featuresbow_unstemmed = build_model(df_train_bow_unstem, df_train['Sentiment'], df_test_bow_unstem, df_test['Sentiment'], 'BOW Unstemmed') tfidf_unstemmed = build_model(df_train_tfidf_unstem, df_train['Sentiment'], df_test_tfidf_unstem, df_test['Sentiment'], 'TFIDF Unstemmed')Training accuracy of TFIDF Unstemmed: 0.6213364595545134 Testing accuracy of TFIDF Unstemmed: 0.6078799249530957 precision recall f1-score support 0 0.60 0.67 0.63 533 1 0.62 0.54 0.58 533 accuracy 0.61 1066 macro avg 0.61 0.61 0.61 1066 weighted avg 0.61 0.61 0.61 1066Installing textattack toolbox!pip install textattack !pip install tensorflow-textRequirement already satisfied: textattack in /usr/local/lib/python3.7/dist-packages (0.3.4) Requirement already satisfied: more-itertools in /usr/local/lib/python3.7/dist-packages (from textattack) (8.8.0) Requirement already satisfied: lemminflect in /usr/local/lib/python3.7/dist-packages (from textattack) (0.2.2) Requirement already satisfied: torch!=1.8,>=1.7.0 in /usr/local/lib/python3.7/dist-packages (from textattack) (1.10.0+cu111) Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from textattack) (1.4.1) Requirement already satisfied: num2words in /usr/local/lib/python3.7/dist-packages (from textattack) (0.5.10) Requirement already satisfied: language-tool-python in /usr/local/lib/python3.7/dist-packages (from textattack) (2.6.2) Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from textattack) (3.4.0) Requirement already satisfied: bert-score>=0.3.5 in /usr/local/lib/python3.7/dist-packages (from textattack)[...]Performing Textfooler attackimport textattack 
from textattack.models.wrappers import SklearnModelWrapper from textattack.datasets import HuggingFaceDataset from textattack.attack_recipes import TextFoolerJin2019 from textattack import Attacker**On Unstemmed BOW features**model_wrapper = SklearnModelWrapper(bow_unstemmed, unstemmed_BOW_vect_fit) dataset = HuggingFaceDataset("rotten_tomatoes", None, "train") attack = TextFoolerJin2019.build(model_wrapper) attacker = Attacker(attack, dataset) attacker.attack_dataset()Attack( (search_method): GreedyWordSwapWIR( (wir_method): delete ) (goal_function): UntargetedClassification (transformation): WordSwapEmbedding( (max_candidates): 50 (embedding): WordEmbedding ) (constraints): (0): WordEmbeddingDistance( (embedding): WordEmbedding (min_cos_sim): 0.5 (cased): False (include_unknown_words): True (compare_against_original): True ) (1): PartOfSpeech( (tagger_type): nltk (tagset): universal (allow_verb_noun_swap): True (compare_against_original): True ) (2): UniversalSentenceEncoder( (metric): angular (threshold): 0.840845057 (window_size): 15 (skip_text_shorter_than_window): True (compare_against_original): False ) (3): RepeatModification (4): StopwordModification (5): InputColumnModification( (matching_column_labels): ['premise', 'hypothesis'] [...]**On Unstemmed tf-idf features**from textattack.models.wrappers import SklearnModelWrapper model_wrapper = SklearnModelWrapper(tfidf_unstemmed, unstemmed_tfidf_vect_fit) dataset = HuggingFaceDataset("rotten_tomatoes", None, "train") attack = TextFoolerJin2019.build(model_wrapper) attacker = Attacker(attack, dataset) attacker.attack_dataset()listlst=['ashish',1,2,"ram",2,[3,4,5]] lst lst.append(32) lst lst.count(2) lst.index('ram') lst.insert(0,121) lst lst.reverse() lstDictionarydct={'name':'ashish',"age":20,'number':993239123, 'email':'' } dct dct.get('email') dct.pop('number') dct dct.items() dct.values() dct1={'college':'xyz','branch':'cse'} dct.update(dct1) dctsetsst={'ashu','ram',1,2,3,1,3,2,4,'ram'} st st.add('india') st st1={1,3,5,'india'} st.difference(st1) st.discard('ram') st st.intersection(st1) st.union(st1)tuppletup=(1,2,3,4,[2,3,4],'hello','india') tup tup.count(2) tup.index([2,3,4]) tuple(st) len(tup) tup1=(1,3,4,5,12,24) max(tup1) min(tup1)stringstr="my country is india" str str.capitalize() str.find('is') str.islower() str.split() str.format() str.replace('india','england')Summary---- Importsimport concurrent.futures import itertools import os from pathlib import Path import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import proteinsolver import psutil import pyarrow as pa import pyarrow.parquet as pq import torch from kmbio import PDB from scipy import stats from tqdm.notebook import tqdm DEBUG = "CI" not in os.environ if DEBUG: %load_ext autoreload %autoreload 2 %matplotlib inline try: inline_rc except NameError: inline_rc = mpl.rcParams.copy() mpl.rcParams.update({"font.size": 12})ParametersUNIQUE_ID = "191f05de" # No attention # UNIQUE_ID = "0007604c" # 5-layer graph-conv with attention, batch_size=1 # UNIQUE_ID = "91fc9ab9" # 4-layer graph-conv with attention, batch_size=4 BEST_STATE_FILES = { # "191f05de": "protein_train/191f05de/e53-s1952148-d93703104.state" } NOTEBOOK_NAME = "06_global_analysis_of_protein_folding" NOTEBOOK_PATH = Path(NOTEBOOK_NAME).resolve() NOTEBOOK_PATH.mkdir(exist_ok=True) NOTEBOOK_PATH INPUT_PATH = Path(os.getenv("DATAPKG_INPUT_DIR")) INPUT_PATH DATAPKG_DATA_DIR = Path(f"~/datapkg_data_dir").expanduser().resolve() DATAPKG_DATA_DIR device = 
torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device proteinsolver.settings.data_url = DATAPKG_DATA_DIR.as_posix() proteinsolver.settings.data_urlLoad data!ls {INPUT_PATH}/global_analysis_of_protein_folding !ls {INPUT_PATH}/global_analysis_of_protein_folding/aan0693_designed-PDB-files !ls {INPUT_PATH}/global_analysis_of_protein_folding/aan0693_SI_datasetsaan0693_SI_datasets!ls {INPUT_PATH}/global_analysis_of_protein_folding/aan0693_SI_datasets/stability_scores def remove_controls(df): df = df[ (~df["name"].str.endswith("_hp")) & (~df["name"].str.endswith("_random")) & (~df["name"].str.endswith("_buryD")) ] return df def load_stability_scores(key): stability_scores = pd.read_csv( INPUT_PATH / "global_analysis_of_protein_folding" / "aan0693_SI_datasets" / "stability_scores" / f"{key}_stability_scores", sep="\t", ) stability_scores = remove_controls(stability_scores) for energy_function in ["talaris2013", "betanov15"]: rosetta_energies_file = ( INPUT_PATH / "global_analysis_of_protein_folding" / "aan0693_SI_datasets" / "design_structural_metrics" / f"{key}_relax_scored_{'filtered_' if energy_function == 'betanov15' else ''}{energy_function}.sc" ) if not rosetta_energies_file.is_file(): print(f"Not loading Rosetta energies for {energy_function}!") continue before_ = len(stability_scores) relax_scored_filtered = pd.read_csv( rosetta_energies_file, sep="\t" if energy_function == "betanov15" else " +", engine="python" ).rename(columns={"description": "name", "total_score": f"{energy_function}_score"}) stability_scores = stability_scores.merge( relax_scored_filtered[["name", f"{energy_function}_score"]], on="name", how="outer" ) # assert len(stability_scores) == before_, (len(stability_scores), before_) stability_scores["library_name"] = key return stability_scoresstability_scores# stability_scores = {} # for key in ["rd1", "rd2", "rd3", "rd4", "ssm2"]: # stability_scores[key] = load_stability_scores(key) # stability_scores["fig1"] = pd.read_csv( # INPUT_PATH / "global_analysis_of_protein_folding" / "aan0693_SI_datasets" / "fig1_thermodynamic_data.csv" # ).assign(library_name="fig1") stability_scores = torch.load(NOTEBOOK_PATH.joinpath("stability_scores.torch"))Load model%run protein_train/{UNIQUE_ID}/model.py batch_size = 1 num_features = 20 adj_input_size = 2 hidden_size = 128 frac_present = 0.5 frac_present_valid = frac_present info_size= 1024 state_file = BEST_STATE_FILES[UNIQUE_ID] state_file net = Net( x_input_size=num_features + 1, adj_input_size=adj_input_size, hidden_size=hidden_size, output_size=num_features ) net.load_state_dict(torch.load(state_file, map_location=device)) net.eval() net = net.to(device)Mutation probabilities Test networkNOTEBOOK_PATH.parents[2] dataset = [] for structure_id in ["5vli02", "1n5uA03", "4z8jA00", "4unuA00", "4beuA02"]: structure_file = Path( os.getenv( "STRUCTURE_FILE", NOTEBOOK_PATH.parent.parent / "proteinsolver" / "data" / "inputs" / f"{structure_id}.pdb", ) ).resolve() structure = PDB.load(structure_file) pdata = proteinsolver.utils.extract_seq_and_adj(structure, list(structure[0])[0].id) data = proteinsolver.datasets.protein.row_to_data(pdata) data = proteinsolver.datasets.protein.transform_edge_attr(data) dataset.append(data) start_time = time.perf_counter() for data in tqdm(dataset): data = data.to(device) out = net(data.x, data.edge_index, data.edge_attr) data.x[0] = 0 out = net(data.x, data.edge_index, data.edge_attr) print(f"Elapsed time: {time.perf_counter() - start_time}.") start_time = time.perf_counter() for data in 
tqdm(dataset): data = data.to(device) proteinsolver.utils.scan_with_mask(net, data.x, data.edge_index, data.edge_attr, 20) print(f"Elapsed time: {time.perf_counter() - start_time}.")※ このNotebookでは、perftool動作確認のための環境構築をを実施します。 ssh公開鍵の作成Jupyter Notebook サーバからVCノードに ssh 接続するために、ssh キーペアを作成しておきます。# 動作確認用SSHキーペア作成 !mkdir -p ~/.ssh !test -f ~/.ssh/id_rsa || ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ""VCノードの作成 VCP SDK の初期化 VCP SDK を使用ために、はじめにアクセストークンを使用して VCP SDKを初期化してください。 次のセルを実行すると、VCコントローラのアクセストークンの入力枠が表示されます。 アクセストークンの値を入力し、Enterキーを押すことで初期化が完了します。from getpass import getpass vcc_access_token = getpass() from common import logsetting from vcpsdk.vcpsdk import VcpSDK # VCP SDKの初期化 vcp = VcpSDK( vcc_access_token, # VC Controllerのアクセストークン ) vcp.version()········ vcplib: filename: /notebooks/notebook/vcpsdk/vcplib/occtr.py version: 20.08.0+20200831 vcpsdk: filename: /notebooks/notebook/vcpsdk/vcpsdk/vcpsdk.py version: 20.10.0+2021001 plugin: aws: 1.2+20191001 aws_disk: 1.0+20190408 aws_spot: 1.1+20191001 azure: 1.2+20191001 vmware: 1.1+20191001 azure_disk: 1.0+20190408 sakura: 1.1+20191001 sakura_disk: 1.0+20190930 oracle: 1.0+20200331 oracle_disk: 1.0+20200331 aic: 1.2+20191001 abc: 1.3+20190408 hokudai: 1.1+20191001 chameleon: 1.0+20200831 chameleon_ext: 20200831 gcp: 1.0+20190408 onpremises: 1.0+20190408 vc_controller: host: 10.0.0.1 name: vcc1045 wait_timeout_sec: 1000(default 15min) vc_controller: 20.10.1+20210101 vc_controller_git_tag: 20.10.1 plugin: vmware: 1.1+20210101 chameleon: 1.0+20210101 azure: 1.2+20210101 aws_spot: 1.1+20210101 aic: 1.2+20210101 sakura: 1.1+20210101 hokudai: 1.1+20210101 abc: 1.[...]もしエラーが発生したら...- `Exception: ('VCP', 'config vc failed')` ... 使われたアクセストークンが正しくない- `Exception: ('VCP', 'server error')` ... VCコントローラのIPアドレス設定が正しくないか、ネットワーク接続エラーが発生している VCノードの起動 次のセルを実行し、動作確認用のVCノードを1個作成します。 クラウドのインスタンスを起動してVCノード用の初期設定を行うため、実行完了まで数分程度を要します。# UnitGroupの作成 unit_group = vcp.create_ugroup( 'sinetstream01' # UnitGroupの名前 ) spec = vcp.get_spec('aws', 'small') spec.set_ssh_pubkey('/home/jovyan/.ssh/id_rsa.pub') spec.num_nodes = 2 unit = unit_group.create_unit( 'aws-testnodes', # Unit名の指定 spec )2021-03-18 14:56:22,623 - INFO - BOOTING ... 0 sec 2021-03-18 14:56:27,750 - INFO - BOOTING ... 5 sec 2021-03-18 14:56:32,873 - INFO - BOOTING ... 10 sec 2021-03-18 14:56:38,006 - INFO - BOOTING ... 15 sec 2021-03-18 14:56:43,135 - INFO - BOOTING ... 20 sec 2021-03-18 14:56:48,265 - INFO - BOOTING ... 25 sec 2021-03-18 14:56:53,394 - INFO - BOOTING ... 30 sec 2021-03-18 14:56:58,531 - INFO - BOOTING ... 35 sec 2021-03-18 14:57:03,667 - INFO - BOOTING ... 40 sec 2021-03-18 14:57:08,801 - INFO - BOOTING ... 45 sec 2021-03-18 14:57:13,930 - INFO - BOOTING ... 50 sec 2021-03-18 14:57:19,086 - INFO - BOOTING ... 55 sec 2021-03-18 14:57:24,443 - INFO - BOOTING ... 60 sec 2021-03-18 14:57:29,707 - INFO - BOOTING ... 65 sec 2021-03-18 14:57:34,902 - INFO - BOOTING ... 70 sec 2021-03-18 14:57:40,039 - INFO - BOOTING ... 75 sec 2021-03-18 14:57:45,172 - INFO - BOOTING ... 80 sec 2021-03-18 14:57:50,310 - INFO - BOOTING ... 85 sec 2021-03-18 14:57:55,440 - INFO - BOOTING ... 
90 sec 2021-03-18 14:[...]もしエラーが発生したら...AWSのクレデンシャル情報が正しくない場合、以下のようなエラーが発生します。 Valut に設定した内容を確認してください。```*** HAS ERROR:---2019/06/12 05:09:13 UTC: * provider.aws: InvalidClientTokenId: The security token included in the request is invalid.---``` 起動したVCノードの確認次のセルを実行し、起動したVCノードを確認してください。unit.df_nodes()VCノードへのsshログインJupyterNotebook の Terminal 機能を用いて、作成したVCノードにsshログインすることができます。 VCノードのIPアドレスは上記の `unit.df_nodes()` の出力に含まれています。```$ ssh -i ~/.ssh/id_rsa root@{cloud_instance_address}```# unit_group.find_ip_addresses() は UnitGroup内の全VCノードのIPアドレスのリストを返します ip_addresses = unit_group.find_ip_addresses(node_state='RUNNING') client_ip = ip_addresses[0] server_ip = ip_addresses[1]まず、ログインする前に ~/.ssh/known_hosts のホストキーを更新します。!touch ~/.ssh/known_hosts # ~/.ssh/known_hosts から古いホストキーを削除する !ssh-keygen -R {client_ip} !ssh-keygen -R {server_ip} # ホストキーの登録 !ssh-keyscan -H {client_ip} >> ~/.ssh/known_hosts !ssh-keyscan -H {server_ip} >> ~/.ssh/known_hosts import os ssh_private_key = os.path.expanduser('~/.ssh/id_rsa') ssh_opts = f"-i {ssh_private_key} -l root" !ssh {ssh_opts} {client_ip} uname -a !ssh {ssh_opts} {server_ip} uname -aLinux ip-172-30-2-7 4.4.0-159-generic #187-Ubuntu SMP Thu Aug 1 16:28:06 UTC 2019 x86_64 Linux Linux ip-172-30-2-36 4.4.0-159-generic #187-Ubuntu SMP Thu Aug 1 16:28:06 UTC 2019 x86_64 LinuxBrokerを準備する!ssh {ssh_opts} {server_ip} /usr/local/bin/docker run -d --name broker --hostname broker -p 1883:1883 -p 9092:9092 sinetstream/tutorial:1.0.0 !ssh {ssh_opts} {server_ip} /usr/local/bin/docker ps -l !ssh {ssh_opts} {server_ip} /usr/local/bin/docker exec -t broker ps axCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES e4d92eb5ef85 sinetstream/tutorial:1.0.0 "/usr/local/bin/supe…" 2 minutes ago Up 2 minutes 0.0.0.0:1883->1883/tcp, 0.0.0.0:9092->9092/tcp broker PID TTY STAT TIME COMMAND 1 ? Ss 0:00 /usr/bin/python3 /usr/local/bin/supervisord -n -c /et 9 ? Sl 0:06 java -Xmx1G -Xms1G -server -XX:+UseG1GC -XX:MaxGCPaus 10 ? S 0:00 /usr/sbin/mosquitto -c /etc/mosquitto/mosquitto.conf 12 ? Sl 0:01 java -Xmx512M -Xms512M -server -XX:+UseG1GC -XX:MaxGC 717 pts/0 Rs+ 0:00 ps axpertool実行環境を準備する!ssh {ssh_opts} {client_ip} /usr/local/bin/docker run -d --name reader --hostname reader -e ENABLE_BROKER=false --add-host=broker:{server_ip} sinetstream/tutorial:1.0.0 !ssh {ssh_opts} {client_ip} /usr/local/bin/docker ps -l !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t reader ps axCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4e81e87190e4 sinetstream/tutorial:1.0.0 "/usr/local/bin/supe…" 3 minutes ago Up 3 minutes 1883/tcp, 9092/tcp reader PID TTY STAT TIME COMMAND 1 ? 
Ss 0:00 /usr/bin/python3 /usr/local/bin/supervisord -n -c /et 30 pts/0 Rs+ 0:00 ps axReader用のコンテナを起動する際に指定した --add-host は Readerコンテナの /etc/hosts に、Broker の IPアドレスを登録するためのものです。 Kafkaブローカーを利用するためにはサーバアドレスの名前解決が必要となるため、このパラメータの指定を追加しています。 Readerコンテナの /etc/hosts を表示して Broker のIPアドレスが登録されていることを確認します。!ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t reader cat /etc/hosts127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b ip6-localnet fdf8:f53e:61e4::18 ip6-mcastprefix fc00:db20:35b:7399::5 ip6-allnodes fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b ip6-allrouters 172.30.2.36 broker 172.17.0.2 readerSINETStreamをインストールする!ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader pip3 install --user sinetstream-kafka sinetstream-mqttCollecting sinetstream-kafka Downloading https://files.pythonhosted.org/packages/fd/7c/cfb05a77a711cc2ed397063e4953e2a86ec494087b8d1d6e63ba71088e5c/sinetstream_kafka-1.5.0-py3-none-any.whl Collecting sinetstream-mqtt Downloading https://files.pythonhosted.org/packages/75/f4/a94c6835181fdd9a16c42906632124a6151972d73771a5f3b6560b76dcfd/sinetstream_mqtt-1.5.0-py3-none-any.whl Collecting promise Downloading https://files.pythonhosted.org/packages/cf/9c/fb5d48abfe5d791cd496e4242ebcf87a4bb2e0c3dcd6e0ae68c11426a528/promise-2.3.tar.gz Collecting kafka-python>=2.0 [?25l Downloading https://files.pythonhosted.org/packages/75/68/dcb0db055309f680ab2931a3eeb22d865604b638acf8c914bedf4c1a0c8c/kafka_python-2.0.2-py2.py3-none-any.whl (246kB)  |################################| 256kB 12.4MB/s eta 0:00:01 [?25hCollecting sinetstream>=1.5.0 Downloading https://files.pythonhosted.org/packages/75/e9/416e68c5209237de0b844b0f260dbe529ffbf868218ec3745df4f884c8c5/sinetstream-1.5.0-py3-none-any[...]最後に Successfully installed ...と表示されていれば、ライブラリのインストールに成功しています。 確認のためインストールされている Python3 ライブラリの一覧を表示してみます。!ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader pip3 listPackage Version ----------------- ------- avro-python3 1.10.2 kafka-python 2.0.2 paho-mqtt 1.5.1 pip 19.3.1 promise 2.3 pycryptodomex 3.10.1 PyYAML 3.12 setuptools 42.0.2 sinetstream 1.5.0 sinetstream-kafka 1.5.0 sinetstream-mqtt 1.5.0 six 1.15.0 supervisor 4.1.0 WARNING: You are using pip version 19.3.1; however, version 21.0.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command.SINETStreamの動作確認 Readerのプログラムと設定ファイルを準備する!ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader mkdir -p /home/user01/sinetstream/reader ss_url='https://raw.githubusercontent.com/nii-gakunin-cloud/sinetstream/master' !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader curl -O {ss_url}/docs/tutorial/.sinetstream_config.yml -O {ss_url}/python/sample/text/consumer.py !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader mv .sinetstream_config.yml sinetstream/reader/ !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader mv consumer.py sinetstream/reader/ !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader ls -a /home/user01/sinetstream/reader. .. 
.sinetstream_config.yml consumer.pyWriterのプログラムと設定ファイルを準備する!ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader mkdir -p /home/user01/sinetstream/writer ss_url='https://raw.githubusercontent.com/nii-gakunin-cloud/sinetstream/master' !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader curl -O {ss_url}/docs/tutorial/.sinetstream_config.yml -O {ss_url}/python/sample/text/producer.py !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader mv .sinetstream_config.yml sinetstream/writer/ !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader mv producer.py sinetstream/writer/ !ssh {ssh_opts} {client_ip} /usr/local/bin/docker exec -t -u user01 reader ls -a /home/user01/sinetstream/writer. .. .sinetstream_config.yml producer.pyhttps://www.sinetstream.net/docs/tutorial/TUTORIAL-STEP2.html 3.1. Kafkaブローカーとの間でメッセージの送受信を行う と同様に実行してみる VCノードの削除次のセルを実行し、動作確認に使用したクラウドインスタンスを削除してください。 実行完了まで数分程度を要します。unit_group.cleanup()2021-03-18 14:56:14,734 - INFO - cleanup completed. vc sinetstream01 is cleanup(no unit)Pré-processamento da base de dados census.csvA base de dados census.csv foi criada por em 1996, ele extraiu esses dados de uma *database* de 1994 do Censo. Esta *database* tem por objetivo prever se a renda anual de um adulto excederá o valor de 50 mil, ela possui um atributo classificador e 14 atributos previsores.Lista dos atributos previsores:* **age**: contínuo.* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.* **final-weight**: contínuo.* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.* **education-num**: contínuo.* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.* **race**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.* **sex**: Female, Male.* **capital-gain**: contínuo.* **capital-loss**: contínuo.* **hours-per-week**: contínuo.* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.Para essa base de dados utilizaremos técnicas de pré-processamento um pouco diferentes das aplicadas na base de dados credit_data.csv, pois nessa base do censo já foram verificados que não existem valores inconsistentes, e por ela possuir atributos com variáveis categóricas, devemos relizar um tratamento de codificação, para que os cálculos possam ser realizados.import pandas as pd base = pd.read_csv('census.csv') previsores = base.iloc[:, 0:14].values classe = base.iloc[:, 14].valuesDescrição dos atributos numéricos da base de dados:base.describe()O próximo passo será a codificação das variáveis categóricas, para isso podemos utilizar duas classes diferentes da bilioteca 
**sklearn.preprocessing**, que são o **LabelEncoder** e o **OneHotEncoder**, no caso do OneHotEnconder iremos necessitar de uma classe da biblioteca **sklearn.compose** chamado **ColumnTransformer**. A escolha do codificador correto para o algoritmo de machine learnig que está sendo utilizado, é uma das chaves para obter os melhores resultados possíveis.# Importando as classes codificadoras from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.compose import ColumnTransformerLabelEncoderA codificação por meio da classe **LabelEncoder** é feita pela rotulação das variáveis categóricas em ordem alfabética, onde a váriavel na primeira posição da ordenação é substituída por 0, à segunda por 1 e assim sucessivamente. O algoritmo que obteve ótimos resultados utilizando esse tipo de codificação foi o **Naive bayes**.Estes são os dez primeiros registros antes da codifição:base.head(10) # Criando um objeto do tipo LabelEncoder labelencoder_previsores = LabelEncoder() # Codificando cada uma das colunas que contêm variáveis categóricas previsores[:, 1] = labelencoder_previsores.fit_transform(previsores[:, 1]) previsores[:, 3] = labelencoder_previsores.fit_transform(previsores[:, 3]) previsores[:, 5] = labelencoder_previsores.fit_transform(previsores[:, 5]) previsores[:, 6] = labelencoder_previsores.fit_transform(previsores[:, 6]) previsores[:, 7] = labelencoder_previsores.fit_transform(previsores[:, 7]) previsores[:, 8] = labelencoder_previsores.fit_transform(previsores[:, 8]) previsores[:, 9] = labelencoder_previsores.fit_transform(previsores[:, 9]) previsores[:, 13] = labelencoder_previsores.fit_transform(previsores[:, 13])Dez primeiros registros após a codificação:for i in range(0, 10): print(i + 1, 'º) |', end = ' ') for j in range(0, 14): print(previsores[i][j], end = ' | ') print('\n')1 º) | 39 | 7 | 77516 | 9 | 13 | 4 | 1 | 1 | 4 | 1 | 2174 | 0 | 40 | 39 | 2 º) | 50 | 6 | 83311 | 9 | 13 | 2 | 4 | 0 | 4 | 1 | 0 | 0 | 13 | 39 | 3 º) | 38 | 4 | 215646 | 11 | 9 | 0 | 6 | 1 | 4 | 1 | 0 | 0 | 40 | 39 | 4 º) | 53 | 4 | 234721 | 1 | 7 | 2 | 6 | 0 | 2 | 1 | 0 | 0 | 40 | 39 | 5 º) | 28 | 4 | 338409 | 9 | 13 | 2 | 10 | 5 | 2 | 0 | 0 | 0 | 40 | 5 | 6 º) | 37 | 4 | 284582 | 12 | 14 | 2 | 4 | 5 | 4 | 0 | 0 | 0 | 40 | 39 | 7 º) | 49 | 4 | 160187 | 6 | 5 | 3 | 8 | 1 | 2 | 0 | 0 | 0 | 16 | 23 | 8 º) | 52 | 6 | 209642 | 11 | 9 | 2 | 4 | 0 | 4 | 1 | 0 | 0 | 45 | 39 | 9 º) | 31 | 4 | 45781 | 12 | 14 | 4 | 10 | 1 | 4 | 0 | 14084 | 0 | 50 | 39 | 10 º) | 42 | 4 | 159449 | 9 | 13 | 2 | 4 | 0 | 4 | 1 | 5178 | 0 | 40 | 39 |OneHotEncoderO codificador **OneHotEncoder** funciona de uma maneira um pouco diferente, primeiro ele transforma cada categoria dos atributos em uma coluna, contendo apenas os valores 0 e 1, as colunas correspondentes a cada atributo são organizadas em ordem alfabética. Se a categoria "dona" da coluna estiver registrada em alguma das linhas essa linha será marcada com 1, se não permanecerá com 0. 
Em alguns textos as categorias que são transformadas em colunas são chamadas de variáveis **dummy**, que significa manequim ou modelo.Aqui temos um exemplo dessa codificação, utilizaremos os 5 primeiros registros dos atributos workclass e sex:workclass | sex--------------- | ------state-gov | malesef-emp-not-inc | maleprivate | maleprivate | maleprivate | femaleApós a codificação:self-emp-not-inc | state-gov | private | female | male:--------------: | :-------: | :-----: | :----: | :--:0 | 1 | 0 | 0 | 11 | 0 | 0 | 0 | 10 | 0 | 1 | 0 | 10 | 0 | 1 | 0 | 10 | 0 | 1 | 1 | 0Essas são as dimensões do array previsores antes da codificação:previsores.shapePara que possamos criar um objeto do tipo OneHotEncoder precisaremos utilizar uma classe chamada **ColumnTransformer**, que realiza transformações em colunas, nesse exemplo utilizaremos apenas dois parâmetros: * **transformers**: Nesse parâmetro devemos especificar o tipo da transformação e as colunas que serão alteradas.* **remainder**: Por padrão esse parâmetro é definido como **drop**, onde as variáveis categóricas são transformadas e as numéricas são descartadas. Mas utilizaremos o especificador **passthrough**, diferentemente do anterior ele preserva as variáveis numéricas e as realoca nas últimas colunas do array.# Criando o objeto do tipo OneHotEncoder e especificando os parâmetros onehotencoder = ColumnTransformer(transformers=[("OneHot", OneHotEncoder(), [1,3,5,6,7,8,9,13])], remainder='passthrough') # Codificando e transformando as variáveis categóricas de previsores previsores = onehotencoder.fit_transform(previsores).toarray()Essas são as dimensões do array previsores após a codificação:previsores.shapeAgora que o array previsores está codificado, nos resta realizar a codificação do array classe. Como ele possui apenas duas categorias, que são **50K**, precisaremos apenas do LabelEncoder para realizar a codificação binária.# Codificando o array classe labelencoder_classe = LabelEncoder() classe = labelencoder_classe.fit_transform(classe)EscalonamentoDos algoritmos em que esta base de dados foi utilizada, a maioria deles obtiveram bons resultados escalonando apenas as variáveis númericas, já que às categóricas foram codificadas com 0 e 1. 
Por esse motivo escalonaremos apenas as últimas seis colunas do array previsores, pois foram lá que as variáveis numéricas foram realocadas.Estes são os 10 primeiros registros das colunas 102 à 107 antes do escalonamento:for i in range(0, 10): print(i + 1, 'º) |', end = ' ') for j in range(102, 108): print(previsores[i][j], end = ' | ') print('\n')1 º) | 39.0 | 77516.0 | 13.0 | 2174.0 | 0.0 | 40.0 | 2 º) | 50.0 | 83311.0 | 13.0 | 0.0 | 0.0 | 13.0 | 3 º) | 38.0 | 215646.0 | 9.0 | 0.0 | 0.0 | 40.0 | 4 º) | 53.0 | 234721.0 | 7.0 | 0.0 | 0.0 | 40.0 | 5 º) | 28.0 | 338409.0 | 13.0 | 0.0 | 0.0 | 40.0 | 6 º) | 37.0 | 284582.0 | 14.0 | 0.0 | 0.0 | 40.0 | 7 º) | 49.0 | 160187.0 | 5.0 | 0.0 | 0.0 | 16.0 | 8 º) | 52.0 | 209642.0 | 9.0 | 0.0 | 0.0 | 45.0 | 9 º) | 31.0 | 45781.0 | 14.0 | 14084.0 | 0.0 | 50.0 | 10 º) | 42.0 | 159449.0 | 13.0 | 5178.0 | 0.0 | 40.0 |Para realizarmos o escalonamento utilizaremos o escalonador padrão do módulo sklearn.preprocessing, o **StandardScale**.# Importando o escalonador padrão from sklearn.preprocessing import StandardScaler # Criando um objeto do tipo StandardScaler scale = StandardScaler() # Escalonando as colunas 102, 103, 104, 105, 106 e 107 do array previsores previsores[:, 102:108] = scale.fit_transform(previsores[:, 102:108])Estes são os 10 primeiros registros das colunas 102 à 107 após o escalonamento:for i in range(0, 10): print(i + 1, 'º)', end = ' ') for j in range(102, 108): print(previsores[i][j], end = '|') print('\n')1 º) 0.030670557354392187|-1.0636107451560874|1.1347387637961852|0.14845289521750027|-0.2166595270325583|-0.035429446972779874| 2 º) 0.8371089803598256|-1.0087070008321513|1.1347387637961852|-0.145920483558874|-0.2166595270325583|-2.222153121346444| 3 º) -0.04264202655519267|0.24507850479478385|-0.42005962401595|-0.145920483558874|-0.2166595270325583|-0.035429446972779874| 4 º) 1.0570467320885801|0.4258013560422527|-1.1974588179220176|-0.145920483558874|-0.2166595270325583|-0.035429446972779874| 5 º) -0.7757678656510413|1.4081757204012328|1.1347387637961852|-0.145920483558874|-0.2166595270325583|-0.035429446972779874| 6 º) -0.11595461046477752|0.8982009411562786|1.523438360749219|-0.145920483558874|-0.2166595270325583|-0.035429446972779874| 7 º) 0.7637963964502408|-0.2803583815660745|-1.9748580118280852|-0.145920483558874|-0.2166595270325583|-1.979183824193815| 8 º) 0.9837341481789953|0.1881946254296566|-0.42005962401595|-0.145920483558874|-0.2166595270325583|0.36951938161493575[...]Distribution in fitted maximum velocity differenceplt.rc('font', size=16) lwidth = 2 # Line width used in plots ########################################################################### # Hard-coded entry for the bins for the histrogram plots at the end of this # function. #-------------------------------------------------------------------------- hist_range = ( -300, 300) BINS = np.linspace( hist_range[0], hist_range[1], 50) ########################################################################### ########################################################################### # Plot the v_max_diff distribution and separate the distributions into # walls, voids, and other. 
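# (added note) v_max_diff_wall, v_max_diff_void, and v_max_diff_other are assumed (from the later sections of this notebook) to hold, for each galaxy, the difference between the v_max values fitted to the positive and negative rotation curves, split by large-scale environment (wall / void / other)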
#-------------------------------------------------------------------------- #v_max_diff_hist = plt.figure() plt.figure(figsize=(7,5)) plt.title('$\Delta v_{max}$ distribution') plt.hist( v_max_diff_other, BINS, color='g', density=True, histtype='step', linewidth=lwidth, linestyle='--', label='Other') # p = norm.pdf(x, v_max_other_mean, v_max_other_stdev) # plt.plot(x, p, 'g--', linewidth=2) # plt.axvline( v_max_other_mean, color='green', linestyle='-', linewidth=1.5) # plt.axvline( v_max_other_mean + v_max_other_stdev, # color='green', linestyle=':', linewidth=1) # plt.axvline( v_max_other_mean - v_max_other_stdev, # color='green', linestyle=':', linewidth=1) # plt.axvline( v_max_other_mean + 2*v_max_other_stdev, # color='green', linestyle=':', linewidth=1) # plt.axvline( v_max_other_mean - 2*v_max_other_stdev, # color='green', linestyle=':', linewidth=1) # _, mean_v_max_other_ = plt.ylim() # plt.text(v_max_other_mean + v_max_other_mean/10, # mean_v_max_other_ - mean_v_max_other_/10, # 'Mean: {:.2f}'.format( v_max_other_mean)) plt.hist( v_max_diff_void, BINS, color='r', density=True, histtype='step', linewidth=lwidth, label='Void') # p = norm.pdf(x, v_max_void_mean, v_max_void_stdev) # plt.plot(x, p, 'r--', linewidth=2) # plt.axvline( v_max_void_mean, color='red', linestyle='-', linewidth=1.5) # plt.axvline( v_max_void_mean + v_max_void_stdev, # color='red', linestyle=':', linewidth=1) # plt.axvline( v_max_void_mean - v_max_void_stdev, # color='red', linestyle=':', linewidth=1) # plt.axvline( v_max_void_mean + 2*v_max_void_stdev, # color='red', linestyle=':', linewidth=1) # plt.axvline( v_max_void_mean - 2*v_max_void_stdev, # color='red', linestyle=':', linewidth=1) # _, mean_void_v_max_ = plt.ylim() # plt.text(v_max_void_mean + v_max_void_mean/10, # mean_void_v_max_ - mean_void_v_max_/10, # 'Mean: {:.2f}'.format( v_max_void_mean)) plt.hist( v_max_diff_wall, BINS, color='k', density=True, histtype='step', linewidth=lwidth, linestyle=':', label='Wall') # p = norm.pdf(x, v_max_wall_mean, v_max_wall_stdev) # plt.plot(x, p, 'k--', linewidth=2) # plt.axvline( v_max_wall_mean, color='black', linestyle='-', linewidth=1.5) # plt.axvline( v_max_wall_mean + v_max_wall_stdev, # color='black', linestyle=':', linewidth=1) # plt.axvline( v_max_wall_mean - v_max_wall_stdev, # color='black', linestyle=':', linewidth=1) # plt.axvline( v_max_wall_mean + 2*v_max_wall_stdev, # color='black', linestyle=':', linewidth=1) # plt.axvline( v_max_wall_mean - 2*v_max_wall_stdev, # color='black', linestyle=':', linewidth=1) # _, mean_wall_v_max_ = plt.ylim() # plt.text(v_max_wall_mean + v_max_wall_mean/10, # mean_wall_v_max_ - mean_wall_v_max_/10, # 'Mean: {:.2f}'.format( v_max_wall_mean)) #ax = v_max_diff_hist.add_subplot(111) plt.tick_params( axis='both', direction='in') #ax.yaxis.set_ticks_position('both') #ax.xaxis.set_ticks_position('both') plt.ylabel('Galaxy fraction') plt.xlabel('$\Delta v_{max}$ [km/s]') plt.legend() # textstr = '\n'.join(( # r'$STDEV_{wall}$: $%.2f$' % ( v_max_wall_stdev, ), # r'$STDEV_{void}$: $%.2f$' % ( v_max_void_stdev, ), # r'STDEV: $%.2f$' % ( v_max_other_stdev, ), # r'$RMS_{wall}$: $%.2f$' % ( v_max_wall_rms, ), # r'$RMS_{void}$: $%.2f$' % ( v_max_void_rms, ), # r'RMS: $%.2f$' % ( v_max_other_rms, ))) # props = dict( boxstyle='round', facecolor='cornsilk', alpha=0.6) # ax.text(0.72, 0.95, textstr, # verticalalignment='top', horizontalalignment='left', # transform=ax.transAxes, # color='black', fontsize=8, bbox=props) ''' plt.savefig( IMAGE_DIR + '/histograms/v_max_diff_hist.' 
+ IMAGE_FORMAT, format=IMAGE_FORMAT) ''' #plt.show() #plt.close() ###########################################################################Scatter of fitted maximum velocity v. galaxy inclination angle########################################################################### # Plot the difference in the fitted v_max parameters from the positive and # negative rotation curves against the inclination angle of the galaxy # to see if there is any correlation. # # NOTE: Distributions are separated by wall, void, if the galaxy is cut off # by the edge of the footprint, or if the galaxy is not found within # the footprint. #-------------------------------------------------------------------------- #v_max_diff_vs_inclination_fig = plt.figure() plt.figure(figsize=(7,5)) plt.title('$\Delta v_{max}$ vs. Inclination angle') # plot points with errorbars # plt.errorbar( inclin_angle_wall, np.abs( v_max_diff_wall), # yerr=v_max_diff_wall_error, fmt='kv', ecolor='black') # plt.errorbar( inclin_angle_void, np.abs( v_max_diff_void), # yerr=v_max_diff_void_error, fmt='ro', ecolor='red') # plt.errorbar( inclin_angle_other, np.abs( v_max_diff_other), # yerr=v_max_diff_other_error, # fmt='go', ecolor='green', fillstyle='none') #plt.errorbar( inclination_angle, np.abs( v_max_difference), # yerr=v_max_difference_error , fmt='ko', ecolor='gray') # plot points without errorbars plt.plot( inclin_angle_other, np.abs( v_max_diff_other), 'go', markersize=3, fillstyle='none', label='Other') plt.plot( inclin_angle_wall, np.abs( v_max_diff_wall), 'kv', markersize=3, label='Wall') plt.plot( inclin_angle_void, np.abs( v_max_diff_void), 'ro', markersize=3, label='Void') #plt.plot( inclination_angle, v_max_difference, 'ko') #ax = v_max_diff_vs_inclination_fig.add_subplot(111) plt.tick_params( axis='both', direction='in') #ax.yaxis.set_ticks_position('both') #ax.xaxis.set_ticks_position('both') plt.ylabel('$\Delta v_{max}$ [km/s]') plt.ylim( ( 0, 2000)) plt.xlabel('$i$ [deg]') plt.legend() ''' plt.savefig( IMAGE_DIR + '/v_max_vs_inclination.' + IMAGE_FORMAT, format=IMAGE_FORMAT) ''' #plt.show() #plt.close() ###########################################################################Scatter of fitted maximum velocity vs. ratio of dark matter halo mass to stellar mass########################################################################### # Plot the difference in the fitted v_max parameters from the positive and # negative rotation curves against the stellar mass to dark matter mass # ratio to see if there is any correlation. # # NOTE: Distributions are separated by wall, void, if the galaxy is cut off # by the edge of the footprint, or if the galaxy is not found within # the footprint. #-------------------------------------------------------------------------- #v_max_diff_vs_mass_ratio_fig = plt.figure() plt.figure(figsize=(7,5)) plt.title("$\Delta v_{max}$ vs. 
$M_{DM}/M_*$") # plot points with errorbars # plt.errorbar( mass_ratio_wall, np.abs( v_max_diff_wall), # xerr=mass_ratio_wall_error, yerr=v_max_diff_wall_error, # fmt='kv', ecolor='black') # plt.errorbar( mass_ratio_void, np.abs( v_max_diff_void), # xerr=mass_ratio_void_error, yerr=v_max_diff_void_error, # fmt='ro', ecolor='red') # plt.errorbar( mass_ratio_other, np.abs( v_max_diff_other), # xerr=mass_ratio_other_error, yerr=v_max_diff_other_error, # fmt='go', ecolor='green', fillstyle='none') #plt.errorbar( mass_ratio, np.abs( v_max_difference), # xerr=mass_ratio_error, yerr=v_max_difference_error, # fmt='ko', ecolor='gray') # plot points without errorbars plt.plot( mass_ratio_other, np.abs(v_max_diff_other), 'go', markersize=3, fillstyle='none', label='Other') plt.plot( mass_ratio_wall, np.abs(v_max_diff_wall), 'kv', markersize=3, label='Wall') plt.plot( mass_ratio_void, np.abs(v_max_diff_void), 'ro', markersize=3, label='Void') #plt.plot( mass_ratio, np.abs(v_max_difference), 'ko', markersize=2) #ax = v_max_diff_vs_mass_ratio_fig.add_subplot(111) plt.tick_params( axis='both', direction='in') #ax.yaxis.set_ticks_position('both') #ax.xaxis.set_ticks_position('both') plt.ylabel('$\Delta v_{max}$ [km/s]') plt.xlabel('$M_{DM}/M_*$') plt.ylim( (0, 4000)) plt.xlim( (0, 1000)) plt.legend() ''' plt.savefig( IMAGE_DIR + '/v_max_vs_mass_ratio.' + IMAGE_FORMAT, format=IMAGE_FORMAT) ''' #plt.show() #plt.close() ###########################################################################Scatter of Chi Square versus difference in fitted maximum velocity########################################################################### # Plot the difference in the fitted v_max parameters from the positive and # negative rotation curves against the Chi Square to see if there is any # correlation. # # NOTE: Distributions are separated by wall, void, if the galaxy is cut off # by the edge of the footprint, or if the galaxy is not found within # the footprint. #-------------------------------------------------------------------------- #v_max_diff_vs_mass_ratio_fig = plt.figure() plt.figure(figsize=(7,5)) plt.title("$\Delta v_{max}$ vs. $\chi^2$") # plot points with errorbars # plt.errorbar( mass_ratio_wall, np.abs( v_max_diff_wall), # xerr=mass_ratio_wall_error, yerr=v_max_diff_wall_error, # fmt='kv', ecolor='black') # plt.errorbar( mass_ratio_void, np.abs( v_max_diff_void), # xerr=mass_ratio_void_error, yerr=v_max_diff_void_error, # fmt='ro', ecolor='red') # plt.errorbar( mass_ratio_other, np.abs( v_max_diff_other), # xerr=mass_ratio_other_error, yerr=v_max_diff_other_error, # fmt='go', ecolor='green', fillstyle='none') #plt.errorbar( mass_ratio, np.abs( v_max_difference), # xerr=mass_ratio_error, yerr=v_max_difference_error, # fmt='ko', ecolor='gray') # plot points without errorbars plt.loglog( chi2_other, v_max_diff_other, 'go', markersize=3, fillstyle='none', label='Other') plt.loglog( chi2_wall, v_max_diff_wall, 'kv', markersize=3, label='Wall') plt.loglog( chi2_void, v_max_diff_void, 'ro', markersize=3, label='Void') #plt.plot( mass_ratio, chi2_list, 'ko', markersize=2) #ax = v_max_diff_vs_mass_ratio_fig.add_subplot(111) plt.tick_params( axis='both', direction='in') #ax.yaxis.set_ticks_position('both') #ax.xaxis.set_ticks_position('both') plt.ylabel('$\Delta v_{max}$ [km/s]') plt.xlabel('$\chi^2$') #plt.ylim( (0, 4000)) #plt.xlim( (0, 1000)) plt.legend() ''' plt.savefig( IMAGE_DIR + '/v_max_vs_mass_ratio.' 
+ IMAGE_FORMAT, format=IMAGE_FORMAT) ''' #plt.show() #plt.close() ###########################################################################(pd)= PandasIn addition to what's in Anaconda, this lecture will need the followinglibraries:!pip install --upgrade pandas-datareaderRequirement already up-to-date: pandas-datareader in /home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages (0.8.1) Requirement already satisfied, skipping upgrade: lxml in /home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages (from pandas-datareader) (4.5.0) Requirement already satisfied, skipping upgrade: pandas>=0.21 in /home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages (from pandas-datareader) (1.0.1) Requirement already satisfied, skipping upgrade: requests>=2.3.0 in /home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages (from pandas-datareader) (2.23.0) Requirement already satisfied, skipping upgrade: pytz>=2017.2 in /home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages (from pandas>=0.21->pandas-datareader) (2019.3) Requirement already satisfied, skipping upgrade: numpy>=1.13.3 in /home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages (from pandas>=0.21->pandas-datareader) (1.18.1) Requirement already satisfied, skipping upgrade: p[...]Overview[Pandas](http://pandas.pydata.org/) is a package of fast, efficient dataanalysis tools for Python.Its popularity has surged in recent years, coincident with the rise offields such as data science and machine learning.Here\'s a popularity comparison over time against STATA, SAS, and[dplyr](https://dplyr.tidyverse.org/) courtesy of Stack Overflow Trends```{figure} /_static/lecture_specific/pandas/pandas_vs_rest.png:scale: 23%```Just as [NumPy](http://www.numpy.org/) provides the basic array datatype plus core array operations, pandas1. defines fundamental structures for working with data and2. 
endows them with methods that facilitate operations such as - reading in data - adjusting indices - working with dates and time series - sorting, grouping, re-ordering and general data munging[^1] - dealing with missing values, etc., etc.More sophisticated statistical functionality is left to other packages,such as [statsmodels](http://www.statsmodels.org/) and[scikit-learn](http://scikit-learn.org/), which are built on top ofpandas.This lecture will provide a basic introduction to pandas.Throughout the lecture, we will assume that the following imports havetaken placeimport pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import requestsSeriesTwo important data types defined by pandas are `Series` and `DataFrame`.You can think of a `Series` as a \"column\" of data, such as acollection of observations on a single variable.A `DataFrame` is an object for storing related columns of data.Let\'s start with `Series`s = pd.Series(np.random.randn(4), name='daily returns') sHere you can imagine the indices `0, 1, 2, 3` as indexing four listedcompanies, and the values being daily returns on their shares.Pandas `Series` are built on top of NumPy arrays and support manysimilar operationss * 100 np.abs(s)But `Series` provide more than NumPy arrays.Not only do they have some additional (statistically oriented) methodss.describe()But their indices are more flexibles.index = ['AMZN', 'AAPL', 'MSFT', 'GOOG'] sViewed in this way, `Series` are like fast, efficient Pythondictionaries (with the restriction that the items in the dictionary allhave the same type---in this case, floats).In fact, you can use much of the same syntax as Python dictionariess['AMZN'] s['AMZN'] = 0 s 'AAPL' in sDataFramesWhile a `Series` is a single column of data, a `DataFrame` is severalcolumns, one for each variable.In essence, a `DataFrame` in pandas is analogous to a (highly optimized)Excel spreadsheet.Thus, it is a powerful tool for representing and analyzing data that arenaturally organized into rows and columns, often with descriptiveindexes for individual rows and individual columns.```{only} htmlLet\'s look at an example that reads data from the CSV file`pandas/data/test_pwt.csv` that can be downloaded[here](https://lectures.quantecon.org/_downloads/pandas/data/test_pwt.csv).``````{only} latexLet\'s look at an example that reads data from the CSV file`pandas/data/test_pwt.csv` and can be downloaded[here](https://lectures.quantecon.org/_downloads/pandas/data/test_pwt.csv).```Here\'s the content of `test_pwt.csv````{code-block} none"country","country isocode","year","POP","XRAT","tcgdp","cc","cg""Argentina","ARG","2000","37335.653","0.9995","295072.21869","75.716805379","5.5788042896""Australia","AUS","2000","19053.186","1.72483","541804.6521","67.759025993","6.7200975332""India","IND","2000","1006300.297","44.9416","1728144.3748","64.575551328","14.072205773""Israel","ISR","2000","6114.57","4.07733","129253.89423","64.436450847","10.266688415""Malawi","MWI","2000","11801.505","59.543808333","5026.2217836","74.707624181","11.658954494""South Africa","ZAF","2000","45064.098","6.93983","227242.36949","72.718710427","5.7265463933""United States","USA","2000","282171.957","1","9898700","72.347054303","6.0324539789""Uruguay","URY","2000","3219.793","12.099591667","25255.961693","78.978740282","5.108067988"```Supposing you have this data saved as `test_pwt.csv` in the presentworking directory (type `%pwd` in Jupyter to see what this is), it canbe read in as follows:df = 
pd.read_csv('https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas/data/test_pwt.csv') type(df) dfWe can select particular rows using standard Python array slicingnotationdf[2:5]To select columns, we can pass a list containing the names of thedesired columns represented as stringsdf[['country', 'tcgdp']]To select both rows and columns using integers, the `iloc` attributeshould be used with the format `.iloc[rows, columns]`df.iloc[2:5, 0:4]To select rows and columns using a mixture of integers and labels, the`loc` attribute can be used in a similar waydf.loc[df.index[2:5], ['country', 'tcgdp']]Let\'s imagine that we\'re only interested in population (`POP`) andtotal GDP (`tcgdp`).One way to strip the data frame `df` down to only these variables is tooverwrite the dataframe using the selection method described abovedf = df[['country', 'POP', 'tcgdp']] dfHere the index `0, 1,..., 7` is redundant because we can use the countrynames as an index.To do this, we set the index to be the `country` variable in thedataframedf = df.set_index('country') dfLet\'s give the columns slightly better namesdf.columns = 'population', 'total GDP' dfPopulation is in thousands, let\'s revert to single unitsdf['population'] = df['population'] * 1e3 dfNext, we\'re going to add a column showing real GDP per capita,multiplying by 1,000,000 as we go because total GDP is in millionsdf['GDP percap'] = df['total GDP'] * 1e6 / df['population'] dfOne of the nice things about pandas `DataFrame` and `Series` objects isthat they have methods for plotting and visualization that work throughMatplotlib.For example, we can easily generate a bar plot of GDP per capitaax = df['GDP percap'].plot(kind='bar') ax.set_xlabel('country', fontsize=12) ax.set_ylabel('GDP per capita', fontsize=12) plt.show()At the moment the data frame is ordered alphabetically on thecountries---let\'s change it to GDP per capitadf = df.sort_values(by='GDP percap', ascending=False) dfPlotting as before now yieldsax = df['GDP percap'].plot(kind='bar') ax.set_xlabel('country', fontsize=12) ax.set_ylabel('GDP per capita', fontsize=12) plt.show()On-Line Data SourcesPython makes it straightforward to query online databasesprogrammatically.An important database for economists is[FRED](https://research.stlouisfed.org/fred2/) --- a vast collectionof time series data maintained by the St. Louis Fed.For example, suppose that we are interested in the [unemploymentrate](https://research.stlouisfed.org/fred2/series/UNRATE).Via FRED, the entire series for the US civilian unemployment rate can bedownloaded directly by entering this URL into your browser (note thatthis requires an internet connection)```{code-block} nonehttps://research.stlouisfed.org/fred2/series/UNRATE/downloaddata/UNRATE.csv```(Equivalently, click here:https://research.stlouisfed.org/fred2/series/UNRATE/downloaddata/UNRATE.csv)This request returns a CSV file, which will be handled by your defaultapplication for this class of files.Alternatively, we can access the CSV file from within a Python program.This can be done with a variety of methods.We start with a relatively low-level method and then return to pandas. 
Accessing Data with requestsOne option is to use[requests](https://requests.readthedocs.io/en/master/), a standardPython library for requesting data over the Internet.To begin, try the following code on your computerr = requests.get('http://research.stlouisfed.org/fred2/series/UNRATE/downloaddata/UNRATE.csv')If there\'s no error message, then the call has succeeded.If you do get an error, then there are two likely causes1. You are not connected to the Internet --- hopefully, this isn\'t the case.2. Your machine is accessing the Internet through a proxy server, and Python isn\'t aware of this.In the second case, you can either- switch to another machine- solve your proxy problem by reading [the documentation](https://requests.readthedocs.io/en/master/)Assuming that all is working, you can now proceed to use the `source`object returned by the call`requests.get('http://research.stlouisfed.org/fred2/series/UNRATE/downloaddata/UNRATE.csv')`url = 'http://research.stlouisfed.org/fred2/series/UNRATE/downloaddata/UNRATE.csv' source = requests.get(url).content.decode().split("\n") source[0] source[1] source[2]We could now write some additional code to parse this text and store itas an array.But this is unnecessary --- pandas\' `read_csv` function can handlethe task for us.We use `parse_dates=True` so that pandas recognizes our dates column,allowing for simple date filteringdata = pd.read_csv(url, index_col=0, parse_dates=True)The data has been read into a pandas DataFrame called `data` that we cannow manipulate in the usual waytype(data) data.head() # A useful method to get a quick look at a data frame pd.set_option('precision', 1) data.describe() # Your output might differ slightlyWe can also plot the unemployment rate from 2006 to 2012 as followsax = data['2006':'2012'].plot(title='US Unemployment Rate', legend=False) ax.set_xlabel('year', fontsize=12) ax.set_ylabel('%', fontsize=12) plt.show()Note that pandas offers many other file type alternatives.Pandas has [a widevariety](https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html)of top-level methods that we can use to read, excel, json, parquet orplug straight into a database server. Using pandas_datareader to Access DataThe maker of pandas has also authored a library called`pandas_datareader` that gives programmatic access to manydata sources straight from the Jupyter notebook.While some sources require an access key, many of the most important(e.g., FRED, [OECD](https://data.oecd.org/),[EUROSTAT](https://ec.europa.eu/eurostat/data/database) and the WorldBank) are free to use.For now let\'s work through one example of downloading and plotting data--- this time from the World Bank.The World Bank [collects and organizesdata](http://data.worldbank.org/indicator) on a huge range ofindicators.For example,[here\'s](http://data.worldbank.org/indicator/GC.DOD.TOTL.GD.ZS/countries)some data on government debt as a ratio to GDP.The next code example fetches the data for you and plots time series forthe US and Australiafrom pandas_datareader import wb govt_debt = wb.download(indicator='GC.DOD.TOTL.GD.ZS', country=['US', 'AU'], start=2005, end=2016).stack().unstack(0) ind = govt_debt.index.droplevel(-1) govt_debt.index = ind ax = govt_debt.plot(lw=2) ax.set_xlabel('year', fontsize=12) plt.title("Government Debt to GDP (%)") plt.show()/home/choldgraf/anaconda/envs/dev/lib/python3.8/site-packages/pandas_datareader/compat/__init__.py:7: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. 
from pandas.util.testing import assert_frame_equalThe[documentation](https://pandas-datareader.readthedocs.io/en/latest/index.html)provides more details on how to access various data sources. Exercises(pd_ex1)= Exercise 1With these imports:import datetime as dt from pandas_datareader import dataWrite a program to calculate the percentage price change over 2019 forthe following shares:ticker_list = {'INTC': 'Intel', 'MSFT': 'Microsoft', 'IBM': 'IBM', 'BHP': 'BHP', 'TM': 'Toyota', 'AAPL': 'Apple', 'AMZN': 'Amazon', 'BA': 'Boeing', 'QCOM': 'Qualcomm', 'KO': 'Coca-Cola', 'GOOG': 'Google', 'SNE': 'Sony', 'PTR': 'PetroChina'}Here\'s the first part of the programdef read_data(ticker_list, start=dt.datetime(2019, 1, 2), end=dt.datetime(2019, 12, 31)): """ This function reads in closing price data from Yahoo for each tick in the ticker_list. """ ticker = pd.DataFrame() for tick in ticker_list: prices = data.DataReader(tick, 'yahoo', start, end) closing_prices = prices['Close'] ticker[tick] = closing_prices return ticker ticker = read_data(ticker_list)Complete the program to plot the result as a bar graph like this one:```{glue:} pandas_share_prices```(pd_ex2)= Exercise 2Using the method `read_data` introduced in{ref}`Exercise 1 `, write a program toobtain year-on-year percentage change for the following indices:indices_list = {'^GSPC': 'S&P 500', '^IXIC': 'NASDAQ', '^DJI': 'Dow Jones', '^N225': 'Nikkei'}Complete the program to show summary statistics and plot the result as atime series graph like this one:```{glue:} pandas_indices_pctchange``` Solutions Exercise 1There are a few ways to approach this problem using Pandas to calculatethe percentage change.First, you can extract the data and perform the calculation such as:p1 = ticker.iloc[0] #Get the first set of prices as a Series p2 = ticker.iloc[-1] #Get the last set of prices as a Series price_change = (p2 - p1) / p1 * 100 price_changeAlternatively you can use an inbuilt method `pct_change` and configureit to perform the correct calculation using `periods` argument.change = ticker.pct_change(periods=len(ticker)-1, axis='rows')*100 price_change = change.iloc[-1] price_changeThen to plot the chartprice_change.sort_values(inplace=True) price_change = price_change.rename(index=ticker_list) fig, ax = plt.subplots() ax.set_xlabel('stock', fontsize=12) ax.set_ylabel('percentage change in price', fontsize=12) price_change.plot(kind='bar', ax=ax) plt.show() from myst_nb import glue glue("pandas_share_prices", fig, display=False)Exercise 2Following the work you did in {ref}`Exercise 1 `, you can query the data using `read_data` by updating thestart and end dates accordingly.indices_data = read_data( indices_list, start=dt.datetime(1928, 1, 2), end=dt.datetime(2020, 12, 31) )Then, extract the first and last set of prices per year as DataFramesand calculate the yearly returns such as:yearly_returns = pd.DataFrame() for index, name in indices_list.items(): p1 = indices_data.groupby(indices_data.index.year)[index].first() # Get the first set of returns as a DataFrame p2 = indices_data.groupby(indices_data.index.year)[index].last() # Get the last set of returns as a DataFrame returns = (p2 - p1) / p1 yearly_returns[name] = returns yearly_returnsNext, you can obtain summary statistics by using the method `describe`.yearly_returns.describe()Then, to plot the chartfig, axes = plt.subplots(2, 2) for iter_, ax in enumerate(axes.flatten()): # Flatten 2-D array to 1-D array index_name = yearly_returns.columns[iter_] # Get index name per iteration 
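# (added note) axes.flatten() turns the 2x2 grid into a flat sequence of four axes, so iter_ runs from 0 to 3 and each subplot is paired with one of the four index columns of yearly_returns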
ax.plot(yearly_returns[index_name]) # Plot pct change of yearly returns per index ax.set_ylabel("percent change", fontsize = 12) ax.set_title(index_name) plt.tight_layout() from myst_nb import glue glue("pandas_indices_pctchange", fig, display=False)We will be performing all the below steps in Feature Engineering 1. Missing Values 2. Temporal Variables 3. Categorical Variables: Remove Rare Labels 4. Standardise the values of the variables to the same range import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline pd.pandas.set_option('display.max_columns',None) dataset = pd.read_csv('data/train.csv') dataset.head() dataset.shape # Always remember that there is a chance of data leakage, so we need to split the data first and then perform feature engineering from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(dataset, dataset['SalePrice'], test_size = 0.1, random_state = 0) X_train.shape, X_test.shapeMissing Values feature_nan = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1 and dataset[feature].dtypes == 'O'] for feature in feature_nan: print(feature + " : ", np.round(dataset[feature].isnull().mean(), 4)) def replace_cat_feature(dataset, feature_nan): data = dataset.copy() data[feature_nan] = data[feature_nan].fillna('Missing') return data dataset = replace_cat_feature(dataset, feature_nan) dataset[feature_nan].isnull().sum() dataset.head() # Checking numerical variables containing missing values numerical_with_nan = [feature for feature in dataset.columns if dataset[feature].isnull().sum() > 1 and dataset[feature].dtypes != 'O'] for feature in numerical_with_nan: print(feature, np.round(dataset[feature].isnull().mean(), 4)) for feature in numerical_with_nan: median_value = dataset[feature].median() dataset[feature+'nan'] = np.where(dataset[feature].isnull(), 1, 0) # flag column marking the rows that were missing (note the call to isnull()) dataset[feature].fillna(median_value, inplace = True) dataset[numerical_with_nan].isnull().sum() dataset.head() # Date-Time (temporal) variables: convert years into elapsed time relative to the year sold for feature in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']: dataset[feature] = dataset['YrSold'] - dataset[feature] dataset.head() dataset[['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']].head()Numerical Variables # Skewed numerical features found using Exploratory Data Analysis num_features = ['LotFrontage', 'LotArea', '1stFlrSF', 'GrLivArea', 'SalePrice'] for feature in num_features: dataset[feature] = np.log(dataset[feature]) dataset.head()Handling Rare Categorical Features # We will group together the categories that are present in less than 1% of the observations categorical_feature = [feature for feature in dataset.columns if dataset[feature].dtypes == 'O'] categorical_feature for feature in categorical_feature: temp = dataset.groupby(feature)['SalePrice'].count() / len(dataset) # fraction of observations in each category temp_df = temp[temp > 0.01].index dataset[feature] = np.where(dataset[feature].isin(temp_df), dataset[feature], 'Rare_var') dataset.head(100)Feature Scaling for feature in categorical_feature: labels_ordered = dataset.groupby([feature])['SalePrice'].mean().sort_values().index labels_ordered = {k: i for i, k in enumerate(labels_ordered, 0)} dataset[feature] = dataset[feature].map(labels_ordered) scaling_feature = [feature for feature in dataset.columns if feature not in ['Id', 'SalePrice']] len(scaling_feature) scaling_feature feature_scale = [feature for feature in dataset.columns if feature not in ['Id', 'SalePrice']] from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(dataset[feature_scale]) 
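# (added note) fit() learns the per-column minimum and maximum over dataset[feature_scale]; transform() then maps each value to (x - min) / (max - min), i.e. into the [0, 1] range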
scaler.transform(dataset[feature_scale]) # Transform the train and test set, and add on the Id and SalePrice variables. data = pd.concat([dataset[['Id','SalePrice']].reset_index(drop = True), pd.DataFrame(scaler.transform(dataset[feature_scale]), columns = feature_scale)], axis = 1) data.head() data.to_csv('X_train.csv', index = False) X_testNormal Distribution- Different displays of normally distributed data- Compare different samples from a normal distribution- Check for normality- Work with the cumulative distribution function (CDF)%pylab inline import scipy.stats as stats # seaborn is a package for the visualization of statistical data import seaborn as sns sns.set(style='ticks')Populating the interactive namespace from numpy and matplotlibDifferent Representations''' Different aspects of a normal distribution''' # Generate the data x = r_[-10:10:0.1] rv = stats.norm(0,1) # random variate x2 = r_[0:1:0.001] ax = subplot2grid((3,2),(0,0), colspan=2) plot(x,rv.pdf(x)) xlim([-10,10]) title('Normal Distribution - PDF') subplot(323) plot(x,rv.cdf(x)) xlim([-4,4]) title('CDF: cumulative distribution fct') subplot(324) plot(x,rv.sf(x)) xlim([-4,4]) title('SF: survival fct') subplot(325) plot(x2,rv.ppf(x2)) title('PPF') subplot(326) plot(x2,rv.isf(x2)) title('ISF') tight_layout() show()Multiple normal sample distributions'''Show multiple samples from the same distribution, and compare means.''' # Do this 25 times, and show the histograms numRows = 5 numData = 100 for ii in range(numRows): for jj in range(numRows): data = stats.norm.rvs(myMean, mySD, size=numData) subplot(numRows,numRows,numRows*ii+jj+1) hist(data) xticks([]) yticks([]) xlim(myMean-3*mySD, myMean+3*mySD) tight_layout() show() # Check out the mean of 1000 normally distributded samples numTrials = 1000; numData = 100 myMeans = ones(numTrials)*nan for ii in range(numTrials): data = stats.norm.rvs(myMean, mySD, size=numData) myMeans[ii] = mean(data) print('The standard error of the mean, with {0} samples, is {1:5.3f}'.format(numData, std(myMeans)))Normality Check'''Check if the distribution is normal.''' # Generate and show a distribution numData = 100 data = stats.norm.rvs(myMean, mySD, size=numData) hist(data) # Graphical test: if the data lie on a line, they are pretty much # normally distributed _ = stats.probplot(data, plot=plt) # The scipy "normaltest" is based on D’Agostino and Pearson’s test that # combines skew and kurtosis to produce an omnibus test of normality. _, pVal = stats.normaltest(data) # Or you can check for normality with Kolmogorov-Smirnov test: but this is only advisable for large sample numbers! 
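# (added sketch, not part of the original notebook) the Shapiro-Wilk test is another common normality check, well suited to small and medium samples: # _, pVal_shapiro = stats.shapiro(data)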
#_,pVal = stats.kstest((data-np.mean(data))/np.std(data,ddof=1), 'norm') if pVal > 0.05: print('Data are probably normally distributed')Data are probably normally distributedValues from the Cumulative Distribution Function'''Calculate an empirical cumulative distribution function, compare it with the exact one, and find the exact point for a specific data value.''' # Generate normally distributed random data myMean = 5 mySD = 2 numData = 100 data = stats.norm.rvs(myMean, mySD, size=numData) # Calculate the cumulative distribution function, CDF numbins = 20 counts, bin_edges = histogram(data, bins=numbins, normed=True) cdf = cumsum(counts) cdf /= max(cdf) # compare with the exact CDF step(bin_edges[1:],cdf) plot(x, stats.norm.cdf(x, myMean, mySD),'r') # Find out the value corresponding to the x-th percentile: the # "cumulative distribution function" value = 2 myMean = 5 mySD = 2 cdf = stats.norm.cdf(value, myMean, mySD) print(('With a threshold of {0:4.2f}, you get {1}% of the data'.format(value, round(cdf*100)))) # For the percentile corresponding to a certain value: # the "inverse cumulative distribution function" value = 0.025 icdf = stats.norm.isf(value, myMean, mySD) print('To get {0}% of the data, you need a threshold of {1:4.2f}.'.format((1-value)*100, icdf))With a threshold of 2.00, you get 7.0% of the data To get 97.5% of the data, you need a threshold of 8.92.IMDb Movie Assignment You have the data for the 100 top-rated movies from the past decade along with various pieces of information about the movie, its actors, and the voters who have rated these movies online. In this assignment, you will try to find some interesting insights into these movies and their voters, using Python. Task 1: Reading the data - Subtask 1.1: Read the Movies Data.Read the movies data file provided and store it in a dataframe `movies`.# Read the csv file using 'read_csv'. Please write your dataset location here. movies = pd.read_csv('Movie+Assignment+Data.csv') # Reading Movies Assignment Dataset using pandas read_csv command movies.head() # printing top 5 movies from the DF to check if the data is loaded correctly- Subtask 1.2: Inspect the DataframeInspect the dataframe for dimensions, null-values, and summary of different numeric columns.# Check the number of rows and columns in the dataframe movies.shape #This would return the rows and columns of the movies DF - 100 Rows & 62 Columns # Check the column-wise info of the dataframe movies.info() # This would give me the column wise information on the null count and the Dtype for movies DF # Check the summary for the numeric columns movies.describe() #method for calculating numerical values of the DFTask 2: Data AnalysisNow that we have loaded the dataset and inspected it, we see that most of the data is in place. As of now, no data cleaning is required, so let's start with some data manipulation, analysis, and visualisation to get various insights about the data. - Subtask 2.1: Reduce those Digits!These numbers in the `budget` and `gross` are too big, compromising its readability. Let's convert the unit of the `budget` and `gross` columns from `$` to `million $` first.# Divide the 'gross' and 'budget' columns by 1000000 to convert '$' to 'million $' movies['Gross'] = movies['Gross'] / 1000000 #Diving the Gross column by 1000000 and appending the values to Gross movies['budget'] = movies['budget'] / 1000000 #Diving the Budget column by 1000000 and appending the values to Budget movies.head() #Checking the top 5 rows- Subtask 2.2: Let's Talk Profit! 1. 
Create a new column called `profit` which contains the difference of the two columns: `gross` and `budget`. 2. Sort the dataframe using the `profit` column as reference. 3. Extract the top ten most profitable movies in descending order and store them in a new dataframe - `top10`. 4. Plot a scatter or a joint plot between the columns `budget` and `profit` and write a few words on what you observed. 5. Extract the movies with a negative profit and store them in a new dataframe - `neg_profit`# Create the new column named 'profit' by subtracting the 'budget' column from the 'gross' column movies['profit'] = movies['Gross'] - movies['budget'] # Profit = Gross - budget movies.head() # print top 5 records # Sort the dataframe with the 'profit' column as reference using the 'sort_values' function. Make sure to set the argument 'ascending' to 'False' movies = movies.sort_values(by='profit', ascending=False) movies.head() # Get the top 10 most profitable movies and store them in 'top10'. Specify the rows till 10 (0-9) top10 = movies.sort_values(by='profit', ascending=False).head(10) # Sorting by profit in descending order and keeping the first ten rows top10 movies.reset_index(drop=True, inplace=True) # resetting the index column so that the sequence is maintained movies.head() # Plot profit vs budget sns.jointplot(x='budget', y='profit', data=movies) # Joint plot of budget vs profit for the movies DF plt.show() # Display the graphThe dataset contains the 100 best performing movies from the year 2010 to 2016. However, the scatter plot tells a different story: you can notice that there are some movies with negative profit. Although good movies do incur losses at times, there appear to be quite a few movies with losses here. What can be the reason behind this? Let's have a closer look by finding the movies with negative profit.# Find the movies with negative profit neg_profit = movies[(movies['profit'] < 0)] # Creating a new DF neg_profit by extracting the movies with profit less than 0 neg_profit.reset_index(drop=True, inplace=True) # Resetting the index for the neg_profit DF neg_profit**`Checkpoint 1:`** Can you spot the movie `Tangled` in the dataset? You may be aware of the movie 'Tangled'. Although it's one of the highest-grossing movies of all time, it has a negative profit as per this result. If you cross-check the gross values of this movie (link: https://www.imdb.com/title/tt0398286/), you can see that the gross in the dataset accounts only for the domestic gross and not the worldwide gross. This is true for many other movies in the list as well. - Subtask 2.3: The General Audience and the CriticsYou might have noticed the column `MetaCritic` in this dataset. Metacritic is a very popular website where an average score is determined from the scores given by the top-rated critics. Second, you also have another column `IMDb_rating` which tells you the IMDb rating of a movie. This rating is determined by taking the average of hundreds of thousands of ratings from the general audience. As a part of this subtask, you are required to find out the highest rated movies which have been liked by critics and audiences alike.1. Firstly, you will notice that the `MetaCritic` score is on a scale of `100` whereas the `IMDb_rating` is on a scale of 10. First convert the `MetaCritic` column to a scale of 10. 2. 
Now, to find out the movies which have been liked by both critics and audiences alike and also have a high rating overall, you need to - - Create a new column `Avg_rating` which will have the average of the `MetaCritic` and `Rating` columns - Retain only the movies in which the absolute difference(using abs() function) between the `IMDb_rating` and `Metacritic` columns is less than 0.5. Refer to this link to know how abs() funtion works - https://www.geeksforgeeks.org/abs-in-python/ . - Sort these values in a descending order of `Avg_rating` and retain only the movies with a rating equal to higher than `8` and store these movies in a new dataframe `UniversalAcclaim`.# Change the scale of MetaCritic movies['MetaCritic'] = movies['MetaCritic'] / 10 # Divide the MetaCritic column by 10 and store it back to MetaCritic # Find the average ratings movies['Avg_rating'] = (movies['MetaCritic'] + movies['IMDb_rating']) / 2 #For calculating Avg_rating we are dividing (MetaCritic + IMDB_rating) /2 #Sort in descending order of average rating movies.sort_values(by='Avg_rating',ascending=False) #Sorting DF based on Avg_rating in descending order # Find the movies with metacritic-rating < 0.5 and also with the average rating of >=8 UniversalAcclaim = movies.loc[(abs((movies['IMDb_rating'] - movies['MetaCritic']) < 0.5)) & (movies['Avg_rating'] >= 8)] UniversalAcclaim #Retaining the movies using absolute diff of Rating & Metacritic less than 0.5 and Avg_rating >= 8 & storing in UniversalAcclaim DF**`Checkpoint 2:`** Can you spot a `Star Wars` movie in your final dataset? - Subtask 2.4: Find the Most Popular Trios - IYou're a producer looking to make a blockbuster movie. There will primarily be three lead roles in your movie and you wish to cast the most popular actors for it. Now, since you don't want to take a risk, you will cast a trio which has already acted in together in a movie before. The metric that you've chosen to check the popularity is the Facebook likes of each of these actors.The dataframe has three columns to help you out for the same, viz. `actor_1_facebook_likes`, `actor_2_facebook_likes`, and `actor_3_facebook_likes`. Your objective is to find the trios which has the most number of Facebook likes combined. That is, the sum of `actor_1_facebook_likes`, `actor_2_facebook_likes` and `actor_3_facebook_likes` should be maximum.Find out the top 5 popular trios, and output their names in a list.# Write your code here #Adding a column popularity that sums the facebook likes of all three actors movies['Popularity'] = movies.loc[:,['actor_1_facebook_likes', 'actor_2_facebook_likes', 'actor_3_facebook_likes']].sum(axis=1) #based on the popularity fetching the top 5 records popular_actor = movies.sort_values(by='Popularity',ignore_index=True, ascending=False).loc[0:4,:] popular_actor # Displaying the list of top 5 trios top_popular = movies.sort_values(by='Popularity',ascending=False,ignore_index=True).loc[0:4,['actor_1_name','actor_2_name','actor_3_name']].values.tolist() top_popular- Subtask 2.5: Find the Most Popular Trios - IIIn the previous subtask you found the popular trio based on the total number of facebook likes. Let's add a small condition to it and make sure that all three actors are popular. The condition is **none of the three actors' Facebook likes should be less than half of the other two**. 
For example, the following is a valid combo:- actor_1_facebook_likes: 70000- actor_2_facebook_likes: 40000- actor_3_facebook_likes: 50000But the below one is not:- actor_1_facebook_likes: 70000- actor_2_facebook_likes: 40000- actor_3_facebook_likes: 30000since in this case, `actor_3_facebook_likes` is 30000, which is less than half of `actor_1_facebook_likes`.Having this condition ensures that you aren't getting any unpopular actor in your trio (since the total likes calculated in the previous question doesn't tell anything about the individual popularities of each actor in the trio.).You can do a manual inspection of the top 5 popular trios you have found in the previous subtask and check how many of those trios satisfy this condition. Also, which is the most popular trio after applying the condition above? **Write your answers below.**- **`No. of trios that satisfy the above condition:`**- **`Most popular trio after applying the condition:`** **`Optional:`** Even though you are finding this out by a natural inspection of the dataframe, can you also achieve this through some *if-else* statements to incorporate this. You can try this out on your own time after you are done with the assignment.# Your answer here (optional) indi_popularity = movies[~( (((movies['actor_1_facebook_likes'] < movies['actor_2_facebook_likes']/2) | (movies['actor_1_facebook_likes'] < movies['actor_3_facebook_likes']/2)) == True) |(((movies['actor_2_facebook_likes'] < movies['actor_1_facebook_likes']/2) | (movies['actor_2_facebook_likes'] < movies['actor_3_facebook_likes']/2)) == True) |(((movies['actor_3_facebook_likes'] < movies['actor_1_facebook_likes']/2) | (movies['actor_3_facebook_likes'] < movies['actor_2_facebook_likes']/2)) == True) )] pop_actors = popular_actor[popular_actor['Title'].isin(indi_popularity['Title'])] pop_actors final_popular_trio = pop_actors.sort_values(by='Popularity',ascending=False,ignore_index=True).loc[0:5,['actor_1_name','actor_2_name','actor_3_name']].values.tolist() final_popular_trio- Subtask 2.6: Runtime AnalysisThere is a column named `Runtime` in the dataframe which primarily shows the length of the movie. It might be intersting to see how this variable this distributed. Plot a `histogram` or `distplot` of seaborn to find the `Runtime` range most of the movies fall into.# Runtime histogram/density plot sns.set_style('whitegrid') #setting whitegrid as the style sns.distplot(movies['Runtime']) #drawing a distplot for finding the range for Runtime from movies DF plt.show()**`Checkpoint 3:`** Most of the movies appear to be sharply 2 hour-long. - Subtask 2.7: R-Rated MoviesAlthough R rated movies are restricted movies for the under 18 age group, still there are vote counts from that age group. Among all the R rated movies that have been voted by the under-18 age group, find the top 10 movies that have the highest number of votes i.e.`CVotesU18` from the `movies` dataframe. Store these in a dataframe named `PopularR`.# Write your code here PopularR = movies[['Title','CVotesU18','content_rating']] PopularR = PopularR.loc[(PopularR.content_rating == 'R')].sort_values(by='CVotesU18',ascending=False) PopularR.head(10) # Creating Popular DF by adding Title,CVotesU18,content_rating columns # Filter the content_rating by 'R' using loc and sorting the values based on CVotesU18 #Display the top 10 popular R rated movies**`Checkpoint 4:`** Are these kids watching `Deadpool` a lot? 
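For Subtask 2.7, `nlargest` gives a more compact route to the same result. This is only a sketch of an alternative, assuming the same `movies` dataframe used above; `PopularR_alt` is an illustrative name, not part of the assignment.
# Alternative for Subtask 2.7: keep only R-rated movies, then take the ten largest under-18 vote counts
PopularR_alt = (movies.loc[movies['content_rating'] == 'R', ['Title', 'CVotesU18', 'content_rating']]
                .nlargest(10, 'CVotesU18'))
PopularR_alt  # the ten R-rated titles with the highest CVotesU18, already sorted in descending order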
Task 3 : Demographic analysisIf you take a look at the last columns in the dataframe, most of these are related to demographics of the voters (in the last subtask, i.e., 2.8, you made use one of these columns - CVotesU18). We also have three genre columns indicating the genres of a particular movie. We will extensively use these columns for the third and the final stage of our assignment wherein we will analyse the voters across all demographics and also see how these vary across various genres. So without further ado, let's get started with `demographic analysis`. - Subtask 3.1 Combine the Dataframe by GenresThere are 3 columns in the dataframe - `genre_1`, `genre_2`, and `genre_3`. As a part of this subtask, you need to aggregate a few values over these 3 columns. 1. First create a new dataframe `df_by_genre` that contains `genre_1`, `genre_2`, and `genre_3` and all the columns related to **CVotes/Votes** from the `movies` data frame. There are 47 columns to be extracted in total.2. Now, Add a column called `cnt` to the dataframe `df_by_genre` and initialize it to one. You will realise the use of this column by the end of this subtask.3. First group the dataframe `df_by_genre` by `genre_1` and find the sum of all the numeric columns such as `cnt`, columns related to CVotes and Votes columns and store it in a dataframe `df_by_g1`.4. Perform the same operation for `genre_2` and `genre_3` and store it dataframes `df_by_g2` and `df_by_g3` respectively. 5. Now that you have 3 dataframes performed by grouping over `genre_1`, `genre_2`, and `genre_3` separately, it's time to combine them. For this, add the three dataframes and store it in a new dataframe `df_add`, so that the corresponding values of Votes/CVotes get added for each genre.There is a function called `add()` in pandas which lets you do this. You can refer to this link to see how this function works. https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.add.html6. The column `cnt` on aggregation has basically kept the track of the number of occurences of each genre.Subset the genres that have atleast 10 movies into a new dataframe `genre_top10` based on the `cnt` column value.7. Now, take the mean of all the numeric columns by dividing them with the column value `cnt` and store it back to the same dataframe. We will be using this dataframe for further analysis in this task unless it is explicitly mentioned to use the dataframe `movies`.8. Since the number of votes can't be a fraction, type cast all the CVotes related columns to integers. 
Also, round off all the Votes related columns upto two digits after the decimal point.# Create the dataframe df_by_genre df_by_genre = pd.concat([movies.loc[:,['genre_1','genre_2','genre_3']], movies.iloc[:,16:-4]],join='outer', axis =1) df_by_genre # Creating a df_by_genre DF by aquiring the required columns (genre 1,2,3 and all CVotes & Votes columns that sums upto 47) # print the df_by_genre # Create a column cnt and initialize it to 1 df_by_genre['cnt'] = 1 # Creating a column 'cnt' and assigning a default value of 1 df_by_genre # print the DF df_by_genre # Group the movies by individual genres df_by_genre.groupby('genre_1').sum() #Grouping Genre 1 df_by_genre.groupby('genre_2').sum() #Grouping Genre 2 df_by_genre.groupby('genre_3').sum() #Grouping Genre 3 # Add the grouped data frames and store it in a new data frame df_by_g1 = df_by_genre.groupby('genre_1').sum() #Creating a new DF df_by_g1 based on genre_1 df_by_g2 = df_by_genre.groupby('genre_2').sum() #Creating a new DF df_by_g2 based on genre_2 df_by_g3 = df_by_genre.groupby('genre_3').sum() #Creating a new DF df_by_g3 based on genre_3 final_genre = df_by_g1.add(df_by_g2,fill_value=0) df_add = final_genre.add(df_by_g3,fill_value=0) df_add = df_add.sort_values(by='cnt',ascending=False) df_add # Creating final_genre DF to combine df_by_g1 & df_by_g2 # Creating df_add DF to combile final_genre & df_by_g3 # print the DF df_add # Extract genres with atleast 10 occurences genre_top10 = df_add[df_add['cnt'] >= 10] # other possible solution for getting the same result: genre_top10 = df_add.iloc[:10] genre_top10 # Take the mean for every column by dividing with cnt genre_top10 = genre_top10.iloc[:,:-1].div(genre_top10.cnt, axis = 0).join(genre_top10.cnt) genre_top10 # compute the mean by dividing Cvotes& Votes by cnt and display the results # Rounding off the columns of Votes to two decimals votes = genre_top10.iloc[:,27:].apply(lambda x: pd.Series.round(x,2)) votes # Converting CVotes to int type cvotes = genre_top10.iloc[:,:27].round().astype(int) cvotes genre_votes = pd.concat([cvotes, votes], axis=1,ignore_index=False) genre_votes # We have created 2 DF's cvotes and votes to perform individual actions # convert cvotes to int # convert votes to 2 decimal places # finally concate both cvotes and votes based on index and display the resultIf you take a look at the final dataframe that you have gotten, you will see that you now have the complete information about all the demographic (Votes- and CVotes-related) columns across the top 10 genres. We can use this dataset to extract exciting insights about the voters! - Subtask 3.2: Genre Counts!Now let's derive some insights from this data frame. Make a bar chart plotting different genres vs cnt using seaborn.# Countplot for genres plt.figure(figsize=[10,6]) # configuring the size of the bar chart genre_counts = sns.barplot(data=genre_top10, x = genre_top10.index, y = genre_top10.cnt) genre_counts.set(xlabel="Genre", ylabel = "Count", title = "Genre Counts") plt.show() # creating a barplot plotting genres vs count # setting up the labels for x & y # Set the title as Genre Counts and display the graph**`Checkpoint 5:`** Is the bar for `Drama` the tallest? - Subtask 3.3: Gender and GenreIf you have closely looked at the Votes- and CVotes-related columns, you might have noticed the suffixes `F` and `M` indicating Female and Male. 
Since we have the vote counts for both males and females, across various age groups, let's now see how the popularity of genres vary between the two genders in the dataframe. 1. Make the first heatmap to see how the average number of votes of males is varying across the genres. Use seaborn heatmap for this analysis. The X-axis should contain the four age-groups for males, i.e., `CVotesU18M`,`CVotes1829M`, `CVotes3044M`, and `CVotes45AM`. The Y-axis will have the genres and the annotation in the heatmap tell the average number of votes for that age-male group. 2. Make the second heatmap to see how the average number of votes of females is varying across the genres. Use seaborn heatmap for this analysis. The X-axis should contain the four age-groups for females, i.e., `CVotesU18F`,`CVotes1829F`, `CVotes3044F`, and `CVotes45AF`. The Y-axis will have the genres and the annotation in the heatmap tell the average number of votes for that age-female group. 3. Make sure that you plot these heatmaps side by side using `subplots` so that you can easily compare the two genders and derive insights.4. Write your any three inferences from this plot. You can make use of the previous bar plot also here for better insights.Refer to this link- https://seaborn.pydata.org/generated/seaborn.heatmap.html. You might have to plot something similar to the fifth chart in this page (You have to plot two such heatmaps side by side).5. Repeat subtasks 1 to 4, but now instead of taking the CVotes-related columns, you need to do the same process for the Votes-related columns. These heatmaps will show you how the two genders have rated movies across various genres.You might need the below link for formatting your heatmap.https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot- Note : Use `genre_top10` dataframe for this subtask# 1st set of heat maps for CVotes-related columns male_cvote = genre_top10.groupby(genre_top10.index)['CVotesU18M','CVotes1829M','CVotes3044M','CVotes45AM'].mean().astype(int) female_cvote = genre_top10.groupby(genre_top10.index)['CVotesU18F','CVotes1829F','CVotes3044F','CVotes45AF'].mean().astype(int) fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k') fig,(ax1,ax2,axcb) = plt.subplots(1,3, gridspec_kw={'width_ratios':[1,1,0.08]}) ax1.get_shared_y_axes().join(ax1,ax2) mcvote = sns.heatmap(male_cvote,cmap='Blues', annot=True, fmt='g',cbar=False, ax=ax1) mcvote.set_ylabel('Genre', fontsize=10) mcvote.set_xlabel('Male Age Group', fontsize=10) fcvote = sns.heatmap(female_cvote,cmap='Blues', annot=True, fmt='g',cbar_ax=axcb, ax=ax2) fcvote.set_xlabel('Female Age Group', fontsize=10) fcvote.set_yticks([ ]) plt.show()**`Inferences:`** A few inferences that can be seen from the heatmap above is that males have voted more than females, and Sci-Fi appears to be most popular among the 18-29 age group irrespective of their gender. What more can you infer from the two heatmaps that you have plotted? 
Write your three inferences/observations below:- Inference 1: Ages 18-44 across gender has voted the most for all genre- Inference 2: On an average females 18-44 voted less for movies of Action, Adventure, Animation, Comedy, Romance and Thriller genre when compared to males- Inference 3: Romance is the least voted by Male irrespective of their age compared to female, but if only male gender is considered, they voted least for romance# 2nd set of heat maps for Votes-related columns male_vote = round(genre_top10.groupby(genre_top10.index)['VotesU18M','Votes1829M','Votes3044M','Votes45AM'].mean(),2) female_vote = round(genre_top10.groupby(genre_top10.index)['VotesU18F','Votes1829F','Votes3044F','Votes45AF'].mean(),2) fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k') fig,(ax1,ax2,axcb) = plt.subplots(1,3, gridspec_kw={'width_ratios':[1,1,0.08]}) ax1.get_shared_y_axes().join(ax1,ax2) mcvote = sns.heatmap(male_vote,cmap='coolwarm', annot=True, fmt='g',cbar=False, ax=ax1) mcvote.set_ylabel('Genre', fontsize=10) mcvote.set_xlabel('Male Age Group', fontsize=10) fcvote = sns.heatmap(female_vote,cmap='coolwarm', annot=True, fmt='g',cbar_ax=axcb, ax=ax2) fcvote.set_xlabel('Female Age Group', fontsize=10) fcvote.set_yticks([ ]) plt.show()**`Inferences:`** Sci-Fi appears to be the highest rated genre in the age group of U18 for both males and females. Also, females in this age group have rated it a bit higher than the males in the same age group. What more can you infer from the two heatmaps that you have plotted? Write your three inferences/observations below:- Inference 1: It is interesting to see that the romance movies are watched less or voted less by males when compared to females especially for U18- Inference 2: Both genders of age group 30 and above the minimum rating ranges from 7.5 - 7.98 and has not reached 8- Inference 3: Comedy genre seems to be liked by both genres across age groups with a minor variance - Subtask 3.4: US vs non-US Cross AnalysisThe dataset contains both the US and non-US movies. Let's analyse how both the US and the non-US voters have responded to the US and the non-US movies.1. Create a column `IFUS` in the dataframe `movies`. The column `IFUS` should contain the value "USA" if the `Country` of the movie is "USA". For all other countries other than the USA, `IFUS` should contain the value `non-USA`.2. Now make a boxplot that shows how the number of votes from the US people i.e. `CVotesUS` is varying for the US and non-US movies. Make use of the column `IFUS` to make this plot. Similarly, make another subplot that shows how non US voters have voted for the US and non-US movies by plotting `CVotesnUS` for both the US and non-US movies. Write any of your two inferences/observations from these plots.3. Again do a similar analysis but with the ratings. Make a boxplot that shows how the ratings from the US people i.e. `VotesUS` is varying for the US and non-US movies. Similarly, make another subplot that shows how `VotesnUS` is varying for the US and non-US movies. Write any of your two inferences/observations from these plots.Note : Use `movies` dataframe for this subtask. 
Make use of this documention to format your boxplot - https://seaborn.pydata.org/generated/seaborn.boxplot.html# Creating IFUS column #below using lambda condition appended to column ['IFUS'] if the movie is from USA or non-USA movies['IFUS'] = movies['Country'].apply(lambda x:'USA' if x== 'USA' else 'non-USA') movies # listing the movies DF with the newly appended column # Box plot - 1: CVotesUS(y) vs IFUS(x) plt.figure(figsize=[10,10]) #Sizing the boxplot fig, axes = plt.subplots(nrows=1, ncols=2) #defining rows & cols for subplot #boxplot for cvotesus cvotesus = sns.boxplot(data=movies, x=movies.IFUS, y=movies.CVotesUS, orient ='v',ax=axes[0]) # Generating boxplot - IFUS , CVotesUS cvotesus.axes.set_title('Dist. of votes by USA voters', fontsize = 10)# Setting the title for the box plot cvotesus.set_ylabel('CVotesUS', fontsize =10) cvotesus.set_xlabel('IFUS', fontsize =10) #boxplot for cvotesnus cvotesnus = sns.boxplot(data=movies, x=movies.IFUS, y=movies.CVotesnUS, orient ='v',ax=axes[1]) # Generating boxplot - IFUS , CVotesnUS cvotesnus.axes.set_title('Dist. of votes by non-USA voters', fontsize = 10)# Setting the title for the box plot cvotesnus.set_ylabel('CVotesnUS', fontsize =10) cvotesnus.set_xlabel('IFUS', fontsize =10) plt.tight_layout() #for non-overlapping of box plot**`Inferences:`** Write your two inferences/observations below:- Inference 1: From both the plots we can see that there are some outliers for USA movies- Inference 2: We can observe non-USA movies the number of votes is uniformly distributed compared to USA movies# Box plot - 2: VotesUS(y) vs IFUS(x) plt.figure(figsize=[10,10]) #Sizing the boxplot fig, axes = plt.subplots(nrows=1, ncols=2) #defining rows & cols for subplot #boxplot for cvotesus votesus = sns.boxplot(data=movies, x=movies.IFUS, y=movies.VotesUS, orient ='v',ax=axes[0]) # Generating boxplot - IFUS , VotesUS votesus.axes.set_title('Dist. of votes by USA voters', fontsize = 10)# Setting the title for the box plot votesus.set_ylabel('VotesUS', fontsize =10) votesus.set_xlabel('IFUS', fontsize =10) #boxplot for cvotesnus votesnus = sns.boxplot(data=movies, x=movies.IFUS, y=movies.VotesnUS, orient ='v',ax=axes[1]) # Generating boxplot - IFUS , VotesnUS votesnus.axes.set_title('Dist. of votes by non-USA voters', fontsize = 10)# Setting the title for the box plot votesnus.set_ylabel('VotesnUS', fontsize =10) votesnus.set_xlabel('IFUS', fontsize =10) plt.tight_layout() #for non-overlapping of box plot**`Inferences:`** Write your two inferences/observations below:- Inference 1: From both the plots we can see that there are some outliers for USA movies- Inference 2: On average we can see the median rating is higher (around 7.9 to 8) from USA people compared to that from non- USA people - Subtask 3.5: Top 1000 Voters Vs GenresYou might have also observed the column `CVotes1000`. This column represents the top 1000 voters on IMDb and gives the count for the number of these voters who have voted for a particular movie. Let's see how these top 1000 voters have voted across the genres. 1. Sort the dataframe genre_top10 based on the value of `CVotes1000`in a descending order.2. Make a seaborn barplot for `genre` vs `CVotes1000`.3. Write your inferences. 
You can also try to relate it with the heatmaps you did in the previous subtasks.# Sorting by CVotes1000 genre_top10 = genre_top10[['CVotes1000']].sort_values(by='CVotes1000',ascending=False) # Bar plot plt.figure(figsize=[10,5]) sns.barplot(data=genre_top10,x=genre_top10.index,y='CVotes1000') plt.title('Top 1000 voters vs Genre', fontsize = 25) plt.xlabel('Movie Genre', fontsize = 15) plt.ylabel('CVotes1000', fontsize = 15) plt.show()Finding the Best Neighborhood in Pittsburgh: Factoring in Property ValuesData borrowed from https://data.wprdc.org/dataset/real-estate-sales Getting Startedimport pandas as pd import geopandas import numpy as np import plotly # Importing the data property_data = pd.read_csv("PghPropertySaleData.csv", low_memory = False) # Previewing the data property_data.head(5) # In the preview, I noticed that the property in the fourth row sold for a price of $0. # Looking through the .dbf file, I noticed that there are several other extremely low sold prices, such as $0, $1, and $10. # Therefore, I am only considering prices above $1,000 to mitigate the infuence of global outliers. property_data = property_data[property_data.PRICE > 1000] # In importing the CSV file, the zip codes were turned into floats. This will cast them back into the int data type property_data['PROPERTYZIP'] = property_data['PROPERTYZIP'].astype(int) # Finding the mean of the property sold prices for all properties sharing the same zip code price_property_data = property_data[['PROPERTYZIP','PRICE']].groupby(['PROPERTYZIP']).mean() price_property_data # Rounding the mean property sales prices to the nearest dollar price_property_data['PRICE'] = price_property_data['PRICE'].astype(int) # Sorting the data by price price_property_data.sort_values(by=['PRICE'],inplace=True) price_property_data.head(10)Establishing a points system and price bracketsZip codes with properties within a certain threshold will be assigned a fixed number of points. Team members' data sets also utilize a point system. The neighborood with the highest combined number of points will be considered the best.# First, price brackets need to be established. To do this, I will divide the distribution into five tiers, based on percentiles. # Creating a new column called "Percentile Rank", which shows the percentage of prices that any one price is greater than. price_property_data['Percentile Rank'] = price_property_data.PRICE.rank(pct = True) # Now, I am creating conditions for the program to check in order to set a point value based on the Percentile Rank values. conditions = [ (price_property_data['Percentile Rank'] <= .2), (price_property_data['Percentile Rank'] > .2) & (price_property_data['Percentile Rank'] <= .4), (price_property_data['Percentile Rank'] > .4) & (price_property_data['Percentile Rank'] <= .6), (price_property_data['Percentile Rank'] > .6) & (price_property_data['Percentile Rank'] <= .8), (price_property_data['Percentile Rank'] > .8)] # The points work with the above conditions. If the first condition is met (percentile rank below .2), one point is assigned. # If the second condition is met (percentile rank below or equal to .4 and greater than .2), then two poitns are assigned. # This method gives more points to zip codes with higher percentile prices. points = ['1', '2', '3', '4', '5'] # Making a new column called "Points" and adding point values based on the above conditions. price_property_data['Points'] = np.select(conditions, points) # Sorting the data first by points, and then by price. 
price_property_data.sort_values(by=['Points', 'PRICE'],inplace=True) price_property_data.head(5)Visualizing the dataprice_property_data = price_property_data.astype(str) price_property_data = price_property_data.reset_index() conditions2 = [ (price_property_data['PROPERTYZIP'] == '15028'), (price_property_data['PROPERTYZIP'] == '15089'), (price_property_data['PROPERTYZIP'] == '15104'), (price_property_data['PROPERTYZIP'] == '15110'), (price_property_data['PROPERTYZIP'] == '15045'), (price_property_data['PROPERTYZIP'] != '15089')] points2 = ['Central Oakland', 'North Oakland', 'West Oakland', 'Crawford-Roberts', 'Strip District', 'Central Business District '] price_property_data['neighborhood'] = np.select(conditions2, points2) price_property_data.head(119) neighborhoods = geopandas.read_file("Neighborhoods/Neighborhoods_.shp") neighborhoods.plot() property_values_map = neighborhoods.merge(price_property_data, how='left', left_on='hood',right_on='neighborhood') property_values_map[['hood','PRICE','geometry']].head(50) property_values_map.plot(column='PRICE')Alguns testes para as funçoes### Solver Functions ### # def num_moves(sudoku): 'returns the maximum number of moves to do on the current sudoku / Counting the zeroes(empty squares)' moves = 0 for line in sudoku: moves += list(line).count(0) return moves null_sudoku = [[0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0]] null_sol = [[1,2,3,4,5,6,7,8,9], [4,5,6,7,8,9,1,2,3], [7,8,9,1,2,3,4,5,6], [2,3,1,6,7,4,8,9,5], [8,7,5,9,1,2,3,6,4], [6,9,4,5,3,8,2,1,7], [3,1,7,2,6,5,9,4,8], [5,4,2,8,9,7,6,3,1], [9,6,8,3,4,1,5,7,2]] assert(num_moves(null_sudoku) == 81) assert(num_moves(null_sol) == 0) def all_moves(): 'generates a list with all the possible moves of every square of the board' ans = [[1,2,3,4,5,6,7,8,9] for _ in range(81)] return ans def available_moves(sudoku): 'returns the available guess_list of every square' guess_list = all_moves() for i in range(9): for j in range(9): if (not (sudoku[i][j])) == False: numago = sudoku[i][j] guess_list[(9*i + j)] = [] for k in range(9): if numago in guess_list[9*i +k]: guess_list[(9*i + k)].remove(numago) #remove da linha if numago in guess_list[(9*k + j)]: guess_list[(9*k + j)].remove(numago) #remove da coluna a,b = (i//3)*3,(j//3)*3 for m in range(a,a+3): for n in range(b,b+3): if numago in guess_list[9*m+n]: guess_list[(9*m+n)].remove(numago) return guess_list %timeit all_moves() assert(len(all_moves()) == 81) assert(len(sum(available_moves(null_sol),[])) == 0) def next_move(guess_list): 'returns a list with the next moves, and its indexes (q,r)' if sum(map(sum,guess_list)) == 0: # no more available guesses >.< return [],None,None minmoves = 10 idxmin = None for (i,move) in enumerate(guess_list): l = len(move) if 0 < l and l < minmoves: idxmin = i minmoves = l if l == 1: break q,r = divmod(idxmin,9) return guess_list[idxmin],q,r def next_branch(branch,move,q,r): 'returns the next branch to try (left to right)' if move == 0: #print(move,q,r,'wut') return -1 # error code j = branch.count([q,r]) if j >= len(move): return 0 return [move[j]] # starts from the *left* of the list def refresh_guess_list(move,q,r,guess_list): 'given a move on the [q][r] square, refreshes the guess_list' new_guess_list = guess_list new_guess_list[9*q+r] = [] k = 9*q l = 9*(q//3)*3 + (r//3)*3 mmm = move[0] for i in range(9): if mmm in new_guess_list[r+9*i]: new_guess_list[r+9*i].remove(mmm) 
# remove da coluna if mmm in new_guess_list[k+i]: new_guess_list[k+i].remove(mmm) # remove da linha for m in range(3): for n in range(3): if mmm in new_guess_list[l + m+9*n ]: new_guess_list[l + m+9*n ].remove(mmm) return new_guess_list def copy_GL(guess_list): return [l[:] for l in guess_list] def check_line(n,guess_list): 'returns the n_th line in the guess_list (n from 0 to 8)' nth_line = guess_list[n*9:9*(n+1)] idxs = range(9*n,9*(n+1)) return nth_line,idxs def check_col(n,guess_list): 'returns the n_th column in the guess_list (n from 0 to 8)' col_idxs = [(l*9)+n for l in range(9)] nth_column = [guess_list[i] for i in col_idxs] return nth_column,col_idxs def check_box(n,guess_list): 'returns the n_th box in the guess_list (n from 0 to 8)' #code gore basically math to get the right indexes from the boxes if n < 3: nth_box = guess_list[n*3:(n+1)*3] + guess_list[n*3 + 9:(n+1)*3 + 9] + guess_list[n*3 + 18:(n+1)*3 + 18] idxs = [n*3,3*n+1,3*n+2,n*3 + 9,n*3 + 10,n*3 + 11,n*3 + 18,n*3 + 19,n*3 + 20] elif n < 6: nth_box = guess_list[27+(n%3)*3:27+(n%3+1)*3] + guess_list[36+(n%3)*3:36+(n%3+1)*3] + guess_list[45+(n%3)*3:45+(n%3+1)*3] idxs = [27+(n%3)*3,28+(n%3)*3,29+(n%3)*3,36+(n%3)*3,37+(n%3)*3,38+(n%3)*3,45+(n%3)*3,46+(n%3)*3,47+(n%3)*3] elif n >= 6: nth_box = guess_list[54+(n%3)*3:54+(n%3+1)*3] + guess_list[63+(n%3)*3:63+(n%3+1)*3] + guess_list[72+(n%3)*3:72+(n%3+1)*3] idxs = [54+(n%3)*3,55+(n%3)*3,56+(n%3)*3,63+(n%3)*3,64+(n%3)*3,65+(n%3)*3,72+(n%3)*3,73+(n%3)*3,74+(n%3)*3] return nth_box,idxs def check_singles(guess_list): singles = [] for i in range(9): i_box,box_idxs = check_box(i,guess_list) i_line,line_idxs = check_line(i,guess_list) i_col,col_idxs = check_col(i,guess_list) for j in range(1,10): if sum(i_box,[]).count(j) == 1: for a,square in enumerate(i_box): if j in square: if [[j,box_idxs[a]]] not in singles: singles += [[j,box_idxs[a]]] if sum(i_line,[]).count(j) == 1: for a,square in enumerate(i_line): if j in square: if [[j,line_idxs[a]]] not in singles: singles += [[j,line_idxs[a]]] if sum(i_col,[]).count(j) == 1: for a,square in enumerate(i_col): if j in square: if [[j,col_idxs[a]]] not in singles: singles += [[j,col_idxs[a]]] return singles ### Solver ### # import numpy as np #from solver_funcs import * def sudoku_starter(board): 'adapting to the guess_list variable' backups = [] #sudokus backup branch = [] # branch of the guessing tree sudoku_board = np.empty((9,9), dtype = int) # empty sudoku 9x9 board sudoku_board[:] = board[:] #here we copy the board list moves = num_moves(sudoku_board) # counts the number of zeroes guess_list = available_moves(sudoku_board) #list with all the possible moves singles = check_singles(guess_list) for single in singles: q,r = divmod(single[1],9) if sudoku_board[q][r] == 0: move = [single[0]] sudoku_board[q][r] = move[0] moves -= 1 guess_list = refresh_guess_list(move,q,r,guess_list) move,q,r = next_move(guess_list) # next right move while moves: # while there are moves to be made we make'em l = len(move) if l > 1: return sudoku_solver(sudoku_board,moves,guess_list,branch,backups) if l == 1: sudoku_board[q][r] = move[0] moves -= 1 guess_list = refresh_guess_list(move,q,r,guess_list) move,q,r = next_move(guess_list) return sudoku_board def sudoku_solver(sudoku_board,moves,guess_list,branch,backups): 'solves the sudoku by trying to guess from the list with least possible guesses' while moves > 0: move,q,r = next_move(guess_list) if not move: backups = backups[:-1] branch = branch[:-1] sudoku_board = backups[-1][0] guess_list = backups[-1][1] moves 
= num_moves(sudoku_board) elif move == 1: sudoku_board[q][r] = move[0] guess_list = refresh_guess_list(move,q,r,guess_list) moves -= 1 else: guess = next_branch(branch,move,q,r) if not guess: k = (branch.index([q,r])) sudoku_board = backups[k-1][0] guess_list = backups[k-1][1] backups = backups[:k] branch = branch[:k] moves = num_moves(sudoku_board) else: backups.append([sudoku_board.copy(),copy_GL(guess_list)]) sudoku_board[q][r] = guess[0] moves -= 1 branch += [[q,r]] guess_list = refresh_guess_list(guess,q,r,guess_list) return sudoku_board sudoku_starter(null_sudoku) import cProfile cProfile.run("sudoku_starter(null_sudoku)", sort='tottime') def next_branch(branch,move,q,r): # invertendo a ordem dos galhos 'returns the next branch to try (right to left)' if move == 0: #print(move,q,r,'wut') return -1 # error code j = branch.count([q,r]) if j >= len(move): return 0 return [move[-j-1]] # starts from the *right* of the list sudoku_starter(null_sudoku) import cProfile cProfile.run("sudoku_starter(null_sudoku)", sort='tottime') normal_sudoku = [[8,0,0,0,0,0,0,0,0], [0,0,3,6,0,0,0,0,0], [0,7,0,0,9,0,2,0,0], [0,5,0,0,0,7,0,0,0], [0,0,0,0,4,5,7,0,0], [0,0,0,1,0,0,0,3,0], [0,0,1,0,0,0,0,6,8], [0,0,8,5,0,0,0,1,0], [0,9,0,0,0,0,4,0,0]] normal_sol = [[8,1,2,7,5,3,6,4,9], [9,4,3,6,8,2,1,7,5], [6,7,5,4,9,1,2,8,3], [1,5,4,2,3,7,8,9,6], [3,6,9,8,4,5,7,2,1], [2,8,7,1,6,9,5,3,4], [5,2,1,9,7,4,3,6,8], [4,3,8,5,2,6,9,1,7], [7,9,6,3,1,8,4,5,2]] %time sudoku_starter(normal_sudoku) ans = [[8, 1, 2, 7, 5, 3, 6, 4, 9], [9, 4, 3, 6, 8, 2, 1, 7, 5], [6, 7, 5, 4, 9, 1, 2, 8, 3], [1, 5, 4, 2, 3, 7, 8, 9, 6], [3, 6, 9, 8, 4, 5, 7, 2, 1], [2, 8, 7, 1, 6, 9, 5, 3, 4], [5, 2, 1, 9, 7, 4, 3, 6, 8], [4, 3, 8, 5, 2, 6, 9, 1, 7], [7, 9, 6, 3, 1, 8, 4, 5, 2]] def next_branch(branch,move,q,r): # voltando para a ordem normal 'returns the next branch to try (left to right)' if move == 0: #print(move,q,r,'wut') return -1 # error code j = branch.count([q,r]) if j >= len(move): return 0 return [move[j]] # starts from the *left* of the list %time sudoku_starter(normal_sudoku) ans2 = [[8, 1, 2, 7, 5, 3, 6, 4, 9], [9, 4, 3, 6, 8, 2, 1, 7, 5], [6, 7, 5, 4, 9, 1, 2, 8, 3], [1, 5, 4, 2, 3, 7, 8, 9, 6], [3, 6, 9, 8, 4, 5, 7, 2, 1], [2, 8, 7, 1, 6, 9, 5, 3, 4], [5, 2, 1, 9, 7, 4, 3, 6, 8], [4, 3, 8, 5, 2, 6, 9, 1, 7], [7, 9, 6, 3, 1, 8, 4, 5, 2]] assert(ans == ans2)Regression modeling with statsmodelimport numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt from statsmodels.sandbox.regression.predstd import wls_prediction_std %matplotlib inlineBasic Example# load example data spector_data = sm.datasets.spector.load() spector_data.exog = sm.add_constant(spector_data.exog, prepend=False) # fit and summarize OLS model mod = sm.OLS(spector_data.endog, spector_data.exog) res = mod.fit() print(res.summary())OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.416 Model: OLS Adj. R-squared: 0.353 Method: Least Squares F-statistic: 6.646 Date: Thu, 23 Jun 2016 Prob (F-statistic): 0.00157 Time: 17:57:48 Log-Likelihood: -12.978 No. 
Observations: 32 AIC: 33.96 Df Residuals: 28 BIC: 39.82 Df Model: 3 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]More detailed example# create artificial data np.random.seed(9876789) nsample = 100 x = np.linspace(0, 10, 100) X = np.column_stack((x, x**2)) beta = np.array([1, 0.1, 10]) e = np.random.normal(size=nsample) # we need an intercept so create a column of ones X = sm.add_constant(X) y = np.dot(X, beta) + e # fit and summary model = sm.OLS(y, X) results = model.fit() print(results.summary()) print('Parameters: ', results.params) print('R2: ', results.rsquared) # now simulate artificial data with a non-linear relationship between x and y nsample = 50 sig = 0.5 x = np.linspace(0, 20, nsample) X = np.column_stack((x, np.sin(x), (x-5)**2, np.ones(nsample))) beta = [0.5, 0.5, -0.02, 5.] y_true = np.dot(X, beta) y = y_true + sig * np.random.normal(size=nsample) # fit and summary res = sm.OLS(y, X).fit() print(res.summary()) print('Parameters: ', res.params) print('Standard errors: ', res.bse) print('Predicted values: ', res.predict()) # plot to compare the true relationship to OLS predictions prstd, iv_l, iv_u = wls_prediction_std(res) fig, ax = plt.subplots(figsize=(8,6)) ax.plot(x, y, 'o', label="data") ax.plot(x, y_true, 'b-', label="True") ax.plot(x, res.fittedvalues, 'r--.', label="OLS") ax.plot(x, iv_u, 'r--') ax.plot(x, iv_l, 'r--') ax.legend(loc='best');Exploratory data analysis (EDA)Exploratory data analysis is an approach of analyzing data sets to summarize their main characteristics, often using statistical graphics and other data visualization methods. A statistical model can be used or not, but primarily EDA is for seeing what the data can tell us beyond the formal modeling or hypothesis testing task. 
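Since the definition above leans on statistical graphics, a minimal visual first pass might look like the sketch below. It assumes a pandas DataFrame `df` with some numeric columns, such as the one loaded a few cells further down; the specific plots chosen here are illustrative, not part of the original notebook.
import seaborn as sns
import matplotlib.pyplot as plt

# distribution of every numeric column at a glance
df.hist(figsize=(14, 10), bins=30)
plt.tight_layout()
plt.show()

# pairwise correlations between the numeric features
sns.heatmap(df.select_dtypes('number').corr(), cmap='coolwarm', center=0)
plt.show()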
Importing scripts folderimport os import sys sys.path.append(os.path.abspath(os.path.join('../scripts')))Loading Libraries we will use in the projectimport pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px sns.set()Importing scripts & modulesfrom helper import MyHelper import utility import plotsLoading the dataCSV_PATH = "../data/data.csv" # the class has utility function we are going to use Helper = MyHelper() # we use our helper function to read csv data # we treate values like 'n/a', 'na', 'undefined' as missing values df = Helper.read_csv(CSV_PATH) df.head()Exploring the data Getting the basic info about the datasetdf.info() RangeIndex: 569 entries, 0 to 568 Data columns (total 33 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 569 non-null int64 1 diagnosis 569 non-null object 2 radius_mean 569 non-null float64 3 texture_mean 569 non-null float64 4 perimeter_mean 569 non-null float64 5 area_mean 569 non-null float64 6 smoothness_mean 569 non-null float64 7 compactness_mean 569 non-null float64 8 concavity_mean 569 non-null float64 9 concave points_mean 569 non-null float64 10 symmetry_mean 569 non-null float64 11 fractal_dimension_mean 569 non-null float64 12 radius_se 569 non-null float64 13 texture_se 569 non-null float64 14 perimeter_se 5[...]Getting the basic description of the datasetdf.describe() print(f" There are {df.shape[0]} rows and {df.shape[1]} columns")There are 569 rows and 33 columnsChecking for duplicatesdf.duplicated() #Droping if there is any duplicates in the dataset df.drop_duplicates()Checking the null countsdf.isnull().sum()Checking for missing values#The percent_missing function checkes any missing values and convert it into percentage utility.percent_missing(df)The dataset contains 3.03 % missing values.Eads et al Method, using NSF subsetted corpus to cfda = 47.070import pandas as pd import sklearn from sklearn.feature_extraction.text import CountVectorizer from sklearn.decomposition import LatentDirichletAllocation import numpy as np import filter #from git/dspg21RnD/wheat_filtration/wheat_filtration import keywords #from git/dspg21RnD/wheat_filtration/wheat_filtration import filter #import keywords def total_topic_proportion(document_topics, relevant_topics): """Return sum of relevant topic proportions for a document. Arguments: document_topics (iterable of float): topic proportions for one document. relevant topics (iterable of int): a list of the numbers corresponding with the topics considered relevant by the user.""" assert (len(relevant_topics) <= len(document_topics) ) # TODO make this the right kind of error return sum([document_topics[i] for i in relevant_topics]) def keyword_proportion(document, keyword_list): """Return percentage of words in the given doc that are present in keyword_list.""" doc_tokens = document.split() num_keywords = sum( [1 if word in keyword_list else 0 for word in doc_tokens]) return float(num_keywords)/len(doc_tokens) def superkeyword_presence(document, superkeywords): """Return 1 if document contains any superkeywords, 0 if not.""" for word in superkeywords: if word in document.split(): return True return False class FilterHelper(): """Creates a filter object containing filter criteria such as keyword list, superkeyword list, total topic proportion threshold, and keyword proportion threshold. Arguments: topic_model (TopicModel): a TopicModel object instantiated with a corpus or files from a Mallet topic model. 
relevant_topics (iterable of int): a list of the numbers corresponding with the topics considered relevant by the user. Note that the number corresponding with the first topic is '0', the second topic is '1', etc. n_keywords: number of keywords to include in keyword list. Default is 20. superkeywords (iterable of str): a list of keywords which signify immediate relevance of the document that contains them (better wording). Default is an empty list. keyword_list: A list of keywords ordered by [the relevance they signify]. Default is a keyword list generated using the relative entropy method. total_topic_prop_threshold (float): the threshold of relevance for the total proportion of relevant topics in a document. If a document surpases the threshold, it is considered relevant. keyword_prop_threshold (float): the threshold of relevance for the proportion of words on the keyword list that appear in a document. If a document surpases the threshold, it is considered relevant. Attributes: topic_model (TopicModel): a TopicModel object instantiated with a corpus or files from a Mallet topic model. relevant_topics (iterable of int): a list of the numbers corresponding with the topics considered relevant by the user. superkeywords (iterable of str): a list of keywords which signify immediate relevance of the document that contains them (better wording). Default is an empty list. keyword_list: A list of keywords ordered by [the relevance they signify]. Default is a keyword list generated using the relative entropy method. total_topic_prop_threshold (float): the threshold of relevance for the total proportion of relevant topics in a document. If a document surpases the threshold, it is considered relevant. Default is 0.25. keyword_prop_threshold (float): the threshold of relevance for the proportion of words on the keyword list that appear in a document. If a document surpases the threshold, it is considered relevant. Default is 0.15. Raises: RuntimeError: if user enters both keyword list and n_keywords when using the keyword_list setter method. """ def __init__(self, topic_model, vectorizer, relevant_topics, keyword_list=None, n_keywords=100, superkeywords=[], term_words = [], total_topic_prop_threshold=0.25, keyword_prop_threshold=0.15): self._relevant_topics = relevant_topics if keyword_list is None: keyword_list = keywords.rel_ent_key_list( topic_model, n_keywords, relevant_topics) self._keyword_list = keyword_list lower_superkeys = [word.lower() for word in superkeywords] # TODO: deal with this appropriately when making lowercasing optional extended_superkeys = [ word for word in vectorizer.get_feature_names() if word in lower_superkeys or any([(chunk in lower_superkeys) for chunk in word.split('_')]) ] self._superkeywords = extended_superkeys self._total_topic_prop_threshold = total_topic_prop_threshold self._keyword_prop_threshold = keyword_prop_threshold self.term_words = term_words self._topic_model = topic_model self._vectorizer = vectorizer @property def topic_model(self): """Get topic_model used to create filter""" return self._topic_model @property def relevant_topics(self): """Get list of relevant topics""" return self._relevant_topics @property def keyword_list(self): """Get or set keyword list. 
Input either a list of keywords, or input an integer n to generate a keyword list containing n words.""" return self._keyword_list @keyword_list.setter def keyword_list(self, keyword_list=None, n_keywords=None): if keyword_list is not None: self._keyword_list = keyword_list elif n_keywords is not None: self._keyword_list = keywords.rel_ent_key_list( self.topic_model, n_keywords, self.relevant_topics) else: raise RuntimeError( "Enter either a keyword list or an integer for number of keywords") @property def superkeywords(self): return self._superkeywords @superkeywords.setter def superkeywords(self, superkeywords): self._superkeywords = superkeywords @property def total_topic_prop_threshold(self): return self._total_topic_prop_threshold @total_topic_prop_threshold.setter def total_topic_prop_threshold(self, total_topic_prop_threshold): self._total_topic_prop_threshold = total_topic_prop_threshold @property def keyword_prop_threshold(self): return self._keyword_prop_threshold @keyword_prop_threshold.setter def keyword_prop_threshold(self, keyword_prop_threshold): self._keyword_prop_threshold = keyword_prop_threshold def proportion_lists(): """makes a matrix or list of ttp, superkeyword, and keyword proportion for the docs in corpus and sets the respective topic model attributes""" pass def subset_quality(threshs, labeled_subset): # also had args word_list_gen and scorefun """Calculate F1 score for the array of thresholds threshs (max topic prop, total topic prop, vocab prop, and number of words in vocabulary list) on labeled subset""" pass def subset_info(threshs): # seems like a cool feature to include """Return set of false positives, true positives, false negatives, and true negatives, as well as the sizes of the false neg and false pos sets, as well as the size of set predicted as relevant, about the subset created by the given set of thresholds (mtp, ttp, voc prop, and voc list length, in that order). This function can be edited to output any kind of info about the subset, eg the filenames.""" pass #functions for creating a topic dictionary, viewing the topics in the topic model, #and selecting only the relevant topics based on a threshold and our keyword list. def topic_dictionary(lda_model, lda_vectorizer, top_n = 10): topic_ls = {} #append keys, append the values for idx, topic in enumerate(lda_model.components_): # loop through each row of H. idx = row index. topic = actual row print_list = [(lda_vectorizer.get_feature_names()[i], topic[i]) for i in topic.argsort()[:-top_n - 1:-1]] topic_ls[idx] = print_list return topic_ls def print_topics(model, vectorizer, top_n=10): for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. 
topic = actual row print("\nTopic %d:" % (idx)) print_list = [(vectorizer.get_feature_names()[i], topic[i]) for i in topic.argsort()[:-top_n - 1:-1]] for item in print_list: print(item) def rel_ent_key_list(topic_model, vectorizer, n_top_keywords, relevant_topics): """Returns a list of the top n keywords based on relative entropy score Arguments: topic_model (TopicModel): a topic by vocabulary word matrix where each entry is the total word count for that word in that topic n_top_words (int): the number of keywords the method will return relevant_topics (iterable of int) Returns: keyword_list (iterable of str): list of the top n keywords, sorted """ topic_word_matrix = topic_model.components_ lda_vectorizer = vectorizer # Log of probabilities of vocab words #this works vocab_logs = np.log(topic_word_matrix.sum( axis=0) / topic_word_matrix.sum()) # Log of probabilities of vocab words given they were in each relevant topic #this is being built to calculate p(w)*log[p(w)/q(w)] #this works topic_logs = np.log(topic_word_matrix[relevant_topics, :].sum( axis=0) / topic_word_matrix[relevant_topics, :].sum()) # relative entropy proportions, unsorted #log rules: log[p(w)/q(w)] = log(p(w)) - log(q(w)) unsorted_props = np.asarray(topic_word_matrix.sum(axis=0) / topic_word_matrix.sum()) * np.asarray(topic_logs - vocab_logs) unsorted_props = np.matrix.flatten(unsorted_props) sorted_props_and_voc = sorted([(unsorted_props[i], lda_vectorizer.get_feature_names()[i]) for i in list( np.argpartition(unsorted_props, len(lda_vectorizer.get_feature_names()) - n_top_keywords))[-n_top_keywords:]], reverse=True) ordered_vocab = [] for (_, voc) in sorted_props_and_voc: ordered_vocab.append(voc) return ordered_vocab #making a filter_corpus function (copied from wheat_filtration package) def total_topic_proportion(document_topics, relevant_topics, doc_number = 0): """Return sum of relevant topic proportions for a document. Arguments: document_topics (iterable of float): topic proportions for one document. relevant topics (iterable of int): a list of the numbers corresponding with the topics considered relevant by the user.""" assert (len(relevant_topics) <= len(document_topics) ) # TODO make this the right kind of error document = document_topics[doc_number] topic_prop = 0 for i in relevant_topics: topic_prop += document[i] return topic_prop def keyword_proportion(document, keyword_list): """Return percentage of words in the given doc that are present in keyword_list.""" doc_tokens = document num_keywords = sum( [1 if word in keyword_list else 0 for word in doc_tokens]) return float(num_keywords)/len(doc_tokens) def superkeyword_presence(document, superkeywords): """Return 1 if document contains any superkeywords, 0 if not.""" for word in superkeywords: if word in document: return True return False def in_ai_phrases(abstract, ai_phrases): text = " ".join(abstract) for phrase in ai_phrases: if phrase in text: return True return False def is_relevant(doc, doc_topics, filter_helper, doc_number = 0, ai_phrases = ["machine learn", "deep learn", "deep learning", "artificial intelligence", "natural language processing"]): """Returns a boolean for relevance of given document. A document is considered relevant if: it contains any superkeywords(filter_helper.superkeywords), passes the total topic proportion threshold(filter_helper.total_topic_prop_threshold), or passes the keyword proportion threshold(filter_helper.keyword_prop_threshold). 
Arguments: doc (string): preprocessed document from the corpus doc_topics (iterable of float): proportion of each topic present in the given document filter_helper (FilterHelper): an object containing the necessary information to label the relevance of the given document Returns: (bool): Representing whether or not the given document is relevant according to the information in filter_helper""" has_superkeyword = superkeyword_presence( doc, filter_helper.superkeywords) in_phrases = in_ai_phrases(doc, ai_phrases) passes_total_topic_thresh = total_topic_proportion( doc_topics, filter_helper.relevant_topics, doc_number) > (filter_helper.total_topic_prop_threshold) passes_keyword_thresh = keyword_proportion( doc, filter_helper.keyword_list) > filter_helper.keyword_prop_threshold return has_superkeyword or passes_total_topic_thresh or passes_keyword_thresh or in_phrases def filter_corpus(abstract_column, doc_topics, filter_helper, ai_phrases = ["machine learn", "deep learn", "deep learning", "artificial intelligence", "natural language processing"]): subcorpus_id = [] superkey = 0 topic_thresh = 0 keyword_thresh = 0 phrases = 0 for i, abstract in enumerate(abstract_column): doc = abstract if is_relevant(doc, doc_topics, filter_helper, doc_number = i): if superkeyword_presence(doc, filter_helper.superkeywords): superkey += 1 if total_topic_proportion(doc_topics, filter_helper.relevant_topics, doc_number = i) > (filter_helper.total_topic_prop_threshold): topic_thresh += 1 if keyword_proportion(doc, filter_helper.keyword_list) > filter_helper.keyword_prop_threshold : keyword_thresh += 1 if in_ai_phrases(doc, ai_phrases): phrases += 1 subcorpus_id.append(i) print("Superkeyword presence: ", superkey, "\nTotal Topic Proportion: ", topic_thresh, "\nKeyword Threshold: ", keyword_thresh, "\nPhrase words matched: ", phrases, "\nTotal docs: ", len(subcorpus_id)) return subcorpus_id #start with the core terms from the OECD paper core_terms = ["adaboost","artificial intelligence","artificial neural network","back propagation" ,"back propagation neural network","computational intelligence","computer vision" ,"convolutional neural network","deep belief network","deep convolutional neural network" ,"deep learn","deep neural network","elman network","elman neural network" ,"expert system","fee forward neural network","inference engine","machine intelligence" ,"machine learn","machine translation","machine vision","multilayer neural network" ,"natural language process","perceptron","random forest","rbf neural network","recurrent neural network" ,"self organize map","spike neural network","supervise learn","support vector machine" ,"svm classifier","unsupervised learn","artificial_intelligence","artificial_neural_network","back_propagation" ,"back_propagation_neural_network","computational_intelligence","computer_vision" ,"convolutional_neural_network","deep_belief_network","deep_convolutional_neural_network" ,"deep_learn","deep_neural_network","elman_network","elman_neural_network" ,"expert_system","fee_forward_neural_network","inference_engine","machine_intelligence" ,"machine_learn","machine_translation","machine_vision","multilayer_neural_network" ,"natural_language_process","random_forest","rbf_neural_network","recurrent_neural_network" ,"self_organize_map","spike_neural_network","supervise_learn","support_vector_machine" ,"svm_classifier","unsupervised_learn", "machine_learning"] def relevant_topics(topic_dictionary, keyword_list, threshold = 1): """returns a list of the topics which contain a threshold % of 
the relevant words in the keyword list""" relevant_topic = [] for key in topic_dictionary: relevant_words = 0 for i in range(len(topic_dictionary[key])): if topic_dictionary[key][i][0] in keyword_list: relevant_words += 1 else: relevant_words += 0 if (relevant_words) >= threshold :#/ len(topic_dictionary[key]) >= threshold : relevant_topic.append(key) return relevant_topic df = pd.read_pickle("../../data/dspg21RnD/smaller-final-dataset.pkl") nsf = df[df["AGENCY"] == "NSF"] # filter where cfda = 47.070 nsf_csci = nsf[nsf["CFDA_CODE"] == "47.070"] df.reset_index(inplace=True) # I don't think I need to do this, but just in case nsf_csci.reset_index(inplace=True) tokens = nsf_csci["final_frqwds_removed"] text = [] # text will contain the processed tokens in string form (1 string per abstract) for abstract in tokens: text.append(" ".join(abstract)) text = pd.Series(text) lda_vectorizer = CountVectorizer(max_df=0.6, min_df=20) lda_dtm = lda_vectorizer.fit_transform(text) num_topics = 100 lda_model_100 = LatentDirichletAllocation(n_components=num_topics, doc_topic_prior = 1/num_topics, topic_word_prior=0.1, n_jobs=39, random_state = 0) doc_top_dist_100 = lda_model_100.fit_transform(lda_dtm) top_term_dist_100 = lda_model_100.components_ nsfcs_dic100 = topic_dictionary(lda_model_100, lda_vectorizer, 50) relevant_topics(nsfcs_dic100, core_terms, 0.04)So, we get 5 topics when we do 1 word out of 50 ahhaha. We only get topic 97 when we use 2 words out of 50. I will look through these topics and add to the topics i picked out myself and decide the relevant topics, then pick out the relative entropy keyword list before making a superkeyword list. "AI" is the 20th term. and there are only 34 times it comes up in this topic? Not gonna include I will keep 27.relevant_topics_HT = [27] relevant_topics_HT.append(87) relevant_topics_HT.append(97)When I ran it on my own, I picked out 19, 52, 54, 76, 79, 86, 97relevant_topics_HT relevant_topics_HT.append(19) relevant_topics_HT.append(52) #I don't know about this, since it is just robot and not the other AI terms. relevant_topics_HT.append(79) relevant_topics_HT.append(86) relevant_topics_HT print_topics(lda_model_100, lda_vectorizer)Topic 0: ('event', 730.781845371416) ('discovery', 583.8164989359399) ('domain', 329.10975423693554) ('analytics', 273.5916752530804) ('framework', 270.4499109174751) ('software', 219.42028034058512) ('scientific', 206.20389200158652) ('workflow', 183.222440789522) ('science', 159.09815332735093) ('source', 155.7388801028597) Topic 1: ('vehicle', 1003.5540341166559) ('transportation', 374.6671616662269) ('traffic', 351.07919632386125) ('driver', 252.09186600333013) ('driving', 198.9768789225774) ('road', 197.41491396313737) ('autonomous_vehicle', 188.85933542912068) ('trajectory', 182.416660865909) ('safety', 171.8632244973694) ('vehicular', 148.5576938678973) Topic 2: ('team', 1133.5323661560801) ('engineering', 327.3253606310934) ('technology', 316.82114344244604) ('workshop', 201.58176970036058) ('report', 196.04222913384802) ('competition', 184.7872157240752) ('individual', 165.7674683226673) ('university', 148.41031049446593) ('disability', 144.2238105266517) ('nsf', 143.824387[...]Ok, so with my judgement plus some that the relevant_topics function picked out, we have 7 topics that should be roughly about AI. 
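As a quick sanity check on that manual selection, the sketch below lists which OECD core terms actually appear among the top-50 words of each chosen topic. It only reuses objects already built above (`nsfcs_dic100`, `core_terms`, `relevant_topics_HT`); the loop itself is an illustrative addition, not part of the original workflow.
for t in relevant_topics_HT:
    # nsfcs_dic100[t] is a list of (word, weight) pairs for topic t
    matched = [word for word, weight in nsfcs_dic100[t] if word in core_terms]
    print("Topic", t, "->", len(matched), "core terms:", matched)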
Creating the relative entropy keyword list:rel_ent_top200 = rel_ent_key_list(lda_model_100, lda_vectorizer, 200, relevant_topics_HT)Creating the superkeyword list: "To create the super keyword list, we examine an expanded list -- the top 1000 words -- of high-relative-entropy-constribution words from the last step and select those words that are unambiguously related to the concept of interest, i.e. likely to be used when referring to the concept of interest and no other concepts. creating the filter helper to see if we can start trying to filter the corpus to get some sort of sense the abstracts that are about AIai_HT_KL = ['machine_learning', 'artificial_intelligence', 'artificial_intelligence_ai', 'convolutional_neural_network', 'recognition_asr', 'artificial_intelligence_machine_learning'] phrase = ['learning', 'learn', 'processing', 'natural', 'deep', 'intelligence', 'artificial'] phrase = ["machine learn", "deep learn", "deep learning", "artificial intelligence", "natural language processing"] my_filter_helper = FilterHelper(topic_model = lda_model_100, vectorizer = lda_vectorizer, relevant_topics = relevant_topics_HT, superkeywords = ai_HT_KL, keyword_list = rel_ent_top200, total_topic_prop_threshold = 0.25, keyword_prop_threshold = 0.25)16k rows because one for each document. 100 columns because 1 for each topic.#creating a new document-topic-distribution with the full corpus tokens2 = df["final_frqwds_removed"] fullcorpus = [] # text will contain the processed tokens in string form (1 string per abstract) for abstract in tokens2: fullcorpus.append(" ".join(abstract)) fullcorpus = pd.Series(fullcorpus) newdocs = fullcorpus new_doc_term_matrix = lda_vectorizer.transform(newdocs) new_doc_term_dist = lda_model_100.transform(new_doc_term_matrix) pd.DataFrame(new_doc_term_dist) my_subcorpus = filter_corpus(nsf_csci["final_frqwds_removed"], doc_top_dist_100, my_filter_helper) len(my_subcorpus) my_subcorpus[0:10]Kathryn - adapt to full corpusdef nsf_filter_corpus_KL(df, doc_topics, filter_helper, ai_phrases = ["machine learn", "deep learn", "deep learning", "artificial intelligence", "natural language processing"]): subcorpus_id = [] superkey = 0 topic_thresh = 0 keyword_thresh = 0 phrases = 0 for i, abstract in enumerate(df["final_frqwds_removed"]): doc = abstract if is_relevant(doc, doc_topics, filter_helper, doc_number = i): if superkeyword_presence(doc, filter_helper.superkeywords): superkey += 1 if total_topic_proportion(doc_topics, filter_helper.relevant_topics, doc_number = i) > (filter_helper.total_topic_prop_threshold): topic_thresh += 1 if keyword_proportion(doc, filter_helper.keyword_list) > filter_helper.keyword_prop_threshold : keyword_thresh += 1 if in_ai_phrases(doc, ai_phrases): phrases += 1 subcorpus_id.append(i) print("Superkeyword presence: ", superkey, "\nTotal Topic Proportion: ", topic_thresh, "\nKeyword Threshold: ", keyword_thresh, "\nPhrase words matched: ", phrases, "\nTotal docs: ", len(subcorpus_id)) return subcorpus_id nsf_idx = nsf_filter_corpus_KL(nsf_csci, doc_top_dist_100, my_filter_helper) nsf_idx[0:10] nsf_csci.head(10) def full_filter_corpus_KL(df, filter_helper, ai_phrases = ["machine learn", "deep learn", "deep learning", "artificial intelligence", "natural language processing"]): subcorpus_id = [] superkey = 0 topic_thresh = 0 keyword_thresh = 0 phrases = 0 for i, abstract in enumerate(df["final_frqwds_removed"]): doc = abstract # check conditions other than total_topic_proportion has_superkeyword = superkeyword_presence(doc, 
filter_helper.superkeywords) in_phrases = in_ai_phrases(doc, ai_phrases) passes_keyword_thresh = keyword_proportion(doc, filter_helper.keyword_list) > filter_helper.keyword_prop_threshold if has_superkeyword: superkey += 1 if in_phrases: phrases += 1 if passes_keyword_thresh: keyword_thresh += 1 if(has_superkeyword or in_phrases or passes_keyword_thresh): subcorpus_id.append(i) print("Superkeyword presence: ", superkey, "\nTotal Topic Proportion: ", topic_thresh, "\nKeyword Threshold: ", keyword_thresh, "\nPhrase words matched: ", phrases, "\nTotal docs: ", len(subcorpus_id)) return subcorpus_id full_ids = full_filter_corpus_KL(df, my_filter_helper) full_ids[1:10] # find unique set of ids proj_id_nsf = nsf_csci.loc[nsf_idx, "PROJECT_ID"] proj_id_full = df.loc[full_ids, "PROJECT_ID"] print(proj_id_nsf[0:10]) print(proj_id_full[0:10]) ai_proj_ids = np.concatenate([proj_id_nsf, proj_id_full]) ai_proj_ids = np.unique(ai_proj_ids) len(ai_proj_ids) ai_corpus = df[df["PROJECT_ID"].isin(ai_proj_ids)] ai_corpus.head() len(ai_corpus) ai_corpus["ABSTRACT"].iloc[1000] ai_corpus["is_ai_eads"] = True ai_corpus.head() ai_corpus.to_csv("../../data/dspg21RnD/Eads_AI_abstracts-KL.csv", index = False) my_subcorpus[0:10] # nsf_csci dataframe of NSF # my_subcorpus = list of indices nsf_csci["index"] = range(len(nsf_csci)) subcorpus_df = pd.DataFrame(my_subcorpus) subcorpus_df["is_ai_eads"] = True subcorpus_df = subcorpus_df.rename(columns = {0: "index"}) subcorpus_df ai_test = pd.merge(nsf_csci, subcorpus_df, on="index", how = "right") ai_test ai_test.to_csv("../../data/dspg21RnD/Eads_AI_abstracts.csv", index = False)Yesterday, when I just used the top 100 or so relative entropy, I had 685,677 out of 690,814 as the subcorpus. Today, when picking out just a couple superkeywords, I got This was a pretty complicated method that we had to adapt to our problem, so there are a lot of things we are going to have to work out such as deciding what to do about lemmatization (if we want to fuzzy match), work on the sensitivity of including a corpus, 210907 with a shorter list on the full corpus. 11,219 within NSF CSCI dataset with the shorter list.my_subcorpus[0] nsf_csci["ABSTRACT"].iloc[1] my_subcorpus[1] nsf_csci["ABSTRACT"].iloc[5] my_subcorpus[2] nsf_csci["ABSTRACT"].iloc[6]WeatherPy---- Note* Instructions have been included for each segment. 
You do not have to follow them exactly, but they are included to help you think through the steps.import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from scipy.stats import linregress # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180)Generate Cities List# List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) city_name_list = [] cloudiness_list = [] country_list = [] date_list = [] humidity_list = [] lat_list = [] lng_list = [] max_temp_list = [] wind_speed_list = [] index_counter = 0 set_counter = 1Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it'sbeing processed (with the city number and city name).print("Beginning Data Retrieval ") print("-----------------------------") base_url = "http://api.openweathermap.org/data/2.5/weather?" units = "imperial" query_url = f"{base_url}appid={weather_api_key}&units={units}&q=" # For each city name in cities list, do below things... for index, city in enumerate(cities, start = 1): try: response = requests.get(query_url + city).json() city_name_list.append(response["name"]) cloudiness_list.append(response["clouds"]["all"]) country_list.append(response["sys"]["country"]) date_list.append(response["dt"]) humidity_list.append(response["main"]["humidity"]) lat_list.append(response["coord"]["lat"]) lng_list.append(response["coord"]["lon"]) max_temp_list.append(response['main']['temp_max']) wind_speed_list.append(response["wind"]["speed"]) if index_counter > 49: index_counter = 0 set_counter = set_counter + 1 else: index_counter = index_counter + 1 print(f"Processing Record {index_counter} of Set {set_counter} : {city}") except(KeyError, IndexError): print("City not found. Skipping...") print("-----------------------------") print("Data Retrieval Complete") print("-----------------------------")Beginning Data Retrieval ----------------------------- Processing Record 3 of Set 2 : mataura Processing Record 4 of Set 2 : inuvik Processing Record 5 of Set 2 : inirida Processing Record 6 of Set 2 : chokurdakh Processing Record 7 of Set 2 : esperance Processing Record 8 of Set 2 : camabatela Processing Record 9 of Set 2 : albany City not found. Skipping... Processing Record 10 of Set 2 : lagoa Processing Record 11 of Set 2 : port hardy Processing Record 12 of Set 2 : corinto Processing Record 13 of Set 2 : rikitea Processing Record 14 of Set 2 : port blair Processing Record 15 of Set 2 : jamestown Processing Record 16 of Set 2 : sangar City not found. Skipping... 
Processing Record 17 of Set 2 : lodja Processing Record 18 of Set 2 : cockburn town Processing Record 19 of Set 2 : vaini Processing Record 20 of Set 2 : chumikan Processing Record 21 of Set 2 : cape town Processing Record 22 of Set 2 : westport Processing Record 23 of Set 2 : talnakh Processing Record 24 of Set 2 : port e[...]Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame# Create a panda data frame using data retrieved weather_dict = pd.DataFrame({ "City" : city_name_list, "Cloudiness" : cloudiness_list, "Country" : country_list, "Date" : date_list, "Humidity" : humidity_list, "Lat" : lat_list, "Lng" : lng_list, "Max Temp" : max_temp_list, "Wind Speed" : wind_speed_list }) # Display the Data Frame weather_dict.count() # Display the Data Frame weather_dict # Save data to .csv in working directory. weather_dict.to_csv('city_weather_data.csv', index=False)Plotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot# Create Latitude vs. Temperature Plot scatter plot fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(weather_dict["Lat"], weather_dict["Max Temp"], facecolor = "steelblue", edgecolor = "black") # Set title plt.title("City Latitude vs. Max Temperature (01/17/20)") # Set x axis label plt.xlabel("Laitude") # Set y axis label plt.ylabel("Max Temperature (F)") # Set grid line plt.grid(linestyle='-', linewidth=1, alpha = 0.5) # Save the plotted figure as .pngs plt.savefig("../Images/City Latitude vs Max Temperature.png")Latitude vs. Humidity Plot# Create Latitude vs. Humidity Plot scatter plot fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(weather_dict["Lat"], weather_dict["Humidity"], facecolor = "steelblue", edgecolor = "black") # Set title plt.title("City Latitude vs. Humidity (01/17/20)") # Set x axis label plt.xlabel("Laitude") # Set y axis label plt.ylabel("Humidity (%)") # Set grid line plt.grid(linestyle='-', linewidth=1, alpha = 0.5) # Save the plotted figure as .pngs plt.savefig("../Images/City Latitude vs Humidity.png")Latitude vs. Cloudiness Plot# Create Latitude vs. Cloudiness Plot scatter plot fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(weather_dict["Lat"], weather_dict["Cloudiness"], facecolor = "steelblue", edgecolor = "black") # Set title plt.title("City Latitude vs. Cloudiness (01/17/20)") # Set x axis label plt.xlabel("Laitude") # Set y axis label plt.ylabel("Cloudiness (%)") # Set y axis limit plt.ylim(-5,105) # Set grid line plt.grid(linestyle='-', linewidth=1, alpha = 0.5) # Save the plotted figure as .pngs plt.savefig("../Images/City Latitude vs Cloudiness.png")Latitude vs. Wind Speed Plot# Create Latitude vs. Wind Speed scatter plot fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(weather_dict["Lat"], weather_dict["Wind Speed"], facecolor = "steelblue", edgecolor = "black") plt.title("City Latitude vs. 
Wind Speed (mph) (01/17/20)") # Set x axis label plt.xlabel("Laitude") # Set y axis label plt.ylabel("Wind Speed (%)") # Set y axis limit plt.ylim(-2,50) # Set grid line plt.grid(linestyle='-', linewidth=1, alpha = 0.5) # Save the plotted figure as .pngs plt.savefig("../Images/City Latitude vs Wind Speed (mph).png")Linear Regressiondef plot_linear_regression(x, y, ax=None, **kwargs): # --- use the axes specified --- ax = ax # --- get linear regression values and store in variables --- slope, intercept, rvalue, pvalue, stderror = linregress(x,y) print(f"Linear Regression Completed...") print(f"The R Squared is: {rvalue**2}") # y = m * x + b regress_values = slope * x + intercept # --- create line equation --- line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) # --- plot the linear model --- ax.plot(x, regress_values, color="firebrick") # --- return the plot and line equation created --- return ax, line_eq northern_hemisphere = weather_dict.loc[weather_dict["Lat"] >= 0, :].copy().reset_index(drop=True) southern_hemisphere = weather_dict.loc[weather_dict["Lat"] < 0, :].copy().reset_index(drop=True) # --- extract relevant fields from both data frames for plotting --- north_latitudes = northern_hemisphere["Lat"] north_temperatures = northern_hemisphere["Max Temp"] north_humidity = northern_hemisphere["Humidity"] north_cloudiness = northern_hemisphere["Cloudiness"] north_windspeed = northern_hemisphere["Wind Speed"] south_latitudes = southern_hemisphere["Lat"] south_temperatures = southern_hemisphere["Max Temp"] south_humidity = southern_hemisphere["Humidity"] south_cloudiness = southern_hemisphere["Cloudiness"] south_windspeed = southern_hemisphere["Wind Speed"]Northern Hemisphere - Max Temp vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(north_latitudes, north_temperatures, marker="o", facecolors="steelblue", edgecolors="black") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(north_latitudes, north_temperatures, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Max Temperature in the Northern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Max Temperature (F)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(5,45),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/northern-latitude-vs-temp-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.7094456114610278Southern Hemisphere - Max Temp vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(south_latitudes, south_temperatures, marker="o", facecolors="pink", edgecolors="pink") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(south_latitudes, south_temperatures, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. 
Max Temperature in the Southern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Max Temperature (F)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(-25, 35),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/southern-latitude-vs-temp-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.4810116815575026Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(north_latitudes, north_humidity, marker="o", facecolors="steelblue", edgecolors="black") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(north_latitudes, north_humidity, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Humidity in the Northern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Humidity (%)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(50,10),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/northern-latitude-vs-humidity-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.0017719383913902638Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(south_latitudes, south_humidity, marker="o", facecolors="pink", edgecolors="pink") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(south_latitudes, south_humidity, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Humidity in the Southern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Humidity (%)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(-55, 25),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/southern-latitude-vs-humidity-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.014903292985022333Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(north_latitudes, north_cloudiness, marker="o", facecolors="steelblue", edgecolors="black") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(north_latitudes, north_cloudiness, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Cloudiness in the Northern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Cloudiness (%)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(1,10),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/northern-latitude-vs-cloudiness-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 8.244131908131963e-05Southern Hemisphere - Cloudiness (%) vs. 
Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(south_latitudes, south_cloudiness, marker="o", facecolors="pink", edgecolors="pink") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(south_latitudes, south_cloudiness, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Cloudiness in the Southern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Humidity (%)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(-55, 25),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/southern-latitude-vs-cloudiness-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.0002739824062365588Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(north_latitudes, north_windspeed, marker="o", facecolors="steelblue", edgecolors="black") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(north_latitudes, north_windspeed, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Wind Speed in the Northern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Wind Speed (mph)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(5,26),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/northern-latitude-vs-windspeed-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.028772963607048056Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression# --- create scatter plot --- fig, ax = plt.subplots(figsize = (8,6)) plt.scatter(south_latitudes, south_windspeed, marker="o", facecolors="pink", edgecolors="pink") # --- call linear regression plot function to retrieve plot and line equation (for annotation) --- ax, line_eq = plot_linear_regression(south_latitudes, south_windspeed, ax) # --- add labels, title, limits & grid --- plt.title("City Latitude vs. Wind Speed in the Southern Hemisphere (19/06/20)", fontsize=14) plt.ylabel("Wind Speed (mph)", fontsize=12) plt.xlabel("Latitude", fontsize=12) plt.grid() # --- annotate the linear regression line equation on the plot --- plt.annotate(line_eq,(-55, 20),fontsize=16,color="firebrick") # --- display and save the plot --- plt.savefig("../Images/southern-latitude-vs-windspeed-linreg.png") plt.show()Linear Regression Completed... The R Squared is: 0.020189123750474595***BIKE SHARE ANALYSIS******PROJECT 1:******SUBMITTED BY ******IMPORT PYTHON PACKAGES***import pandas as pd # pandas for data manipulation and processing import numpy as np %matplotlib inline import seaborn as sns # seaborn for visualizations***DATA EXPLORATION AND INSIGHTS*** 1. 
Any particular bikes which were used a lot?> My definition of particular bikes which used a lot is a follows.- This block helps to identify the top 5 specific bikes which were used most often with bike_number and with maximum count of no of trips count which these bikes went far.- By having the list of specific bike_number which done maximum trips it helps us to identify the bike condition whether it operates in a good manner or do we need to replace the most rided bike with new one or do we need perform the complete bike health checkup. In order to reduce the risks of bike failure in a trip. - So by getting the list of most used bikes it helps us to replace the most used bike with new bike or bike health service can be done for most used bikes which will be helpful in reducing the accident or Bike Failure.%%bigquery --project fresh-electron-305718 particular_bikes SELECT bike_number,Count(*) AS trip_count FROM `bigquery-public-data.san_francisco.bikeshare_trips` GROUP BY bike_number HAVING bike_number is NOT Null ORDER BY trip_count DESC LIMIT 5 particular_bikes.filter(["bike_number", "trip_count"]).head() ax = sns.barplot(particular_bikes["bike_number"],particular_bikes["trip_count"],palette = 'hls',color = 'tab:purple') ax.set(xlabel = "Bike Number", ylabel = "Trips Count", title = "Top 5 Particular Bikes which used the Most")/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning2. Did the bike_number = "389" frequently start a ride from a particular station? The below analysis helps us to find the frequent start ride for a specific bike number in a particular locations is as follows.- There might be a case you would like to find a specific bike which frequently starts in a specific location.- There might be some people who regularly like to ride the same vehicle in the same station. So this analysis helps us to identify the frequent start ride from a particular location for a specific Bike.%%bigquery --project fresh-electron-305718 particular_location SELECT start_station_id, SUM(CASE WHEN bike_number = 389 THEN 1 END ) AS bike_number_389, FROM `bigquery-public-data.san_francisco.bikeshare_trips` where bike_number = 389 GROUP BY start_station_id ORDER BY bike_number_389 DESC LIMIT 5 particular_location.filter(["start_station_id", "bike_number_389"]).head() ax = sns.barplot(particular_location["bike_number_389"],particular_location["start_station_id"],palette = 'hls',color = 'tab:purple') ax.set(xlabel = "Bike Number 389 Count used in Each Station", ylabel = "Start Station ID", title = "Frequently Used Bikes in a Particular Location")/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning3. What are some popular routes? 
>My definition for some popular routes is as follows.- By listing the popular routes we can able to increase the Customers by providing the bikes in those specific stations with an discount offers.- This analysis will give us a best profit.%%bigquery --project fresh-electron-305718 popular_routes SELECT start_station_id, end_station_id, COUNT(*) AS Trips_on_route FROM `bigquery-public-data.san_francisco.bikeshare_trips` WHERE (start_station_id != end_station_id) GROUP BY start_station_id, end_station_id ORDER BY Trips_on_route DESC LIMIT 5 popular_routes.filter(["start_station_id", "end_station_id","Trips_on_route"]).head() ax = sns.barplot(popular_routes["start_station_id"],popular_routes["Trips_on_route"],palette = 'hls',color = 'tab:purple') ax.set(xlabel = "Start Station ID", ylabel = "Total Trips Count on Specifc Station", title = "Most Popular Routes")/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning4. Filter out the bike trips which are 60 minutes or more? > The below analysis helps us to find out the bike trips which are 60 Minutes or More.- It helps us to find the subscriber type (i.e., Customer or Subscriber) who is riding the vehicle for 60 minutes or more than 60 minutes.- Here we can able to analyse whether already subscribed customer is driving more than 60 minutes, or new customers driving the vechicle for more than 60 minutes. Here new customer wins.- From the below analysis we can able to understand new customers are riding the vehicle for 60 mintues or more than 60 minutes.%%bigquery --project fresh-electron-305718 bike_trips SELECT subscriber_type, start_date, duration_sec FROM `bigquery-public-data.san_francisco.bikeshare_trips` WHERE duration_sec >= 3600 LIMIT 5 bike_trips.filter(["subscriber_type", "start_date","duration_sec"]).head() ax = sns.barplot(bike_trips["duration_sec"],bike_trips["subscriber_type"],palette = 'hls',color = 'tab:purple') ax.set(xlabel = "Duration in Sec", ylabel = "", title = "Bike Trips which are 60 Mins or More than 60 Mins")/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning5. Do unregistered users take longer or shorter trips? 
> The below analysis helps us to find does unregistered users take longer or shorter trips.- By performing the average duration for the subscriber type, we can able to understand that unregistered users (i.e., Customer) is taking longer trips than subscriber.%%bigquery --project fresh-electron-305718 unregistered_users SELECT subscriber_type, AVG(duration_sec)/60 AS Average_Duration FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips` GROUP BY Subscriber_type LIMIT 5 unregistered_users.filter(["subscriber_type", "Average_Duration"]).head() ax = sns.barplot(unregistered_users["Average_Duration"],unregistered_users["subscriber_type"],palette = 'hls',color = 'tab:purple') ax.set(xlabel = "AVG Duration in Sec", ylabel = "", title = "Do unregistered users take longer or shorter trips")/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarningOffline Plotting TutorialThe new dataset comes with a tool for offline (i.e. not live as the data are coming in) plotting. This notebook explains how to use it and what it is capable of plotting.The tool in question is the function `plot_by_id`.%matplotlib notebook import numpy as np import qcodes as qc from typing import List, Dict, Tuple, Any import matplotlib.pyplot as plt import qcodes as qc from qcodes import ParamSpec, new_data_set, new_experiment from qcodes.dataset.plotting import plot_by_id from qcodes.dataset.database import initialise_databaseFirst we make an experimental run, so that we have something to plot.# if you just deleted your database file, you'll need to init a new one initialise_database() new_experiment('test_plot_by_id', 'nosample') # Make a handful of parameters to be used in the examples x = ParamSpec('x', 'numeric', label='Voltage', unit='V') t = ParamSpec('t', 'numeric', label='Time', unit='s') y = ParamSpec('y', 'numeric', label='Voltage', unit='V', depends_on=[x]) y2 = ParamSpec('y2', 'numeric', label='Current', unit='A', depends_on=[x]) z = ParamSpec('z', 'numeric', label='Majorana number', unit='Anyon', depends_on=[x, t])A single, simple 1D sweepdata_set = new_data_set('1D-sweep') data_set.add_parameter(x) data_set.add_parameter(y) %%time xvals = np.linspace(-3.4, 4.2, 250) for xnum in xvals: noise = np.random.randn()*0.1 # multiplicative noise yeah yeah data_set.add_result({'x': xnum, 'y': 2*(xnum+noise)**3 - 5*(xnum+noise)**2}) data_set.mark_complete()Wall time: 6.63 sNow let us plot that run. The function `plot_by_id` takes the `run_id` of the run to plot as a positional argument. Furthermore, the user may specify the matplotlib axis object (or list of axis objects) to plot on. If no axes are specified, the function creates new axis object(s). The function returns a tuple of a list of the axes and a list of the colorbar axes (just `None`s if there are no colorbars).axes, cbaxes = plot_by_id(data_set.run_id)Using the returned axis, we can e.g. change the plot linewidth and color. 
We refer to the matplotlib documentation for details on matplotlib plot customization.my_ax = axes[0] line = my_ax.lines[0] line.set_color('#223344') line.set_linewidth(3)Two interleaved 1D sweepsNow we make a run where two parameters are measured as a function of the same parameter.data_set = new_data_set('interleaved-1Ds') data_set.add_parameter(x) data_set.add_parameter(y) data_set.add_parameter(y2) xvals = np.linspace(-5, 5, 250) for xnum in xvals: data_set.add_result({'x': xnum, 'y': xnum**2}) data_set.add_result({'x': xnum, 'y2': -xnum**2}) data_set.mark_complete()In such a situation, `plot_by_id` by default creates a new axis for **each** dependent parameter. Sometimes this is not desirable; we'd rather have both plots on the same axis. In such a case, we might pass the same axis twice to `plot_by_id`.axes, cbaxes = plot_by_id(data_set.run_id)Let's do that nowfig, ax = plt.subplots(1) axes, cbaxes = plot_by_id(data_set.run_id, axes=[ax, ax])Regular 2D rectangular sweep scanFor 2D plots, a colorbar is usually present. As mentioned above, `plot_by_id` returns this.data_set = new_data_set('regular-2D-scan') data_set.add_parameter(x) data_set.add_parameter(t) data_set.add_parameter(z) xvals = np.linspace(-4, 5, 50) tvals = np.linspace(-500, 1500, 25) for xv in xvals: for tv in tvals: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv data_set.add_result({'x': xv, 't': tv, 'z': zv}) data_set.mark_complete() axes, colorbars = plot_by_id(data_set.run_id)A somewhat normal situation is that the colorbar was somehow mislabelled. Using the returned colorbar, the label can be overwritten.colorbar = colorbars[0] colorbar.set_label('Correct science label')Warped 2D rectangular sweep scanA nice feature of the new dataset is that the grid may be warped; it makes no difference.Here we warp the x axis of the previous scan to increase the resolution in the right half plane.data_set = new_data_set('warped-2D-scan') data_set.add_parameter(x) data_set.add_parameter(t) data_set.add_parameter(z) xvals = np.linspace(-4, 5, 50) + np.cos(2/9*np.pi*xvals+np.pi/4) tvals = np.linspace(-500, 1500, 25) for xv in xvals: for tv in tvals: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv data_set.add_result({'x': xv, 't': tv, 'z': zv}) data_set.mark_complete() axes, cbaxes = plot_by_id(data_set.run_id)Interrupted 2D scans (a hole in the cheese)In case a sweep in interrupted, the entire grid will not be filled out. 
This is also supported,in fact, any single rectangular hole is alloweddata_set = new_data_set('warped-with-hole-2D-scan') data_set.add_parameter(x) data_set.add_parameter(t) data_set.add_parameter(z) xvals = np.linspace(-4, 5, 50) + np.cos(2/9*np.pi*xvals+np.pi/4) tvals = np.linspace(-500, 1500, 25) # define two small forbidden range functions def no_x(xv): if xv > 0 and xv < 3: return True else: return False def no_t(tv): if tv > 0 and tv < 450: return True else: return False for xv in xvals: for tv in tvals: if no_x(xv) and no_t(tv): continue else: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv data_set.add_result({'x': xv, 't': tv, 'z': zv}) data_set.mark_complete() axes, colorbars = plot_by_id(data_set.run_id)Fancy plotting As a final example, let us combine several plots in one window.We first make a little grid of axes.fig, figaxes = plt.subplots(2, 2)Next, we make some runs (shamelessly copy-pasting from above).# First run data_set = new_data_set('1D-sweep') data_set.add_parameter(x) data_set.add_parameter(y) xvals = np.linspace(-3.4, 4.2, 250) for xnum in xvals: noise = np.random.randn()*0.1 # multiplicative noise yeah yeah data_set.add_result({'x': xnum, 'y': 2*(xnum+noise)**3 - 5*(xnum+noise)**2}) data_set.mark_complete() rid1 = data_set.run_id # Second run data_set = new_data_set('2D-sweep') data_set.add_parameter(x) data_set.add_parameter(t) data_set.add_parameter(z) xvals = np.linspace(-4, 5, 50) tvals = np.linspace(-500, 1500, 25) for xv in xvals: for tv in tvals: # just some arbitrary semi good looking function zv = np.sin(2*np.pi*xv)*np.cos(2*np.pi*0.001*tv) + 0.001*tv data_set.add_result({'x': xv, 't': tv, 'z': zv}) data_set.mark_complete() rid2 = data_set.run_idAnd then we put them just where we please.axes, colorbars = plot_by_id(rid1, figaxes[0, 0]) axes, colorbars = plot_by_id(rid2, figaxes[1, 1], colorbars)Note that if we want to replot on an axis with a colorbar we probably also want to reuse the colorbaraxes, colorbars = plot_by_id(rid2, figaxes[1, 1], colorbars) fig.tight_layout()Linetracker ExampleThis example shows how to use the [Linetracker](http://www.dfrobot.com.cn/goods-784.html) on the board. The Linetraker has 2 models: digital and analog model.For this notebook, a PYNQ Arduino is required, and the pins of linetracker should be set in the A0-A5.# Make sure the base overlay is loaded from pynq.overlays.base import BaseOverlay base = BaseOverlay("base.bit")1. Instantiate linetracker Before use the linetrackers(at most 2), you should set the pins for themThis notebook is also for testing the capacity of linetrackerLT_PINS = ["CHANNEL_A0","CHANNEL_A1","CHANNEL_A2", "CHANNEL_A3","CHANNEL_A4","CHANNEL_A5"] from pynq.lib.arduino import LT_sen # Instantiate linetracker on Arduino linetrack = LT_sen(base.ARDUINO,"CHANNEL_A3","CHANNEL_A4") #default pins is left:CHANNEL_A3, right:CHANNEL_A4 #if you set the pins as the default configuration #linetrack = LT_sen(base.ARDUINO) such a format is legal2. 
Test the capacity of linetrackerrun the code below, linetrackers will print the voltage level every secondfrom time import sleep while (1): print(linetrack.read_lt_data()) sleep(1)[223, 340, 0.01, 0.02] [229, 351, 0.01, 0.02] [1278, 346, 0.07, 0.02] [65535, 342, 3.33, 0.02] [881, 344, 0.04, 0.02] [221, 344, 0.01, 0.02] [65535, 360, 3.33, 0.02] [65535, 341, 3.33, 0.02] [65535, 354, 3.33, 0.02] [65535, 346, 3.33, 0.02] [65535, 361, 3.33, 0.02] [65535, 979, 3.33, 0.05] [65535, 65535, 3.33, 3.33] [270, 329, 0.01, 0.02] [263, 379, 0.01, 0.02] [1028, 65535, 0.05, 3.33] [324, 65535, 0.02, 3.33] [265, 333, 0.01, 0.02] [261, 340, 0.01, 0.02] [254, 335, 0.01, 0.02] [264, 337, 0.01, 0.02] [262, 342, 0.01, 0.02] [272, 341, 0.01, 0.02] [266, 339, 0.01, 0.02] [257, 341, 0.01, 0.02] [256, 329, 0.01, 0.02] [235, 351, 0.01, 0.02] [231, 344, 0.01, 0.02] [230, 338, 0.01, 0.02] [226, 338, 0.01, 0.02] [228, 340, 0.01, 0.02]df3[:5]newdf = pd.concat([df3,labels], axis=1) rnewdf = newdf.rename(index=str, columns={"0": "label"}) rnewdf[:5] from sklearn.utils import shuffle rnewdf = shuffle(newdf) rnewdf[:10] rnewdf=rnewdf.fillna(0) # Separate the dataset intot train data and test data # newdf1 = np.random.rand(len(rnewdf)) < 0.8 train = rnewdf[newdf1] test = rnewdf[~newdf1] train[250:260] trainfeatures = train.iloc[:, :-1] trainlabel = train.iloc[:, -1:] testfeatures = test.iloc[:, :-1] testlabel = test.iloc[:, -1:] from keras.utils import np_utils from sklearn.preprocessing import LabelEncoder X_train = np.array(trainfeatures) y_train = np.array(trainlabel) X_test = np.array(testfeatures) y_test = np.array(testlabel) lb = LabelEncoder() y_train = np_utils.to_categorical(lb.fit_transform(y_train)) y_test = np_utils.to_categorical(lb.fit_transform(y_test)) y_train X_train.shape # Changing dimension for CNN model # x_traincnn =np.expand_dims(X_train, axis=2) x_testcnn =np.expand_dims(X_test, axis=2) model = Sequential() model.add(Conv1D(256, 5,padding='same',input_shape=(216,1))) model.add(Activation('relu')) model.add(Conv1D(128, 5,padding='same')) model.add(Activation('relu')) model.add(Dropout(0.1)) model.add(MaxPooling1D(pool_size=(10))) # model.add(MaxPooling1D(pool_size=(8)))# model.add(Conv1D(128, 5,padding='same',)) model.add(Activation('relu')) #model.add(Conv1D(128, 5,padding='same',)) #model.add(Activation('relu')) #model.add(Conv1D(128, 5,padding='same',)) #model.add(Activation('relu')) #model.add(Dropout(0.2)) model.add(Conv1D(128, 5,padding='same',)) model.add(Activation('relu')) model.add(Flatten()) model.add(Dense(10)) model.add(Activation('softmax')) opt = keras.optimizers.rmsprop(lr=0.00001, decay=1e-6) model.summary() model.compile(loss='categorical_crossentropy', optimizer=opt,metrics=['accuracy']) # train the CNN model "you can reduce the (epochs)" cnnhistory=model.fit(x_traincnn, y_train, batch_size=16, epochs=300, validation_data=(x_testcnn, y_test)) plt.plot(cnnhistory.history['loss']) plt.plot(cnnhistory.history['val_loss']) plt.title('Model loss') plt.ylabel('loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper right') plt.show() # Sigmoid plt.plot(cnnhistory.history['acc']) plt.plot(cnnhistory.history['val_acc']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # Saving the model# model_name = 'Emotion_Voice_Detection_Model.h5' save_dir = os.path.join(os.getcwd(), 'saved_models') # Save model and weights if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, 
model_name) model.save(model_path) print('Saved trained model at %s ' % model_path) import json model_json = model.to_json() with open("model.json", "w") as json_file: json_file.write(model_json) # Loading the model from the saved loction # # loading json and creating model # from keras.models import model_from_json json_file = open('model.json', 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights("saved_models/Emotion_Voice_Detection_Model.h5") print("Loaded model from disk") # evaluate loaded model on test data loaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) score = loaded_model.evaluate(x_testcnn, y_test, verbose=0) print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100)) # Predicting emotions on the test data # preds = loaded_model.predict(x_testcnn, batch_size=32, verbose=1) preds preds1=preds.argmax(axis=1) preds1 abc = preds1.astype(int).flatten() predictions = (lb.inverse_transform((abc))) preddf = pd.DataFrame({'predictedvalues': predictions}) preddf[:10] actual=y_test.argmax(axis=1) abc123 = actual.astype(int).flatten() actualvalues = (lb.inverse_transform((abc123))) actualdf = pd.DataFrame({'actualvalues': actualvalues}) actualdf[:10] finaldf = actualdf.join(preddf) # The % of the Actual emotions v/s the Predicted emotions # finaldf[70:100] finaldf.groupby('actualvalues').count() finaldf.groupby('predictedvalues').count() # real time Input speech # # Change the file name after RawData/ .wav data, sampling_rate = librosa.load('SAVEE Dataset/01-01-01-01 (25).wav') %pylab inline import os import pandas as pd import librosa import glob plt.figure(figsize=(15, 5)) librosa.display.waveplot(data, sr=sampling_rate) sr,x = scipy.io.wavfile.read('SAVEE Dataset/01-01-01-01 (25).wav') ## Parameters: 10ms step, 30ms window nstep = int(sr * 0.01) nwin = int(sr * 0.03) nfft = nwin window = np.hamming(nwin) ## will take windows x[n1:n2]. generate ## and loop over n2 such that all frames ## fit within the waveform nn = range(nwin, len(x), nstep) X = np.zeros( (len(nn), nfft//2) ) for i,n in enumerate(nn): xseg = x[n-nwin:n] z = np.fft.fft(window * xseg, nfft) X[i,:] = np.log(np.abs(z[:nfft//2])) plt.imshow(X.T, interpolation='nearest',origin='lower',aspect='auto') plt.show() #livedf= pd.DataFrame(columns=['feature']) X, sample_rate = librosa.load('SAVEE Dataset/01-01-01-01 (25).wav', res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5) sample_rate = np.array(sample_rate) mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13),axis=0) featurelive = mfccs livedf2 = featurelive from oct2py import octave as oct oct.eval("'SAVEE Dataset/01-01-01-01 (25).wav'") oct.eval("myscript") oct.eval("'SAVEE Dataset/01-01-01-01 (25).wav'") !pip install octave-kernel livedf2= pd.DataFrame(data=livedf2) livedf2 = livedf2.stack().to_frame().T livedf2 twodim= np.expand_dims(livedf2, axis=2) livepreds = loaded_model.predict(twodim, batch_size=32, verbose=1) livepreds livepreds1=livepreds.argmax(axis=1) liveabc = livepreds1.astype(int).flatten() livepredictions = (lb.inverse_transform((liveabc))) livepredictions's Game of LifeThe cellular automata game *Life*, invented by the mathematician [](https://en.wikipedia.org/wiki/John_Horton_Conway), makes a fun programming exercise. 
Let's review the [rules](http://en.wikipedia.org/wiki/Conway%27s_Game_of_Life):The *world* of the Game of Life is an infinite two-dimensional orthogonal grid of *cells*, each of which is in one of two possible states, *live* or *empty*. Each cell has eight *neighbors*, the cells that are horizontally, vertically, or diagonally adjacent. At each step in time, the following rules are applied to create the next *generation*:+ Any live cell with two or three live neighbors lives on to the next generation.+ Any empty cell with exactly three live neighbors becomes a live cell in the next generation.+ All other cells are empty in the next generation.For example, in the diagram below, "`@`" cells are live. In the transition from Generation 0 to 1, the cell marked "`,`" becomes empty (dies off) because it has zero live neighbors. In the next transition, a fourth `@` becomes live, because it has 3 live neighbors. All other cells stay the same. . . . . . . . . . . . . . . . . . . @ . . . . , . . . . . . . @ . . . . @ . . . . @ @ . . . @ @ . . . @ @ . . . @ @ . . . . . . . . . . . . . . . . . Gen 0 Gen 1 Gen 2 The world continues to evolve by these rules for as long as you care to observe. Developing a Life ProgramTo create a program to play Life, start with the vocabulary of concepts:+ **World**+ **Cell**+ **Live/Empty**+ **Neighbors**+ **Next Generation**+ **Display**+ **Live Neighbor Counts**and consider how to implement them:+ **World**: The state of the world must represent which cells are empty and which are live. The tricky part is that the number of cells is infinite, and we can't store an infinite array in a finite computer. I can think of three ways to deal with this problem: 1. **Change the rules**; make the world finite instead of infinite. (Cells at the edge of the world have fewer neighbors, or perhaps they wrap around to the other side of the world.) 2. Use a **finite rectangular window** that covers all the live cells in the infinite grid. As the world evolves, this window may have to grow or shift.Example: `world = [[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0]]` 3. Represent a world as a **set of live cells.** This set will grow and shrink in size from one generation to the next, but we don't have to worry about overflowing the edges of an array.Example: `world = {(3, 1), (1, 2), (1, 3), (2, 3)}` I will go with this choice.+ **Cell**: Each cell will be represented as an (x, y) pair of integer coordinates. Example: `cell = (1, 2)`.+ **Live/Empty**: A cell is live if it is a member of the set of live cells. Example: "`cell in world`" is True, given the definition of `cell` and `world` above, so `cell` is live.+ **Neighbors**: The cell `(x, y)` has eight neighbors, formed by adding or subtracting 1 from `x` or `y` or both. We can definea function `neighbors(cell)` to return this set.Example: `neighbors((8, 8)) == [(7, 7), (8, 7), (9, 7), (7, 8), (9, 8), (7, 9), (8, 9), (9, 9)]`+ **Display**: We will need some way to display the state of the world. Let's defer that for now.+ **Next Generation**: We can define a function, `next_generation(world)`, that takes a world as input and returnsa new world with the new set of live cells according to the rules.Example: `next_generation({(3, 1), (1, 2), (1, 3), (2, 3)}) == {(1, 2), (1, 3), (2, 3)}`+ **Live Neighbor Counts**: I need to know how many live neighbors each cell has. A good way to represent this is a dict of `{(x, y): count}`. But which cells need to be the keys of this dict? 
We can start with the live cells, and also add any cells neighboring the live cells. An easy way to generate this dict is to create a `Counter` and pass it every neighbor of every live cell. This may feel like we're doing the counting "backwards." Instead of asking "for each cell, how many live neighbors does it have?" we are saying "for each live cell, increment the count of each of its neighbors." The two amount to the same thing because *neighbor* is symmetric—if P is a neighbor of Q, then Q is a neighbor of P. Below we see the neighbor counts for each of the three generations; in each generation the top diagram gives the neighbor counts for the empty cells, and the bottom diagram gives the counts for the live cells. This is just to make the diagram easier to read; in the code these are combined into one `Counter`. Here are the counts: . . 1 1 1 . . . . . . . . . . 1 1 2 @ 1 1 1 1 , . 1 2 2 1 . 2 @ 4 2 1 2 @ 3 1 . 2 @ @ 2 . 2 @ @ 1 . 2 @ @ 1 . 2 @ @ 2 . 1 2 2 1 . 1 2 2 1 . 1 2 2 1 . Gen 0 Gen 1 Gen 2 . . . . . . . . . . . . . . . . . . 0 . . . . , . . . . . . . 2 . . . . 2 . . . . 3 3 . . . 2 2 . . . 2 2 . . . 3 3 . . . . . . . . . . . . . . . . . Here is the implementation. Note that in `next_generation` the `neighbor_counts` is used two ways so I decided to use two different names for clarity: `possible_cells` is used to iterate over all cells that might be live, and `counts` is used to check if a cell has the right number of neighbors.from collections import Counter def next_generation(world): "The set of live cells in the next generation." possible_cells = counts = neighbor_counts(world) return {cell for cell in possible_cells if (counts[cell] == 3) or (counts[cell] == 2 and cell in world)} def neighbor_counts(world): "A {cell: int} counter of the number of live neighbors for each cell that has neighbors." return Counter(nb for cell in world for nb in neighbors(cell)) def neighbors(cell): "All 8 adjacent neighbors of cell." (x, y) = cell return [(x-1, y-1), (x, y-1), (x+1, y-1), (x-1, y), (x+1, y), (x-1, y+1), (x, y+1), (x+1, y+1)]We can see how this works:world = {(3, 1), (1, 2), (1, 3), (2, 3)} next_generation(world) next_generation(next_generation(world)) neighbors((2, 4)) neighbor_counts(world)`run` is a function to play n generations of Life:def run(world, n): "Run the world for n generations. No display; just return the nth generation." for g in range(n): world = next_generation(world) return world run(world, 100)DisplayNow let's see how to display worlds. We'll consider a rectangular window on the infinite plane, specified as ranges of `Xs` and `Ys` coordinates. The function `picture` turns a world into a string showing what the world looks like:import time from IPython.display import clear_output, display_html LIVE = '@' EMPTY = '.' PAD = ' ' def picture(world, Xs, Ys): "Return a picture: a grid of characters representing the cells in this window." def row(y): return PAD.join(LIVE if (x, y) in world else EMPTY for x in Xs) return '\n'.join(row(y) for y in Ys) print(picture(world, range(5), range(5))). . . . . . . . @ . . @ . . . . @ @ . . . . . . .The function `display_run` runs the world for `n` steps, displaying the picture at each step:def display_run(world, n=10, Xs=range(10), Ys=range(10), pause=0.2): "Step and display the world for the given number of generations." 
for g in range(n + 1): html = ('Generation {}, Population {}\n{}' .format(g, len(world), pre(picture(world, Xs, Ys)))) clear_output() display_html(html, raw=True) time.sleep(pause) world = next_generation(world) def pre(text): return '<pre>' + text + '</pre>
' display_run(world, 5, range(5), range(5))Interesting WorldsNow let's take a look at some initial worlds that *Life* enthusiasts have discovered. It would be tedious to enumerate these with an explicit set of `(x, y)` coordinates, so we will define the function `shape` that takes a picture as input and returns a world; `shape` and `picture` are more-or-less inverses.def shape(picture, offset=(3, 3)): "Convert a graphical picture (e.g. '@ @ .\n. @ @') into a world (set of cells)." cells = {(x, y) for (y, row) in enumerate(picture.splitlines()) for (x, c) in enumerate(row.replace(PAD, '')) if c == LIVE} return move(cells, offset) def move(cells, offset): "Move/Translate/slide a set of cells by a (dx, dy) displacement/offset." (dx, dy) = offset return {(x+dx, y+dy) for (x, y) in cells} blinker = shape("@@@") block = shape("@@\n@@") beacon = block | move(block, (2, 2)) toad = shape(".@@@\n@@@.") glider = shape(".@.\n..@\n@@@") rpentomino = shape(".@@\n@@.\n.@.", (36, 20)) line = shape(".@@@@@@@@.@@@@@...@@@......@@@@@@@.@@@@@", (10, 10)) growth = shape("@@@.@\n@\n...@@\n.@@.@\n@.@.@", (10, 10))Here is how `shape` and `move` work:shape("""@ @ . . @ @""") block move(block, (100, 200))Let's run some examples. If you are viewing a static notebook, you will only see the last generation; rerun each cell to see all the generations.display_run(blinker) display_run(beacon) display_run(toad) display_run(glider, 15) display_run(rpentomino, 130, range(48), range(40)) zoo = (move(blinker, (5, 25)) | move(glider, (8, 13)) | move(blinker, (20, 25)) | move(beacon, (24, 25)) | move(toad, (30, 25)) | move(block, (13, 25)) | move(block, (17, 33))) display_run(zoo, 160, range(48), range(40)) display_run(growth, 100, range(40), range(40))Outside of IPythonIf you want to run this code in your terminal, outside of an Ipython/Jupyter notebook, you can remove the line: from IPython.display import clear_output, display_html and add these lines: def clear_output(): print("\033[;H\033[2J") ANSI terminal home and clear def display_html(text, raw=False): print(text) def pre(text): return text Coding KataI once attended a [code kata](https://en.wikipedia.org/wiki/Kata_(programming%29) in which one of the exercises was to write the Game of Life without using any conditional (e.g. `if`) statements. I did it by using roughly the program shown here, but changing the lone `if` to a `filter` in `next_generation`:def next_generation(world): "The set of live cells in the next generation." possible_cells = counts = neighbor_counts(world) def good(cell): return counts[cell] == 3 or (counts[cell] == 2 and cell in world) return set(filter(good, possible_cells))Simulación de los Resultados de las Eliminatorias usando PythonA falta de 2 fechas para que se termine de jugar la eliminatoria sudamericana para el mundial de Catar 2022, en los medios de comunicación se han barajado múltiples escenarios de las posibilidades que tiene Ecuador de llegar a la cita mundialista. Con eso en mente, desarrollé un tutorial donde se muestra como podemos realizar una simulación de los resultados de los partidos faltantes y analizar las posibilidades que tiene la tricolor de obtener un cupo directo o de llegar al repechaje.En este tutorial te mostraré como descargar la tabla de posiciones y el calendario de juegos restantes desde una página web, simular los resultados de los partidos y presentar los resultados finales. 
Process DescriptionTo determine which teams will qualify for the Qatar World Cup, we need to consider the table positions after the two final matchdays have been played. We will therefore simulate each of the remaining matches and compute the final points and goal difference to define the standings. Then, we will repeat the simulations many times to determine the final probability for each national team.This tutorial is aimed at people with basic or intermediate knowledge of Python. You can follow the tutorial step by step by downloading the Notebook directly from Github. If you have doubts, questions, or suggestions, you can write to me at Required PackagesTo carry out our simulation, we will need tools for web scraping, simulation, and data analysis and visualization.The packages we will use are requests to request information from a web page, Beautiful Soup to process the page content, pandas for data manipulation, numpy to generate random samples, and matplotlib for the visualizations.import requests from bs4 import BeautifulSoup import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inlineDownloading Data from the InternetThe first step is to determine the initial state of the problem under analysis, so that we can later understand how the simulations will change the situation going forward. In our example, we want to know the current standings table and simulate how it will end up after matchdays 17 and 18 have been "played".We will start by downloading the current standings table. I recommend looking for a web page that shows the results as simply as possible, to make the later work easier. After a few minutes of browsing, I found that the Fútbol Peruano page is exactly what we are looking for.We will request the information from the web page using requests and process the text using Beautiful Soup to find the standings table, which is identified by the tag class : table table-sm mb-0. The latter can be determined using Inspect Element or its equivalent in your web browser. Finally, we will read the results into a pandas dataframe.url = 'https://www.futbolperuano.com/eliminatorias/tabla-de-posiciones' page = requests.get(url) soup = BeautifulSoup(page.text, 'lxml') table = soup.find('table', attrs = {'class' : 'table table-sm mb-0'}) # We convert the table to text and, since pd.read_html returns a list, keep only the first element. posiciones = pd.read_html(str(table))[0] posicionesAs you can see, we have the same information shown on the web page. However, the flag logos of each country were interpreted as text, which caused each country name to be duplicated. Next, we will fix that issue and drop the columns we will not use in the simulationposiciones['Equipo'] = posiciones.Equipo.apply(lambda x : (x[:(len(x)//2)])) posiciones.drop(['Pos', 'PJ', 'G', 'E', 'P', 'GF', 'GC'], axis=1, inplace=True)What follows is to build a list of the remaining matches, which we will obtain from the same web page.
While developing the tutorial, I noticed that the match information was nested much more deeply than the standings table, so I had to write a function to extract the list of countries and organize it properly by home and away.juegos = 'https://www.futbolperuano.com/eliminatorias/resultados' partidos = BeautifulSoup(requests.get(juegos).text, 'lxml') def partidos_restantes(partidos): # identifier of the fixture list on the web page atr = 'score_contenido_TorneoTabs_collapse1_{}' todos = [] # repeat the process for matchdays 17 and 18. for a in ['17', '18']: # find the information for this matchday fecha = partidos.find('div', attrs = {'id':atr.format(a)}) # find the list of countries fecha = fecha.findAll('span', attrs={'class':'team-name d-md-none'}, text=True) # filter the home teams using even indices locales = [x.text for x in fecha if fecha.index(x) in list(range(0, len(fecha), 2))] # the away teams have odd indices visitantes = [x.text for x in fecha if fecha.index(x) in list(range(1, len(fecha), 2))] # pair up home and away teams. todos += list(zip(locales, visitantes)) return todosRunning the function shows the list of matches for matchdays 17 and 18todos = partidos_restantes(partidos) todosSimulating the match resultsTo determine the winner of each match, we will draw a random sample from the possible outcomes. For demonstration purposes we assign the same probability to a home win, an away win, and a draw (which corresponds to a uniform probability distribution, like rolling a die and getting any value between 1 and 6). This can easily be changed using the p argument of np.random.choice to assign different probabilities to the teams, for example a higher win probability for Argentina and Brazil given their performance in the qualifiers. Likewise, the number of goals will be random, with a minimum of 1 and a maximum of 6 (the most goals scored by a team in these qualifiers).Once we know the simulated result of each match, we update the standings table by adding 3 points to the winner and the corresponding number of goals. In the case of a draw, we add 1 point to each team.goles_max = np.arange(1, 7) # goals from 1 up to and including 6, matching the maximum stated above def clasificacion(cotejo, goles_max, posiciones): # 0 means the home team wins, 1 the away team, 2 a draw. Each outcome has its corresponding points. resultados = {0 : 3, 1 : 3, 2 : 1} # Random sampling. We pick a number among 0, 1 and 2. The result is an array, so we take the first element. simulacion = np.random.choice(np.array([0, 1, 2]), 1)[0] puntos = resultados[simulacion] # If there is a winner try: # cotejo is a (home, away) pairing, in that order. # We pick the winner according to the simulation. pais = cotejo[simulacion] # We add points and goals in the standings table. posiciones.loc[posiciones['Equipo'] == pais , 'Pts'] += puntos posiciones.loc[posiciones['Equipo'] == pais , 'DG'] += np.random.choice(goles_max, 1)[0] # In the case of a draw, the index is larger than the number of elements in the (home, away) pair except IndexError: # We add 1 point to each team posiciones.loc[posiciones['Equipo'] == cotejo[0] , 'Pts'] += puntos posiciones.loc[posiciones['Equipo'] == cotejo[1] , 'Pts'] += puntosNow that we have simulated the two remaining matchdays, we need to evaluate each team's final position to determine whether they earned a spot in Qatar.
To do this, we will sort the teams in descending order (highest to lowest points and goal difference) and obtain a list of teams and positions.def reporte(posiciones): posiciones.sort_values(by=['Pts', 'DG'], ascending=False, inplace=True) posiciones.set_index(pd.Index(np.arange(1, 11)), inplace=True) final = dict(zip(posiciones.Equipo, posiciones.index)) return finalFinally, we will repeat this process n times (2500 in our example) to observe the variability of the results in a larger sample. For the final tally, we will count how many times a team finished in a given position (a frequency table). It is important to note that each simulation must start from the current standings table, which is why we make a copy of the original table at each iteration of the simulation.l = [] for i in range(2500): df = posiciones.copy() for p in todos: clasificacion(p, goles_max, df) l.append(reporte(df)) ok = pd.DataFrame.from_records(l) ok.apply(pd.value_counts)We can see that Ecuador finished in 3rd place 2224 times out of the 2500 simulations, and 276 times in 4th. In no case did it finish 5th or lower. In other words, we should most likely start packing our bags for QatarFor a clearer picture, we will express the results as percentages and replace the null values with zeros.g = ok.apply(pd.value_counts, normalize=True) g.set_index(pd.Index(list(range(1,11))), inplace=True) g.fillna(0, inplace=True) g = g.apply(lambda x: x*100) gIt is more instructive to look at the results graphically. We select only the countries that, together with Ecuador, are competing for 3rd, 4th, and 5th place.plt.style.use('fivethirtyeight') g.loc[[3, 4, 5], ['Ecuador', 'Uruguay', 'Perú', 'Chile', 'Colombia']].plot.bar(rot=0, figsize=(4, 5), xlabel='Posición final', ylabel='Probabilidad %') with plt.xkcd(): g.loc[[3, 4, 5], ['Ecuador', 'Uruguay', 'Perú', 'Chile', 'Colombia']].plot.bar(rot=0, figsize=(4, 5), xlabel='Posición final', ylabel='Probabilidad %')Tissue Detection**Overview:** This includes tools to detect tissue from an item (slide) using its thumbnail. The basic functionality includes a series of gaussian smoothing and otsu thresholding steps to detect background versus foreground pixels. Optionally, an initial step is performed whereby color deconvolution is used to separate hematoxylin and eosin stains (assuming H&E stained slides) to make sure only cellular areas are segmented. This proves to be useful in getting rid of sharpie markers.
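As a rough illustration of the smoothing-plus-Otsu idea just described, here is a minimal sketch using scikit-image. This is not the HistomicsTK implementation (which is used below via histomicstk.saliency.tissue_detection); it skips the optional color-deconvolution step and the size filtering described next.

```python
from skimage.color import rgb2gray
from skimage.filters import gaussian, threshold_otsu
from skimage.measure import label

def rough_tissue_mask(thumbnail_rgb, sigma=2.0):
    """Toy sketch: smooth a grayscale thumbnail and Otsu-threshold it.

    Tissue is darker than the bright slide background, so we keep pixels
    below the Otsu threshold. Not the HistomicsTK pipeline.
    """
    gray = rgb2gray(thumbnail_rgb)          # floats in 0 (dark) .. 1 (bright)
    smoothed = gaussian(gray, sigma=sigma)  # suppress texture and noise
    thresh = threshold_otsu(smoothed)       # global two-class threshold
    mask = smoothed < thresh                # foreground = darker pixels
    return label(mask), mask                # labeled regions + binary mask
```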
A size threshold is used to keep only largest contiguous tissue regions.![tissuedetectionMethod](https://user-images.githubusercontent.com/22067552/80084299-af06d980-8524-11ea-9521-a3ec48253dfc.png)**Where to look?**```|_ histomicstk/ |_saliency/ |_tissue_detection.py |_tests/ |_test_saliency.py```import girder_client import numpy as np from matplotlib import pylab as plt from matplotlib.colors import ListedColormap from histomicstk.saliency.tissue_detection import ( get_slide_thumbnail, get_tissue_mask) %matplotlib inlineConstants and PrepworkAPIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/' # SAMPLE_SLIDE_ID = '5d586d57bd4404c6b1f28640' SAMPLE_SLIDE_ID = "5d817f5abd4404c6b1f744bb" gc = girder_client.GirderClient(apiUrl=APIURL) # gc.authenticate(interactive=True) _ = gc.authenticate(apiKey='')First, let's fetch the slide thumbnailthumbnail_rgb = get_slide_thumbnail(gc, SAMPLE_SLIDE_ID) plt.imshow(thumbnail_rgb)(Optional) Color normalization of thumbnailSee documentation for ``color_normalization`` module. Now we fetch the tissue mask This is the method you want to useprint(get_tissue_mask.__doc__)Get binary tissue mask from slide thumbnail. Parameters ----------- thumbnail_rgb : np array (m, n, 3) nd array of thumbnail RGB image deconvolve_first : bool use hematoxylin channel to find cellular areas? This will make things ever-so-slightly slower but is better in getting rid of sharpie marker (if it's green, for example). Sometimes things work better without it, though. stain_matrix_method : str see deconv_color method in seed_utils n_thresholding_steps : int number of gaussian smoothign steps sigma : float sigma of gaussian filter min_size : int minimum size (in pixels) of contiguous tissue regions to keep Returns -------- np bool array largest contiguous tissue region. np int32 array each unique value represents a unique tissue regionGet the tissue maskslabeled, mask = get_tissue_mask( thumbnail_rgb, deconvolve_first=True, n_thresholding_steps=2, sigma=0., min_size=30)/home/mtageld/Desktop/HistomicsTK/histomicstk/preprocessing/color_conversion/rgb_to_sda.py:48: RuntimeWarning: divide by zero encountered in log im_sda = -np.log(im_rgb/(1.*I_0)) * 255/np.log(I_0)Visualize the resultvals = np.random.rand(256,3) vals[0, ...] = [0.9, 0.9, 0.9] cMap = ListedColormap(1 - vals) f, ax = plt.subplots(1, 3, figsize=(20, 20)) ax[0].imshow(thumbnail_rgb) ax[1].imshow(labeled, cmap=cMap) # all tissue regions ax[2].imshow(mask, cmap=cMap) # largest tissue region plt.show()Note effect of hyperparametersfor deconvolve_first in [False, True]: for n_thresholding_steps in [1, 2]: labeled, mask = get_tissue_mask( thumbnail_rgb, deconvolve_first=deconvolve_first, n_thresholding_steps=n_thresholding_steps, sigma=0., min_size=30) f, ax = plt.subplots(1, 3, figsize=(20, 5)) ax[0].imshow(thumbnail_rgb) ax[1].imshow(labeled, cmap=cMap) ax[2].imshow(mask, cmap=cMap) plt.suptitle("deconvolve = %s, n_thresholding_steps = %d" % (deconvolve_first, n_thresholding_steps), fontsize=20) plt.show()Using the os library path to the Train folder is being given and a list of files in the real and fake is obtained. 
Using that list of file names the files are being readpath = "/content/drive/My Drive/NLP/Train/Real" os.chdir(path) real_files = os.listdir(path) real_corpus = list() for filename in real_files: with open(os.path.join(path, filename), 'r') as f: text = f.read() real_corpus.append(text) path = "/content/drive/My Drive/NLP/Train/Fake" os.chdir(path) fake_files = os.listdir(path) fake_corpus = list() for filename in fake_files: with open(os.path.join(path, filename), 'r') as f: text = f.read() fake_corpus.append(text)In the function below the real and fake corpus is being tokenised into a list of words using spacy and vocabulary is being extracted Making Vocabularyreal_copy = real_corpus.copy() #***********making copies so the actual corpus does no get modified otherwise the corpus gets modified************ fake_copy = fake_corpus.copy() def extract_vocab(): #extracting vocabulary from the training corpus vocab = list() #list to contain all the words of the vocabulary i = 0 #for real traing corpus while i < len(real_corpus): #traversing through the real news text files doc = nlp(real_copy[i]) #using spacy tokenizer for word in doc: vocab.append(str(word)) #appending word to the vocab list i += 1 #for fake training corpus i = 0 while i < len(fake_corpus): #traversing through the fake news text files doc = nlp(fake_copy[i]) #using spacy tokenizer for word in doc: vocab.append(str(word)) #appending word to the vocab list i += 1 return vocabMaking a copy of corpus just to make sure the original corpuses are not modified in any wayreal_copy = list() fake_copy = list() real_copy = real_corpus.copy() fake_copy = fake_corpus.copy()To count the freq of words in both the real and fake files corpusreal_dict = dict() #dictionary to store unigram of words from the real corpus fake_dict = dict() #dictionary to store unigram of words from the fake corpus def unigram(v): i = 0 while i < len(real_corpus): #traversing through the real news text files doc = nlp(real_copy[i]) for word in doc: if str(word) in v: #if word exists in the vocabulary if str(word) not in real_dict.keys(): #if the word is not already in the dictionary real_dict[str(word)] = 1 #then initialise it by 1 elif str(word) in real_dict: #if it is in the dict real_dict[str(word)] += 1 #add 1 to the freq i += 1 i = 0 while i < len(fake_corpus): #traversing through the fake news text files doc = nlp(fake_copy[i]) for word in doc: if str(word) in v: #if word exists in the vocabulary if str(word) in fake_dict.keys(): #if the word is already in the dictionary fake_dict[str(word)] += 1 #then add 1 to the count else: #if it is not in the dict fake_dict[str(word)] = 1 #initialise the count by 1 i += 1Function to count all the texts in the training corpus i.e all the filesdef count_texts(r, f): #counting all the texts in the training corpus n = len(r) + len(f) #both real and fake texts return nFunction to count all the words in the class by using spacy to tokenise them and making a list of wordsdef count_all_words(class_text): words_list = list() #to store all the word in the class text = list() text = class_text.copy() i = 0 while i < len(class_text): #traversing through the class texts doc = nlp(text[i]) for word in doc: words_list.append(str(word)) #making a list of words by appending each word in the word list i += 1 return len(words_list)function to count how many times a word has appeared in a particular classdef count_token_of_words(word, Doc): word_count = 0 word_count = Doc[word] #Doc is a dict that contains unigrams of all the words of the 
class return word_count #returning the total number of times has has appeared in the said classMaking dict() for conditional probability, prior and scorecond_prob = dict() #dictionary to store all the conditional probabilities cond_prob['Real'] = dict() cond_prob['Fake'] = dict() prior = dict() #dictionary to store prior of real and fake corpus score = dict() #dictionary to store score of real and fake corpusTraining through the multinomial Naive Bayes theoremdef TrainMultinomialNB(v, r, f, real_cor, fake_cor, real_dic, fake_dic): Vocabulary = v #extracting vocab of the whole corpus N = count_texts(r, f) #counting all the texts in the corpus Nc = len(r) #count texts in each class Nw = count_all_words(real_cor) #count words in all the texts of the said class prior['Real'] = Nc/N #calculate prior Doc_c = real_dic #dict() containing all teh counts of the words that appeared for w in Vocabulary: #if word in vocab if w in Doc_c: #and word in doc Ni = count_token_of_words(w, Doc_c) #get count of that word cond_prob['Real'][w] = (Ni + 1) / (Nw + len(Vocabulary)) #calculate conditional probability Nc = 0 Nw = 0 Doc_c = dict() Nc = len(f) #count texts in each class Nw = count_all_words(fake_cor) #count words in all the texts of the said class prior['Fake'] = Nc/N #calculate prior Doc_c = fake_dic #dict() containing all teh counts of the words that appeared for w1 in Vocabulary: #if word in vocab if w1 in Doc_c: #and word in doc Ni = count_token_of_words(w1, Doc_c) #get count of that word cond_prob['Fake'][w1] = (Ni + 1) / (Nw + len(Vocabulary)) #calculate conditional probability return Vocabulary, prior, cond_prob #return vocab, prior and conditional prob v = extract_vocab() #extracting vocab unigram(v) #making unigrams of both classes Vocab, Prior, Cond_prob = TrainMultinomialNB(v, real_files, fake_files, real_corpus, fake_corpus, real_dict, fake_dict) #training through Naive BayesFunction to extract all the words from text by using spacydef extractWordsFromText(text): w_list = list() #list to append the words in doc = nlp(text) for word in doc: #for each word in the doc w_list.append(str(word)) #append word into the word list return w_list #return list of wordsApplying the Multinomial Theoremdef ApplyMultinomialNB(Cond_prob, Prior, test): Word_list = list() Word_list = extractWordsFromText(test) #getting all the words from the text score['Real'] = math.log(Prior['Real']) #get prior of the class for w in Word_list: #for each word in the list if str(w) in Cond_prob['Real']: score['Real'] += math.log(Cond_prob['Real'][w]) #get its conditional prob and add it to the prior and so on score['Fake'] = math.log(Prior['Fake']) #get prior of the class for w in Word_list: #for each word in the list if str(w) in Cond_prob['Fake']: score['Fake'] += math.log(Cond_prob['Fake'][w]) #get its conditional prob and add it to the prior and so on return score #return score def Detection(): path = "/content/drive/My Drive/NLP/Test/Real" os.chdir(path) r_files = os.listdir(path) #getting a list of all the txt file names in the test real folder path2 = "/content/drive/My Drive/NLP/Test/Fake" os.chdir(path2) f_files = os.listdir(path2) #getting a list of all the txt file names in the test fake folder answer = list() #list to get the actual answer after applying the algo predicted = list() #list to contain the predicted answer for filename in r_files: #traversing through real files with open(os.path.join(path, filename), 'r') as f: #reading all the texts from the real class test = f.read() final = dict() final = 
ApplyMultinomialNB(Cond_prob, Prior, test) #applying the multinomial algo argmax = max(final, key=final.get) #get max value of score predicted.append('Real') #predicted answers answer.append(argmax) #answer after applying algo for filename in f_files: #traversing through fake files with open(os.path.join(path, filename), 'r') as f: #reading all the texts from the fake class test = f.read() final = dict() final = ApplyMultinomialNB(Cond_prob, Prior, test) #applying the nultinomial algo argmax = max(final, key=final.get) #get max value of score predicted.append('Fake') #predicted answers answer.append(argmax) #answer after applying algo accuracy = accuracy_score(answer, predicted) #getting accuracy precision = precision_score(answer, predicted, average = 'macro') #getting precision recall = recall_score(answer, predicted, average = 'macro') #getting recall f1 = f1_score(answer, predicted, average = 'macro') #getting f1 mearsure print("Accuracy = ", accuracy) print("Precision = ", precision) print("Recall = ", recall) print("F1 = ", f1)Without Removing stopwords and duplicates from each textDetection() #Detecting real vs fake newsAccuracy = 0.5 Precision = 0.5056547619047619 Recall = 0.505568255084696 F1 = 0.4994093023934192After removing Stopwords Reading the stop words filewith open('/content/drive/My Drive/NLP/stopwords-ur.txt', mode = 'r') as f: stopwords = f.read() f.close()Making a copy of corpus just to make sure the original corpuses are not modified in any way.Doing this again for without the stopwords and text duplicatesreal_copy = list() fake_copy = list() real_copy = real_corpus.copy() fake_copy = fake_corpus.copy()Extracting vocabulary and removing stop wordsdef extract_vocab_no_stopwords(): #extracting vocabulary from the training corpus vocab = list() #list to contain all the words of the vocabulary i = 0 while i < len(real_corpus): #traversing through the real news text files doc = nlp(real_copy[i]) for word in doc: if str(word) not in stopwords: #if word is not in the stopwords list only then add to the vocab vocab.append(str(word)) i += 1 i = 0 while i < len(fake_corpus): #traversing through the fake news text files doc = nlp(fake_copy[i]) for word in doc: if str(word) not in stopwords: #if word is not in the stopwords list only then add to the vocab vocab.append(str(word)) i += 1 return vocabMaking unigram of the corpuses without stopwordsreal_dict = dict() #dictionary to store unigram of words from the real corpus fake_dict = dict() #dictionary to store unigram of words from the fake corpus real_copy = list() fake_copy = list() real_copy = real_corpus.copy() fake_copy = fake_corpus.copy() def unigram_without_sw(v): i = 0 while i < len(real_corpus): #traversing through the real news text files doc = nlp(real_copy[i]) for word in doc: if str(word) not in stopwords: if str(word) in v: #if word exists in the vocabulary if str(word) not in real_dict.keys(): #if the word is not already in the dictionary real_dict[str(word)] = 1 #then initialise it by 1 elif str(word) in real_dict: #if it is in the dict real_dict[str(word)] += 1 #add 1 to the freq i += 1 i = 0 while i < len(fake_corpus): #traversing through the fake news text files doc = nlp(fake_copy[i]) for word in doc: if str(word) not in stopwords: if str(word) in v: #if word exists in the vocabulary if str(word) in fake_dict.keys(): #if the word is already in the dictionary fake_dict[str(word)] += 1 #then add 1 to the count else: #if it is not in the dict fake_dict[str(word)] = 1 #initialise the count by 1 i += 1With only 
stopwords words removedcond_prob = dict() #dictionary to store all the conditional probabilities cond_prob['Real'] = dict() cond_prob['Fake'] = dict() prior = dict() #dictionary to store prior of real and fake corpus score = dict() #dictionary to store score of real and fake corpus v = extract_vocab_no_stopwords() #extracting vocab unigram_without_sw(v) #making unigrams of both classes Vocab, Prior, Cond_prob = TrainMultinomialNB(v, real_files, fake_files, real_corpus, fake_corpus, real_dict, fake_dict) #training through Naive Bayes Detection() #Detection of real vs fake news using Boolean Naive BayesAccuracy = 0.46946564885496184 Precision = 0.5038690476190476 Recall = 0.5048809791995195 F1 = 0.45477954936746756Removing duplicates from each textreal_copy = list() fake_copy = list() real_copy = real_corpus.copy() fake_copy = fake_corpus.copy() noDup_real = dict() #to store count of each word appearing in a real texts noDup_fake = dict() #to store count of each word appearing in a fake texts real_corpus_noDup = list() #list to store real text after removal of duplicates fake_corpus_noDup = list() #list to store fake text after removal of duplicates def remove_duplicates(): i = 0 while i < len(real_corpus): #traversing through real corpus w_list = real_copy[i].split() #splitting the text into words done = '' for word in w_list: if word not in done: #if word not already visited than add to list if word in noDup_real.keys(): #if word already exists in the dict because it might have been present in the last text than add 1 to count noDup_real[word] += 1 else: noDup_real[word] = 1 #else just initialise word count by 1 done = done + word + ' ' #concatenate words together for form the text without any duplicates real_corpus_noDup.append(done) #append to real corpus texts list i += 1 i = 0 while i < len(fake_corpus): #traversing through fake corpus w_list1 = fake_copy[i].split() #splitting the text into words done = '' for word in w_list1: if word not in done: #if word not already visited than add to list if word in noDup_fake.keys(): #if word already exists in the dict because it might have been present in the last text than add 1 to count noDup_fake[word] += 1 else: noDup_fake[word] = 1 #else just initialise word count by 1 done = done + word + ' ' #concatenate words together for form the text without any duplicates fake_corpus_noDup.append(done) #append to fake corpus texts list i += 1BOOLEAN NAIVE BAYES(With stopwords but duplicates removed from the coupus)cond_prob = dict() #dictionary to store all the conditional probabilities cond_prob['Real'] = dict() cond_prob['Fake'] = dict() real_copy = real_corpus.copy() fake_copy = fake_corpus.copy() prior = dict() #dictionary to store prior of real and fake corpus score = dict() #dictionary to store score of real and fake corpus v = extract_vocab() remove_duplicates() #remove duplicates from each text Vocab, Prior, Cond_prob = TrainMultinomialNB(v, real_files, fake_files, real_corpus_noDup, fake_corpus_noDup, noDup_real, noDup_fake) #Training through multinomial Naive Bayes Detection() #Detection of real vs fake news using Boolean Naive BayesAccuracy = 0.4580152671755725 Precision = 0.49386904761904765 Recall = 0.49203772418058134 F1 = 0.4407768157768158Removing Stopwords and duplicates from each text (Basically removing tsopwords from Boolean Naive Bayes)def remove_duplicates_and_stopwords(): i = 0 while i < len(real_corpus): #traversing through real corpus w_list = real_copy[i].split() #splitting the text into words done = '' for word in w_list: if 
word not in stopwords: #if word in not in the stopwords list if word not in done: #if word not already visited than add to list if word in noDup_real.keys(): #if word already exists in the dict because it might have been present in the last text than add 1 to count noDup_real[word] += 1 else: noDup_real[word] = 1 #else just initialise word count by 1 done = done + word + ' ' #concatenate words together for form the text without any duplicates real_corpus_noDup.append(done) #append to real corpus texts list i += 1 i = 0 while i < len(fake_corpus): #traversing through fake corpus w_list1 = fake_copy[i].split() #splitting the text into words done = '' for word in w_list1: if word not in stopwords: #if word in not in the stopwords list if word not in done: #if word not already visited than add to list if word in noDup_fake.keys(): #if word already exists in the dict because it might have been present in the last text than add 1 to count noDup_fake[word] += 1 else: noDup_fake[word] = 1 #else just initialise word count by 1 done = done + word + ' ' #concatenate words together for form the text without any duplicates fake_corpus_noDup.append(done) #append to fake corpus texts list i += 1 cond_prob = dict() #dictionary to store all the conditional probabilities cond_prob['Real'] = dict() cond_prob['Fake'] = dict() prior = dict() #dictionary to store prior of real and fake corpus score = dict() #dictionary to store score of real and fake corpus v = extract_vocab_no_stopwords() #extracting vocab without any stopwords in it remove_duplicates_and_stopwords() #remove duplicates from each text Vocab, Prior, Cond_prob = TrainMultinomialNB(v, real_files, fake_files, real_corpus_noDup, fake_corpus_noDup, noDup_real, noDup_fake) #Training through multinomial Naive Bayes Detection() #Detection of real vs fake news after stopwords and duplicatesAccuracy = 0.4580152671755725 Precision = 0.49839285714285714 Recall = 0.4977180527383367 F1 = 0.4339703018500487Efficient Frontier **Scenario: Portfolio optimization**> Efficient frontier is the Nobel Prize Winner Theory To Gain Higher Returns In Your Investment.Let’s consider you have \$10'000 of cash available and you are interested in investing it. Your aim is to invest the money for a year. Like any rational investor, you expect the final amount in a years time to be higher than the $10'000 amount you want to invest.There are many investment options available, such as buying a T-bill or company shares, etc. Some of the investment options are riskier than the others because they attract us to gain higher returns. Hence, the point to note is that there exists a risk-return trade-off.If we buy a number of assets such as shares of different companies then the total risk of the portfolio can be reduced due to diversification. This means that an investor can reduce the total risk and increase the return by choosing different assets with different proportions in a portfolio. This is due to the fact that the assets can be correlated with each other.We understood that the allocations (weights) of the assets can change the risk of the portfolio. Hence, we can generate 1000s of portfolios randomly where each portfolio will contain a different set of weights for the assets.We know that as we increase the number of portfolios, we will get closer to the real optimum portfolio. This is the brute force approach and it can turn out to be a time-consuming task. Furthermore, there is no guarantee that we will find the right allocations. 
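Before the setup, it may help to see the two quantities each random portfolio is scored on below: the expected return is the weighted average of the asset returns ($w^\top \mu$) and the risk comes from the portfolio variance ($w^\top \Sigma w$). A minimal illustration with invented numbers (the returns and covariances here are made up for the example, not taken from daily_returns.csv):

```python
import numpy as np

# Invented example: 3 assets with made-up annualised mean returns and covariances
mu = np.array([0.08, 0.12, 0.05])            # expected returns
cov = np.array([[0.04, 0.01, 0.00],
                [0.01, 0.09, 0.02],
                [0.00, 0.02, 0.03]])         # covariance matrix

w = np.random.rand(3)
w = w / w.sum()                              # random weights that sum to 1

port_return = w @ mu                         # w^T mu
port_variance = w @ cov @ w                  # w^T Sigma w
port_volatility = np.sqrt(port_variance)
sharpe = port_return / port_volatility       # risk-free rate of 0, as below
print(port_return, port_volatility, sharpe)
```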
Setup!wget -q --show-progress https://github.com/rian-dolphin/Efficient-Frontier-Python/raw/main/daily_returns.csv import pandas as pd import numpy as np from tqdm import tqdm import plotly import plotly.graph_objects as go from plotly.subplots import make_subplots import plotly.express as px import plotly.figure_factory as ff daily_returns = pd.read_csv('daily_returns.csv', index_col=0) daily_returns.head() #-- Get annualised mean returns mus = (1+daily_returns.mean())**252 - 1 #-- Get covariances #- Multiply by 252 to annualise it (square root time for volatility but no square root for variance) #- Note: 252 trading days in a year #- https://quant.stackexchange.com/questions/4753/annualized-covariance cov = daily_returns.cov()*252Create Random Portfolios#- How many assests to include in each portfolio n_assets = 5 #-- How many portfolios to generate n_portfolios = 1000 #-- Initialize empty list to store mean-variance pairs for plotting mean_variance_pairs = [] np.random.seed(75) #-- Loop through and generate lots of random portfolios for i in range(n_portfolios): #- Choose assets randomly without replacement assets = np.random.choice(list(daily_returns.columns), n_assets, replace=False) #- Choose weights randomly weights = np.random.rand(n_assets) #- Ensure weights sum to 1 weights = weights/sum(weights) #-- Loop over asset pairs and compute portfolio return and variance #- https://quant.stackexchange.com/questions/43442/portfolio-variance-explanation-for-equation-investments-by-zvi-bodie portfolio_E_Variance = 0 portfolio_E_Return = 0 for i in range(len(assets)): portfolio_E_Return += weights[i] * mus.loc[assets[i]] for j in range(len(assets)): #-- Add variance/covariance for each asset pair #- Note that when i==j this adds the variance portfolio_E_Variance += weights[i] * weights[j] * cov.loc[assets[i], assets[j]] #-- Add the mean/variance pairs to a list for plotting mean_variance_pairs.append([portfolio_E_Return, portfolio_E_Variance]) #-- Plot the risk vs. return of randomly generated portfolios #-- Convert the list from before into an array for easy plotting mean_variance_pairs = np.array(mean_variance_pairs) risk_free_rate=0 #-- Include risk free rate here fig = go.Figure() fig.add_trace(go.Scatter(x=mean_variance_pairs[:,1]**0.5, y=mean_variance_pairs[:,0], marker=dict(color=(mean_variance_pairs[:,0]-risk_free_rate)/(mean_variance_pairs[:,1]**0.5), showscale=True, size=7, line=dict(width=1), colorscale="RdBu", colorbar=dict(title="Sharpe
Ratio") ), mode='markers')) fig.update_layout(template='plotly_white', xaxis=dict(title='Annualised Risk (Volatility)'), yaxis=dict(title='Annualised Return'), title='Sample of Random Portfolios', width=850, height=500) fig.update_xaxes(range=[0.18, 0.32]) fig.update_yaxes(range=[0.02,0.27]) fig.update_layout(coloraxis_colorbar=dict(title="Sharpe Ratio"))Sample only from efficient frontier#-- Create random portfolio weights and indexes #- How many assests in the portfolio n_assets = 5 mean_variance_pairs = [] weights_list=[] tickers_list=[] for i in tqdm(range(10000)): next_i = False while True: #- Choose assets randomly without replacement assets = np.random.choice(list(daily_returns.columns), n_assets, replace=False) #- Choose weights randomly ensuring they sum to one weights = np.random.rand(n_assets) weights = weights/sum(weights) #-- Loop over asset pairs and compute portfolio return and variance portfolio_E_Variance = 0 portfolio_E_Return = 0 for i in range(len(assets)): portfolio_E_Return += weights[i] * mus.loc[assets[i]] for j in range(len(assets)): portfolio_E_Variance += weights[i] * weights[j] * cov.loc[assets[i], assets[j]] #-- Skip over dominated portfolios for R,V in mean_variance_pairs: if (R > portfolio_E_Return) & (V < portfolio_E_Variance): next_i = True break if next_i: break #-- Add the mean/variance pairs to a list for plotting mean_variance_pairs.append([portfolio_E_Return, portfolio_E_Variance]) weights_list.append(weights) tickers_list.append(assets) break len(mean_variance_pairs)If we plot the risk and return for each of the portfolios on a chart then we will see an arch line at the top of the portfolios.#-- Plot the risk vs. return of randomly generated portfolios #-- Convert the list from before into an array for easy plotting mean_variance_pairs = np.array(mean_variance_pairs) risk_free_rate=0 #-- Include risk free rate here fig = go.Figure() fig.add_trace(go.Scatter(x=mean_variance_pairs[:,1]**0.5, y=mean_variance_pairs[:,0], marker=dict(color=(mean_variance_pairs[:,0]-risk_free_rate)/(mean_variance_pairs[:,1]**0.5), showscale=True, size=7, line=dict(width=1), colorscale="RdBu", colorbar=dict(title="Sharpe
Ratio") ), mode='markers', text=[str(np.array(tickers_list[i])) + "
" + str(np.array(weights_list[i]).round(2)) for i in range(len(tickers_list))])) fig.update_layout(template='plotly_white', xaxis=dict(title='Annualised Risk (Volatility)'), yaxis=dict(title='Annualised Return'), title='Sample of Random Portfolios', width=850, height=500) fig.update_xaxes(range=[0.18, 0.35]) fig.update_yaxes(range=[0.05,0.29]) fig.update_layout(coloraxis_colorbar=dict(title="Sharpe Ratio"))The Half-Edge Data Structure CS 480 Computational Geometry Dr. We’ve seen in this class that storing polygons as doubly-linked lists of vertices is convenient for supporting many of the operations needed for dealing with polygons in algorithms. For example, when combining two convex polygons across tangents into a larger convex polygon (in the divide-and-conquer convex hull algorithm), the required operations were very fast–once the tangents where identified, the actual combination operation runs in $O(1)$ time.Recall, however, our early triangulation of polygons lab. In that lab we triangulated a polygon using a naive algorithm for finding a maximal non-crossing set of diagonals. We returned a list of `Polygon` objects, each of which represented one of the triangles. This is not terribly convenient. For instance, suppose you are writing the AI for a computer game and the game map is represented as a triangluation. You want to do path planning for your NPCs, so given a current location of an NPC and its desired location, you'd like to get a path through the map that will move the NPC from the starting position to the ending position. As a first pass, you'd just like the sequence of map triangles the NPC will traverse. If your map is stored as a "triangle soup" (a flat list of triangles) then just finding the three neighboring triangles to any given triangle requires a linear-time search. This will not lead to efficient algorithms. What is needed is a spacial data structure that can elegantly store subdivisions of the plane into polygonal regions and answer lots of queries efficiently. Queries like, what are the vertices / edges of some given face? What are the edges incident to a given vertex? What are the faces incident to a given edge? Whast are the faces incident along the edges of some other face? Etc. Remarkably a powerful data structure exists that answers these queries and more _as efficiently as possible_. The data structure is actually quite simple, though it is a little bit fiddly to get right--because it is a generalization of doubly-linked lists. The data structure is often called the _half-edge datastructure_ (and is also known as a _doubly-connected edge list_ or _DCEL_).In this lab we will develop a half-edge data structure for storing subdivisions of some polygon into polygonal regions without holes. Note though that with slight modifications the data structure developed here can be extended to store more generally _any_ planar striaght-line graph--_PSLG_--a non-crossing embedding of a planar graph in the plane with edges represented as striaght line segments. It can also be used to store piecewise-linear surfaces in $\mathbb{R}^3$ like polyhedra so long as they are manifolds--meaning that each edge is incident to at most two faces. 
The basic ideaConsider the following subdivision of a large outer polygon into smaller polygons: ![A planar subdivision.](img/planarsubdiv.png)The subdivision above has 8 vertices, 12 edges, and 6 faces (note that we are counting the outer face here, and this is important, because it will allow us to keep special cases out of our data structure) Suppose we want a data structure for representing the vertices, edges, and faces of our subdivision. We could try to use a doubly-linked list for representing each polygon, like we did before, but there’s a problem: given some edge e, where should its next pointer point? What’s the problem exactly? The problem is that each edge is incident to two faces, not one, and so there are really two “next” edges, one in each of e’s incident faces. The problem is made even worse by the following observation, the counter-clockwise orientation of the two faces incident e runs in opposite directions along e in the two incident faces. It seems there is just no good way to store an edge in a doubly-linked list as we did before.Or is there?Enter the half-edge data structure. The basic idea is to split each edge $e$ into two _half-edges_ _h1_ and _h2_. The split is not what you'd think. We're __not__ splitting the edge in half at its midpoint to get two edges that are half as long. Instead, the split is like Robin Hood's arrow splitting an already shot arrow straight through its core. We are splitting the edge along its entire length into a left-side half-edge and a right-side half-edge. This allows us to maintain a few useful invariants: 1. A half-edge is incident to only one face, unlike an edge which is incident to two. 2. Each half-edge runs counter-clockwise along the face it is incident to. The two half-edges that together make up the left-side and right-side of an edge of our planar subdivision are called _twins_. A face can now be represented by a doubly-linked list of half-edges, just as we did for representing polygons. Two different faces that share a common edge will simply have two half-edges (that represent the common edge) that are twins of each other. Here’s a drawing of the half-edge data structure for the planar subdivision drawing above (note how the orange edges have been split into red half-edges):![img/dcel1.png](img/dcel1.png)Side note: in some of the more topological literature _half-edges_ are referred to as _darts_. A few more detailsSo much for the basic idea, let's dive into a few more details. First, it's convenient to store three different types of objects: vertices, half-edges, and faces. We may even add an edge structure if we need to maintain data on a per-edge basis--in this case the edge structure is just a container for data and both its incident half-edges maintain a reference to it. Each half-edge stores the following: 1. A reference to its _originating vertex_--the first vertex of its edge in the ccw ordering of the half-edge's face. 2. A reference to its _incident face_. 3. References to its _next_ and _previous_ half-edges in ccw order along the incident face. 4. A reference to its _twin_ half-edge.Notice that the half-edge stores a reference to its origin vertex, but not its destination vertex. How can we recover the destination? Suppose `he` is a reference to a half-edge and `he.origin` is the origin vertex. Then the destination vertex is simply `he.twin.origin`. __Question:__ What is another way we can get the destination of a given half-edge `he`? 
Each `Face` object simply stores a pointer to _any one of its incident half-edges (arbitrarily)_. This has the role of the _firstVertex_ reference we used in the `Polygon` structure for the convex-hull lab. If you have a `Face` object, you can start a loop over all its incident half-edges by starting from `.incidentEdge` and following `.next` references (to obtain the half-edges in ccw order) or `.prev` references (to obtain the half-edges in cw order) . __Question:__ How can you list all the vertex coordinates incident to a face in ccw order? Similar to a `Face`, each `Vertex` simply stores a pointer to _one of its out-going half-edges (arbitrarily)_. This gives us linear-time access to all of the edges incident to a vertex. __Question:__ How can we loop over all of the half-edges outgoing from a vertex? HINT: Think about starting from the vertex's `.outgoingHalfEdge` refference and following `twin` and `next` pointers to pick up the others. Here's a more complete picture oif the previous half-edge structure: ![img/dcel2.png](img/dcel2.png) A Combinatorial StructureNotice that nothing in the above discussion is geometric. In fact, the half-edge structure is a combinatorial structure that maintains how the faces, edges (half-edges), and vertices of a manifold are combined--but not _where_ they are in some particular geometry. In order to add geometric information, we need to store a `.pos` attribute at each vertex. UML DiagramTODO The LabA skeleton of the `DCEL`, `Vertex`, `HalfEdge`, and `Face` data structures appears int the code block below. Note that the constructors for `Vertex`, `HalfEdge`, and `Face` all take their parent `DCEL` object as a parameter. This way, each element is able to automatically add itself to the `DCEL`'s `.verts`, `.halfEdges`, and `.faces` lists so that you don't have to do the book-keeping directly. Have a look at the code and get comfortable with it.class DCEL: """ The basic DCEL container. Attributes: verts: List[Vertex] - The list of vertices. halfEdges: List[HalfEdge] - The list of half-edges. faces: List[Face] - The list of faces. outerFace: Face - The outerFace (None if there isn't one.) """ def __init__(self): self.verts = [] self.halfEdges = [] self.faces = [] self.outerFace = None def splitFace(he1, he2): """ Splits the face incident to he1 and he2 by adding an edge between the origins of he1 and he2. If he1 and he2 are not incident to the same face, or he1 is the .next or .prev of he2, then the operation is illegal and no change should occur to the DCEL. Parameters: he1: HalfEdge he2: HalfEdge Returns: True if the oepration succeeds, False if there is an error. """ # TODO # 0. Check whether the call is legal and return False if not. # 1. Create a new face for the second half of the split. # 2. Create new half-edges for the splitting edge. # 3. Insert your new half-edges into the cyclic lists. # 4. Make sure that all the half-edges point to the right face. # 5. Make sure that each face correctly points to an incident half edge. pass class Vertex: """ The basic Vertex object. Attributes: dcel: DCEL - The parent DCEL. outgoingHalfEdge: HalfEdge - Any one half-edge with this vertex as its origin. 
""" def __init__(self, dcel, outgoingHalfEdge=None, pos=None): self.dcel = dcel self.dcel.verts.append(self) self.outgoingHalfEdge = outgoingHalfEdge self.pos = pos class Face: def __init__(self, dcel, incidentHalfEdge=None): self.dcel = dcel self.dcel.faces.append(self) self.incidentHalfEdge = incidentHalfEdge def incidentHalfEdges(self): # TODO return [] def incidentVertices(self): # TODO return [] class HalfEdge: def __init__(self, dcel, origin=None, face=None, prev=None, next=None, twin=None): self.dcel = dcel self.dcel.halfEdges.append(self) self.origin = origin self.face = face self.prev = prev self.next = next self.twin = twin def makeNext(self, he): self.next = he he.prev = self def makeTwin(self, he): self.twin = he he.twin = self @property # The @property annotation let's you "run" this as # he.destination instead of he.destination() def destination(self): return self.twin.origin def splitInHalf(self): # TODO # 1. Create a vertex for the mid-point # 2. Create new half-edges # 3. Set next/prev/twin/face pointers correctly passTasksYour tasks are to implement the following methods:* `Face.incidentHalfEdges() -> List[HalfEdge]`. Returns a list of the incident half-edges in order. * `Face.incidentVertices() -> List[Vertex]`. Returns a list of the incident vertices in order.* `DCEL.splitFace(he1, he2) -> bool`. Splits the face incident to the half-edges `he1` and `he2` by adding an edge (i.e. two half-edges that are twins) between the origins of `he1` and `he2`. Your code should check that the two are incident to the same face and are also not coincident along the face. If both checks pass, then you should split the face and return `True`. Otherwise, you shouldn't modify the `DCEL` and return `False`. * `HalfEdge.splitInHalf()`. This splits the half-edge and its twin by introducing a vertex at its mid-point and creating a few additional half-edges. Note that if `p` and `q` are `PointE2` objects, then the midpoint is given by `((p.x + q.x) / 2, (p.y + q.y) / 2)`. Once you have implemented the methods above, test your split face code by reproducing the following drawing using only calls to `splitFace` and `splitInHalf` methods. 
Add your code for this after the comment ` YOUR CODE HERE`.from koebe.geometries.euclidean2 import PointE2, PolygonE2 coords = [PointE2(-250, -150), PointE2(0, -150), PointE2( 250, -150), PointE2( 250, 150), PointE2(0, 150), PointE2(-250, 150)] dcel = DCEL() # Create vertices for p in coords: Vertex(dcel, pos=p) # Create the outerFace: dcel.outerFace = Face(dcel) # Create the inner face: f = Face(dcel) # Create six half-edges for the inner face innerHEs = [HalfEdge(dcel, v, f) for v in dcel.verts] twinHEs = [HalfEdge(dcel, v, dcel.outerFace) for v in dcel.verts[1:] + [dcel.verts[0]]] for i in range(len(innerHEs)): innerHEs[i-1].makeNext(innerHEs[i]) twinHEs[i].makeNext(twinHEs[i-1]) innerHEs[i].makeTwin(twinHEs[i]) f.incidentHalfEdge = innerHEs[0] dcel.outerFace.incidentHalfEde = twinHEs[0] # YOUR CODE HERE from koebe.graphics.euclidean2viewer import E2Viewer, makeStyle viewer = E2Viewer(600, 600) viewer.addAll([SegmentE2(he.origin.pos, he.destination.pos) for he in dcel.halfEdges]) viewer.addAll([v.pos for v in dcel.verts]) facePolys = [PolygonE2(vertices=[v.pos for v in face.incidentVertices()]) for face in dcel.faces if face != dcel.outerFace] for poly in facePolys: viewer.add(facePolys, makeStyle(fill="#f00")) viewer.show()Working with remote files * **Difficulty level**: intermediate* **Time need to lean**: 10 minutes or less* **Key points**: * Input targets marked by `remote` will be considered as remote files and will not be copied to remote host. * Output targets marked by `remote` will be considered as remote files and will not be copied to local host Remote Targets The task execution model automatically synchronize input and output files between local and remote hosts. This can be convenient but 1. If the input files are large and reside on remote host already, there is no need to make the input files available on local host for them to be processed.2. If the output files are large and do not need to be processed locally, there is no need to copy output files to local host.To solve these problems, you can use `remote` targets to specify that the targets are on remote host, and do not need to be synchronized. For example, you could use the following step to process a large input file and only synchronize small output files to local desktop for further analysis:```[10]input: remote('/path/to/large/input/file')output: remote('large_output'), 'summary.stat'task:sh: script to generate large_output and summary.stat from large input files.``` The `remote` function accept any one or more SoS targets (e.g. `remote('input.txt')`, `remote('input1.txt', 'input2.txt')`, `remote(fastq_files)`, or `remote(R_Library('ggplot2'))`.%run -q bcb output: remote('result.png') task: walltime='1h', mem='2G', nodes=1, cores=1 R: set.seed(1) x <- 1:100 y <- -0.03*x + rnorm(50) png("result.png", height=400, width=600) plot(x, y, pch=19, col=rgb(0.5, 0.5, 0.5, 0.5), cex=1.5) abline(lm(y ~ x)) dev.off()INFO: No matching tasks are identified. Use option -a to check all tasks. INFO: 68398bb67cbef06a startedThe task is executed successfully on remote host `bcb` but the result file `result.png`, marked as `remote('result.png')` is not synchronized to localhost after the completion of the task.!ls result.pngls: result.png: No such file or directoryPandasPandas ist ein Python-Modul, welches auf Tabellen sowie Tabellenkalkulationsprogrammen (wie es auch MS Excel tut) beruht. 
Eine besondere Fähigkeit von Pandas ist, dass es direkt CSV-, DSV- und Excel-Dateien einlesen und schreiben kann.Mehr zu Pandas auf der offiziellen Website: http://pandas.org/ Download von Matplotlib# nicht starten, da Matplotlib bereits installiert wurde und die notwendigen Rechte fehlen !pip3 install pandasVerwenden von Pandas Vergleich zwischen NumPy und Pandas# import numpy as np # X = np.array([11, 28, 72, 3, 5, 8]) # both are the same type:Individuelle Indizesfruits = ['apples', 'oranges', 'cherries', 'pears'] quantities = [20, 33, 52, 10] fruits = ['apples', 'oranges', 'cherries', 'pears'] fruits_tr = ['elma', 'portakal', 'kiraz', 'armut']Zugriff auf Indizes Ändern des Spaltennamenyears = range(2014, 2018) # shop1 = pd.Series([2409.14, 2941.01, 3496.83, 3119.55], index=years) # shop2 = pd.Series([1203.45, 3441.62, 3007.83, 3619.53], index=years) # shop3 = pd.Series([3412.12, 3491.16, 3457.19, 1963.10], index=years)---cities = ["Zürich", "Winterthur", "Freiburg"]Zugriff auf Spalten Index änderncities = {"name": ["London", "Berlin", "Madrid", "Rome", "Paris", "Vienna", "Bucharest", "Hamburg", "Budapest", "Warsaw", "Barcelona", "Munich", "Milan"], "population": [8615246, 3562166, 3165235, 2874038, 2273305, 1805681, 1803425, 1760433, 1754000, 1740119, 1602386, 1493900, 1350680], "area": [1572, 891.85, 605.77, 1285, 105.4, 414.6, 228, 755, 525.2, 517, 101.9, 310.4, 181.8], "country": ["England", "Germany", "Spain", "Italy", "France", "Austria", "Romania", "Germany", "Hungary", "Poland", "Spain", "Germany", "Italy"]} ordinals = ["first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eigth", "ninth", "tenth", "eleventh", "twelvth", "thirteenth"]Spalten umsortieren# city_frame.reindex(index=[0, 2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 11], columns=['country', 'name', 'area', 'population'])Sortierung# absteigend nach Fläche sortieren # aufsteigend nach Einwohner sortierenAuslesen von Dateienimport zipfile # Quelle: https://www.kaggle.com/unsdsn/world-happiness#2019.csv target = 'data/world-happiness.zip' handle = zipfile.ZipFile(target) handle.extractall('data/') handle.close()Predicting a customer's next purchase using automated feature engineering **As customers use your product, they leave behind a trail of behaviors that indicate how they will act in the future. 
Through automated feature engineering we can identify the predictive patterns in granular customer behavioral data that can be used to improve the customer's experience and generate additional revenue for your business.**In this tutorial, we show how [Featuretools](www.featuretools.com) can be used to perform feature engineering on a multi-table dataset of 3 million online grocery orders provided by Instacart to train an accurate machine learning model to predict what product a customer buys next.*Note: If you are running this notebook yourself, refer to the [read me on Github](https://github.com/featuretools/predict_next_purchaserunning-the-tutorial) for instructions to download the Instacart dataset* Highlights* We automatically generate 150+ features using Deep Feature Synthesis and select the 20 most important features for predictive modeling* We build a pipeline that it can be reused for numerous prediction problems (you can try this yourself!)* We quickly develop a model on a subset of the data and validate on the entire dataset in a scalable manner using [Dask](http://dask.pydata.org/en/latest/).import featuretools as ft from dask import bag from dask.diagnostics import ProgressBar import pandas as pd import utils from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score import os ft.__version__Step 1. Load dataWe start by loading in just one partion of our datast. In this case, a partition of our dataset contains the complete purchase history for each user within it. At the end of the tutorial, we will run the pipeline on every partiton to generate a final model. To learn more about loading data into Featuretools, read the guide [here](https://docs.featuretools.com/loading_data/using_entitysets.html).es = utils.load_entityset("partitioned_data/part_1/") esStep 2. Make LabelsFor supervised machine learning, we need labels. These labels define what our predictive model will be used for. In this tutorial, we will predict if a customer will buy Bananas in the next 4 weeks.We generate training examples by selecting a `cutoff_time` in the past to make our labels. Using users who had acivity during `training_window` days before the `cutoff_time`, we look to see if they purchase the product in the `prediction_window`. If you are running this code yourself, feel free to experiment with any of these parameters! For example, try to predict if a customer will buy "Limes" instead of "Bananas" or increase the size of your `prediction_window`.label_times = utils.make_labels(es=es, product_name = "Banana", cutoff_time = pd.Timestamp('March 15, 2015'), prediction_window = ft.Timedelta("4 weeks"), training_window = ft.Timedelta("60 days")) label_times.head(5)We can see above the our training examples contain three pieces of information: a user id, the last time we can use data before feature engineering (called the "cutoff time"), and the label to predict. These are called our "label times". The distribution of the labelslabel_times["label"].value_counts()3. 
Automated Feature EngineeringWith our label times in hand, we can use Deep Feature Synthesis to automatically generate features.When we use DFS, we specify* `target_entity` - the table to build feature for* `cutoff_time` the point in time to calculate the features* `training_window` - the amount of historical data we want to use when calculating featuresA good way to think of the `cutoff_time` is that it let's us "pretend" we are at an earlier point in time when generating our features so we can simulate making predictions. We get this time for each customer from the label times we generated above.feature_matrix, features = ft.dfs(target_entity="users", cutoff_time=label_times, training_window=ft.Timedelta("60 days"), # same as above entityset=es, verbose=True) # encode categorical values fm_encoded, features_encoded = ft.encode_features(feature_matrix, features) print "Number of features %s" % len(features_encoded) fm_encoded.head(10)Building features: 120it [00:00, 6144.38it/s] Progress: 100%|██████████| 1/1 [00:09<00:00, 9.61s/cutoff time] Number of features 161Step 4. Machine LearningUsing the default parameters, we generated 160 potential features for our prediction problem. With a few simple commands, this feature matrix can be used for machine learningX = utils.merge_features_labels(fm_encoded, label_times) X.drop(["user_id", "time"], axis=1, inplace=True) X = X.fillna(0) y = X.pop("label")Let's train a Random Forest and validate using 3-fold cross validationclf = RandomForestClassifier(n_estimators=400, n_jobs=-1) scores = cross_val_score(estimator=clf,X=X, y=y, cv=3, scoring="roc_auc", verbose=True) "AUC %.2f +/- %.2f" % (scores.mean(), scores.std())[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 4.6s finishedWe can see we perform noticably better than guessing! However, we have a pretty high difference in performance accross folds.To improve this, let's identify the top 20 features using a Random Forest and then perform machine learning on the whole dataset (all of the partions).clf.fit(X, y) top_features = utils.feature_importances(clf, features_encoded, n=20)1: Feature: COUNT(order_products WHERE product_name = Banana), 0.129 2: Feature: MODE(order_products.product_name) = Banana, 0.046 3: Feature: MODE(orders.MODE(order_products.product_name)) = Banana, 0.025 4: Feature: MEAN(orders.COUNT(order_products)), 0.020 5: Feature: MEAN(orders.NUM_UNIQUE(order_products.aisle_id)), 0.019 6: Feature: COUNT(order_products WHERE department = produce), 0.019 7: Feature: COUNT(order_products WHERE department = dairy eggs), 0.019 8: Feature: STD(orders.NUM_UNIQUE(order_products.department)), 0.018 9: Feature: MEAN(orders.NUM_UNIQUE(order_products.product_name)), 0.017 10: Feature: STD(orders.PERCENT_TRUE(order_products.reordered)), 0.017 11: Feature: MEAN(orders.PERCENT_TRUE(order_products.reordered)), 0.017 12: Feature: PERCENT_TRUE(order_products.reordered), 0.016 13: Feature: STD(orders.COUNT(order_products)), 0.016 14: Feature: COUNT(order_products), 0.016 15: Feature: NUM_UNIQUE(order_products.product_name), 0.015 16: Feature: SKEW(orders.NUM_UNIQU[...]To persist this features, we can save them to disk.ft.save_features(top_features, "top_features")Understanding feature engineering in Featuretools Before moving forward, take a look at the feature we created. You will see that they are more than just simple transformations of columns in our raw data. Instead, they aggregations (and sometimes stacking of aggregations) across the relationships in our dataset. 
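To make "stacking of aggregations" concrete, a feature such as MEAN(orders.COUNT(order_products)) is just a per-order count that is aggregated again per user. A minimal pandas sketch on toy data (the two small frames below are invented stand-ins, not the Instacart tables):

```python
import pandas as pd

# Toy stand-ins for the orders and order_products tables
orders = pd.DataFrame({"order_id": [1, 2, 3], "user_id": [10, 10, 20]})
order_products = pd.DataFrame({"order_id": [1, 1, 2, 3, 3, 3]})

# First aggregation: COUNT(order_products) per order
per_order = order_products.groupby("order_id").size().rename("n_products")

# Second (stacked) aggregation: MEAN of those counts per user
per_user = (orders.join(per_order, on="order_id")
                  .groupby("user_id")["n_products"].mean())
print(per_user)  # user 10 -> 1.5, user 20 -> 3.0
```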
If you're curious how this works, learn about the Deep Feature Synthesis algorithm in our documentation [here](https://docs.featuretools.com/automated_feature_engineering/afe.html).DFS is so powerful because with no manual work, the library figured out that historical purchases of bananas are important for predicting future purchases. Additionally, it surfaces that purchasing dairy or eggs and reordering behavior are important features. Even though these features are intuitive, Deep Feature Synthesis will automatically adapt as we change the prediction problem, saving us the time of manually brainstorming and implementing these data transformation. Scaling to full datasetOnce we have written the pipeline for one partition, we can easily scale it out to the full dataset using [Dask](dask.pydata.org). A similar pipeline could also be built using [Spark](http://spark.apache.org/docs/2.2.0/api/python/).pbar = ProgressBar() pbar.register()First, we assemble our partitions and map them to entity sets using the function from before. A single partition contains all the data for each user within it, so this computation is easily parallelized.path = "partitioned_data/" _, dirnames, _ = os.walk(path).next() dirnames = [path+d for d in dirnames] b = bag.from_sequence(dirnames) entity_sets = b.map(utils.load_entityset)Next, we create label times for each entity setlabel_times = entity_sets.map(utils.dask_make_labels, product_name = "Banana", cutoff_time = pd.Timestamp('March 1, 2015'), prediction_window = ft.Timedelta("4 weeks"), training_window = ft.Timedelta("60 days")) label_times # load in the features from before top_features = ft.load_features("top_features", es) feature_matrices = label_times.map(utils.calculate_feature_matrix, features=top_features)Now, we compute with Dask. 
Running on a Macbook Pro with a 2.2 GHz Intel Core i7 and 16gb of ram, this takes about 20 minutes to run.fms_out = feature_matrices.compute() X = pd.concat(fms_out)[########################################] | 100% Completed | 20min 31.3sNow, we repeat the same machine learning steps from the sample datasetX.drop(["user_id", "time"], axis=1, inplace=True) y = X.pop("label") clf = RandomForestClassifier(n_estimators=400, n_jobs=-1) scores = cross_val_score(estimator=clf,X=X, y=y, cv=3, scoring="roc_auc", verbose=True) "AUC %.2f +/- %.2f" % (scores.mean(), scores.std())[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 1.5min finishedWe can now we that our accuracy has stabalized across folds, giving us much more confidence in our model.Now, let's look at the top featuresclf.fit(X, y) top_features = utils.feature_importances(clf, top_features, n=20)1: Feature: COUNT(order_products WHERE product_name = Banana), 0.272 2: Feature: MODE(order_products.product_name) = Banana, 0.065 3: Feature: MODE(orders.MODE(order_products.product_name)) = Banana, 0.045 4: Feature: STD(orders.COUNT(order_products)), 0.043 5: Feature: COUNT(order_products WHERE department = produce), 0.042 6: Feature: STD(orders.PERCENT_TRUE(order_products.reordered)), 0.040 7: Feature: MEAN(orders.PERCENT_TRUE(order_products.reordered)), 0.040 8: Feature: PERCENT_TRUE(order_products.reordered), 0.040 9: Feature: SUM(orders.PERCENT_TRUE(order_products.reordered)), 0.039 10: Feature: STD(orders.NUM_UNIQUE(order_products.department)), 0.037 11: Feature: SKEW(orders.PERCENT_TRUE(order_products.reordered)), 0.036 12: Feature: MEAN(orders.NUM_UNIQUE(order_products.aisle_id)), 0.036 13: Feature: MEAN(orders.NUM_UNIQUE(order_products.department)), 0.035 14: Feature: MEAN(orders.NUM_UNIQUE(order_products.product_name)), 0.034 15: Feature: MEAN(orders.COUNT(order_products)), [...]Face Recognitionfrom google.colab import drive drive.mount('/content/drive') !pip install face_recognition pwd cd /content/drive/MyDrive/Colab Notebooks/FaceRecognition pwd ls import face_recognition import cv2 import os from google.colab.patches import cv2_imshow def read_img(path): img = cv2.imread(path) (h, w) = img.shape[:2] width = 200 ratio = width/float(w) height = int(h*ratio) return cv2.resize(img, (height, width)) known_encodings = [] known_names = [] known_dir = 'known' for file in os.listdir(known_dir): img = read_img(known_dir + '/' + file) img_enc = face_recognition.face_encodings(img)[0] known_encodings.append(img_enc) known_names.append(file.split('.')[0]) known_encodings unknown_dir = 'unknown' results_list = [] for file in os.listdir(unknown_dir): img_enc = [] file_path = unknown_dir + '/' + file print("\nProcessing", file) img = read_img(file_path) img_enc = face_recognition.face_encodings(img)[0] results = face_recognition.compare_faces(known_encodings, img_enc) results_list.append(results) for j in range(len(results)): if results[j]: name = known_names[j] (top, right, bottom, left) = face_recognition.face_locations(img)[0] cv2.rectangle(img, (left, top), (right, bottom), (0,255,0), 2) font = cv2.FONT_HERSHEY_SIMPLEX bottomLeftCornerOfText = (round(left*1.1),round(bottom*1.15)) fontScale = 0.5 fontColor = (0,255,0) lineThickness = 1 image = cv2.putText(img, name, bottomLeftCornerOfText, font, fontScale, fontColor, lineThickness) cv2_imshow(image)The goal os `punk` is to make available sime wrappers for a variety of machine learning pipelines.The pipelines are termed `primitves` and each primitive is designed with a functional programming 
approach in mind.At the time of this writing, `punk` is being periodically updated. Any new primitives will be released as a pip-installable Python package every Friday along with their corresponding annotation files for the broader D3M community.Here we will briefly show how the primitives in the punk package can be utilized.import punk help(punk)Help on package punk: NAME punk PACKAGE CONTENTS base feature_selection (package) novelty_detection (package) DATA __all__ = ['feature_selection', 'novelty_detection'] VERSION 1.0.0 FILE /home/alarcj/Documents/datasci/NewKnowledge/primitives_repo/CODE/examples/punk/__init__.pyNovelty Detection - Dataset Summarization Testing HeteroscedasticityAn interesting test we can run on our datasets is a test for heteroscedasticity, which may be able to tell us whether there are some subpopulations in our dataset (latent variables), sampling bias, or something of the sort. Future primitives will aid in this task.%matplotlib inline import numpy as np from scipy import linalg import matplotlib.pyplot as plt from punk import novelty_detection # Make some data up n_samples, n_features, rank = 1000, 50, 10 sigma = 1. rng = np.random.RandomState(42) U, _, _ = linalg.svd(rng.randn(n_features, n_features)) X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T) # Adding homoscedastic noise X_homo = X + sigma * rng.randn(n_samples, n_features) # Adding heteroscedastic noise sigmas = sigma * rng.rand(n_features) + sigma / 2. X_hetero = X + rng.randn(n_samples, n_features) * sigmas %%time # run the primitive against a dataset with homoscedastic noise test_homo = novelty_detection.HeteroscedasticityTest(max_iter=1000, tol=0.01) test_homo = test_homo.fit(["matrix"], X_homo) %%time # run the primitive against a dataset with heteroscedastic noise test_hetero = novelty_detection.HeteroscedasticityTest(max_iter=1000, tol=0.01) test_hetero = test_hetero.fit(["matrix"], X_hetero)CPU times: user 8.45 s, sys: 9.98 s, total: 18.4 s Wall time: 5.01 sNotice that for homoscedastic noise the difference between PCA and FactorAnalysis is relatively small and both are able to pick out a lower-rank principal subspace of dimensionality 10.In the case of non-isotropic (heteroscedastic) noise, FactorAnalysis does better than PCA and is able to recover a much lower-rank subspace than PCA - 10 versus 40 dimensions.print(test_homo.pca, test_homo.fa) print(test_hetero.pca, test_hetero.fa)(-74.953690343714229, 10) (-75.029704299751259, 10) (-77.65986584714085, 40) (-76.948541495525788, 10)Compute Scores The primitive `test_heteroscedasticity` is a wrapper for the function `compute_scores`. 
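As an illustration (this is a sketch, not the punk source), a `compute_scores`-style comparison can be reproduced with scikit-learn by cross-validating PCA and FactorAnalysis over a range of component counts and recording the mean log-likelihood for each:
```
import numpy as np
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.model_selection import cross_val_score

def compute_scores_sketch(X, n_components_range, cv=3):
    # returns lists of (mean CV log-likelihood, n_components) pairs,
    # matching the (score, n) tuples used in the plots below
    pca, fa = PCA(), FactorAnalysis()
    pca_scores, fa_scores = [], []
    for n in n_components_range:
        pca.n_components = n
        fa.n_components = n
        pca_scores.append((np.mean(cross_val_score(pca, X, cv=cv)), n))
        fa_scores.append((np.mean(cross_val_score(fa, X, cv=cv)), n))
    return pca_scores, fa_scores
```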
More details on this can be seen in [Model selection with Probabilistic PCA and FA](http://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_fa_model_selection.html).scores = novelty_detection.HeteroscedasticityTest(max_iter=1000, tol=0.01) %%time pca_scores_ho, fa_scores_ho = scores.compute_scores(X_homo) %%time pca_scores_he, fa_scores_he = scores.compute_scores(X_hetero) pca_scores_ho, fa_scores_ho = novelty_detection.compute_scores(X_homo, max_iter=1000, tol=0.01) pca_scores_he, fa_scores_he = novelty_detection.compute_scores(X_hetero, max_iter=1000, tol=0.01) plt.plot([x for y, x in pca_scores_ho], [y for y, x in pca_scores_ho], 'b', label='PCA scores') plt.plot([x for y, x in fa_scores_ho], [y for y, x in fa_scores_ho], 'r', label='FA scores') plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-') plt.axvline(test_homo.pca[1], color='b', label='PCA CV: %d' %test_homo.pca[1] , linestyle='--') plt.axvline(test_homo.fa[1], color='r', label='FactorAnalysis CV: %d' % test_homo.fa[1], linestyle='--') plt.xlabel("# of components") plt.ylabel("CV scores") plt.legend(loc="best") plt.title("Homoscedastic Noise"); plt.plot([x for y, x in pca_scores_he], [y for y, x in pca_scores_he], 'b', label='PCA scores') plt.plot([x for y, x in fa_scores_he], [y for y, x in fa_scores_he], 'r', label='FA scores') plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-') plt.axvline(test_hetero.pca[1], color='b', label='PCA CV: %d' %test_hetero.pca[1] , linestyle='--') plt.axvline(test_hetero.fa[1], color='r', label='FactorAnalysis CV: %d'%test_hetero.fa[1], linestyle='--') plt.xlabel("# of components") plt.ylabel("CV scores") plt.legend(loc="best") plt.title("Heteroscedastic Noise");Visualize Orbitalsimport exatomic from exatomic.base import resource # Easy access to static files from exatomic import UniverseWidget as UW # The visualization systemQuantum chemistry codes don't always make it easy to get all the necessary information but provided the outputs have the data and the parser is implemented to handle the print-out from a specific code, this is what the API looks like for orbital viewing.- First you parse the output - must contain full basis set specification and a matrix of basis function coefficients ```ed = exatomic.myqmcode.Output('/path/to/my/output')```- Inspect the parsed data. Does it look correct?```ed.basis_set houses primitive exponents, coefficientsed.momatrix contains the basis function coefficientsed.basis_set_order indices specifying full basis set order```- `ed` is an Editor, an object that makes parsing files fun! - `ed` must be converted to a universe for the magic to happen```uni = ed.to_universe()```- A Universe has an add_molecular_orbitals method```uni.add_molecular_orbitals? to see all keyword argumentsuni.add_molecular_orbitals() tries to guess smart defaults```- Don't forget python uses 0-based indexing, so check your vector values! Gaussian APIfrom exatomic import gaussian uni = gaussian.Output(resource('g09-ch3nh2-631g.out')).to_universe() fni = gaussian.Fchk(resource('g09-ch3nh2-631g.fchk')).to_universe() uni.atom # Inspect the geometry uni.basis_set_order.head() # And the first few basis functions # Let's add the first 10 molecular orbitals uni.add_molecular_orbitals(vector=range(10)) fni.add_molecular_orbitals(vector=range(10))Evaluating 28 basis functions once. Timing: compute orbitals - 2.81s. Evaluating 28 basis functions once. 
Timing: compute orbitals - 0.37s.The UniverseWidget accepts up to 9 universes- Play around with the buttons: - Clear removes contents from the scene. (Press Fill to get the geometry back) - Active Scenes controls which scenes are controlled by the buttons (default all) - Image allows for saving PNGs of the scenes (can specify a directory and file names) - Camera allows to link the camera between scenes (load and save are buggy) - Fill switches between WebGL fragment shaders and fancy three.js Spheres - Axis adds a unit axis (often hidden within an atom if centered at the origin) - Animate will play dynamics (if a universe has multiple frames) - Fields expands to show: - Isosurfaces tab shows the molecular orbitals - Contours tab shows contour lines in x, y, or z axes- Be sure to use the close button to clean up the javascript side of thingsUW(uni, fni)Recursive solid harmonics means no hard-coded maximum angular momentumuo2 = gaussian.Output(resource('g09-uo2.out')).to_universe() uo2.add_molecular_orbitals() UW(uo2)Evaluating 141 basis functions once. Timing: compute orbitals - 1.06s.Molcas API- You must include the `BSSHOW` keyword in the `SEWARD` module or `GATEWAY` with a higher than normal print threshold to print the basis set specification- Molcas prints out Orb files which contain basis function coefficients so we use 2 `Editors`from exatomic import molcas mol = molcas.Output(resource('mol-uo2-anomb.out')) orb = molcas.Orb(resource('mol-uo2-anomb.scforb')) # Just attach it to the universe # mol.momatrix = orb.momatrix # mol.orbital = orb.orbital # Alternatively there's a convenience method on molcas.Output mol.add_orb(resource('mol-uo2-anomb.scforb')) # adds momatrix and orbital uni = mol.to_universe() uni.add_molecular_orbitals(vector=range(40, 60)) UW(uni)NWChem APIfrom exatomic import nwchem nw = nwchem.Output(resource('nw-ch3nh2-631g.out')).to_universe() nw.add_molecular_orbitals(vector=range(20)) UW(nw)Evaluating 28 basis functions once. 
Timing: compute orbitals - 0.43s.Filter Non-English TweetsHere, we remove tweets from our sample that aren't in English.We should have done this in the previous step but didn't, so we rely on TextBlob here.%matplotlib inline import pandas as pd import json import matplotlib.pyplot as plt from textblob import TextBlob tweet_list = [] with open("full_diverse_sample.json", "r") as in_file: tweet_list = json.load(in_file) x = TextBlob(tweet_list[0]) x.detect_language() eng_tweets = list(filter(lambda x: TextBlob(x).detect_language() == "en", tweet_list)) len(eng_tweets) with open("misogyny_en_samples_to_label.json", "w") as out_file: json.dump(eng_tweets, out_file)Cold Face Test – Saliva Plotsimport json import re from pathlib import Path import pandas as pd import numpy as np import pingouin as pg import matplotlib.pyplot as plt import seaborn as sns from fau_colors import cmaps import biopsykit as bp from biopsykit.utils.dataframe_handling import multi_xs from biopsykit.protocols import MIST from cft_analysis.datasets import CftDatasetProcessed %load_ext autoreload %autoreload 2 %matplotlib widget plt.close("all") palette = sns.color_palette(cmaps.faculties) sns.set_theme(context="notebook", style="ticks", palette=palette) plt.rcParams["figure.figsize"] = (10, 4) plt.rcParams["pdf.fonttype"] = 42 plt.rcParams["mathtext.default"] = "regular" paletteData Import# get path to analysis results base_path = Path("../../data") results_path = base_path.joinpath("../results") stats_path = results_path.joinpath("statistics") tex_path = stats_path.joinpath("tex_tables") plot_path = results_path.joinpath("plots") bp.utils.file_handling.mkdirs([results_path, stats_path, plot_path, tex_path]) paper_path = Path("../paper_path.json") paper_tex_path = None paper_img_path = None if paper_path.exists(): paper_path = Path(json.load(paper_path.open(encoding="utf-8"))["paper_path"]) paper_tex_path = paper_path.joinpath("tab") paper_img_path = paper_path.joinpath("img") bp.utils.file_handling.mkdirs([paper_tex_path, paper_img_path]) dataset = CftDatasetProcessed(base_path, exclude_subjects=True) dataset hue_order = ["Control", "CFT"] mist = MIST.from_file(base_path.joinpath("mist_cft.json")) mist mist.add_saliva_data(dataset.cortisol, "cortisol", sample_times=dataset.sample_times)Plots Cortisol Responsefig, ax = plt.subplots(figsize=(8, 4)) mist.saliva_plot( "cortisol", legend_loc="upper right", legend_fontsize="medium", hue_order=hue_order, linestyle=["-", "--"], marker=["o", "P"], ax=ax, ) for path in [plot_path, paper_img_path]: if path is not None: fig.savefig(path.joinpath("img_cortisol_response.pdf"), transparent=True)Cortisol Features Prepare Datafeatures = ["auc_g", "auc_i", "auc_i_post", "max_inc", "slopeS1S4"] cort_analysis = multi_xs(dataset.cortisol_features, features, level="saliva_feature") cort_analysis.head()Statisticssteps = [ ("prep", "normality"), ("test", "pairwise_ttests"), ] params = {"dv": "cortisol", "between": "condition", "groupby": "saliva_feature", "test__parametric": False} stats = bp.stats.StatsPipeline(steps, params) stats.apply(cort_analysis) stats.display_results(prep=False)Boxplotsfeatures = {"auc_g": ["auc_g"], "auc_i": ["auc_i"], "max_inc": ["max_inc"], "slope": ["slopeS1S4"]} box_pairs, pvalues = stats.sig_brackets( "test", stats_effect_type="between", plot_type="multi", x="saliva_feature", features=features, subplots=True ) fig, axs = plt.subplots(ncols=len(features.keys())) bp.protocols.plotting.saliva_multi_feature_boxplot( dataset.cortisol_features, "cortisol", 
features=features, hue="condition", hue_order=hue_order, legend_loc="upper center", legend_orientation="horizontal", stats_kwargs={"box_pairs": box_pairs, "pvalues": pvalues}, palette=cmaps.faculties, axs=axs, ) for path in [plot_path, paper_img_path]: if path is not None: fig.savefig(path.joinpath("img_cortisol_features.pdf"), transparent=True)Recognizing faces of friendsThis notebook documents and shows how I have made a Convolutional Neural Network to recognize the faces of 3 friends**`[, Rudra]`**The GitHub repo for the complete project can be found below[*Click here for GitHub*](https://github.com/monacotime/2.Recognizing-faces-of-friends) STEP 1: Importing the images from github to the virtual machineSince i have got over 1500+ pictures of me and my frineds in the training set, it is not practical for me to upload them to the Google's VM everytime I want to use them on the Google's cloud service. It is not practical because my interent connection is slower (2 MBps) ( 25 mbps)Whereas the connection of the Google VM to the GitHub server is ~36.85 MiB/sTherefore I upload my images to the the GitHub repo only once and make Google download it from there using the `!git clone` command# Clone the repo. !git clone -l -s https://github.com/monacotime/2.Recognizing-faces-of-friends.gitCloning into '2.Recognizing-faces-of-friends'... warning: --local is ignored remote: Enumerating objects: 1633, done. remote: Total 1633 (delta 0), reused 0 (delta 0), pack-reused 1633 Receiving objects: 100% (1633/1633), 1.30 GiB | 36.85 MiB/s, done. Checking out files: 100% (1626/1626), done.STEP 2: Creating the Convolutional NN and training ithence forth the actual processing and training of the model begins Importing the librarieswe have got to import the keras libray and some other libraries to work with our data:- __ImageDataGenerator__ because with this class, we can create augmented data using the pre existing data that we have. It has some properties to change the input image like: ``` rescale = changes the value ( pixel data) eg: /255 -> between 0-1 shear_range = stretch the imgae at an angle (shearing) zoom_range = zooms into the image horizontal_flip = flips the image (true/false) ... ClassOfTypeImageDataGenerator.flow_from_directory() this function directly takes the image from the directory and feeds it to the neural network ``` more details on this function can be found in: [Keras documentation for image Preprocessing](https://keras.io/api/preprocessing/image/imagedatagenerator-class)- __Sequential__ The NN model we will be making is of type Sequential... it has got one input node(tensor) and only one output node(tensor) why? because it has got Conv2D and MaxPooling2D in it? im not sure, can someone tell me why we use Sequential? 😕 [Keras documentation for Sequential](https://keras.io/guides/sequential_model/)- __Conv2D__ from what i understand, Conv2D is a type of LAYER in keras that takes in a 2D image. The shape of the input is required to be specified when it is used in the first layer after which it may not be specified and it will find its shape from the previous layers [Keras documentation for Conv2D](https://keras.io/api/layers/convolution_layers/convolution2d/)- __MaxPooling2D__ Takes the maximum argument of the 2D image (pixel data) and simplifies the image (makes it smaller because the original number of pixels keep decreasing with each Argmax of pooling) Better explained later in the notebook- __Activation, Dropout, Flatten, Dense__ Applies an activation function to an output. 
You have to choose from the list of all the Activation functions [Activation](https://keras.io/api/layers/regularization_layers/dropout/) The Dropout layer randomly sets input units to 0 with a frequency of `rate` at each step during training time, which helps prevent overfitting. Inputs not set to `0` are scaled up by `1/(1 - rate)` such that the sum over all inputs is unchanged. [Dropout](https://keras.io/api/layers/regularization_layers/dropout/) > "*Just your regular densely-connected NN layer.*" - Keras documentation about Dense layers [Dense](https://keras.io/api/layers/core_layers/dense/)- __image__ ``` python from keras.preprocessing import image ``` Not sure what this is - __np and os__ We all know what this is right? I dont need to write anything right? RIGHT? **YES, IM TALKING TO YOU, FUTURE ME** 😡from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D from keras.layers import Activation, Dropout, Flatten, Dense from keras import backend as K import numpy as np from keras.preprocessing import image import osUsing TensorFlow backend.Initializing control variablespretty straight forward, `img_size` and other control global variables are declaredthe data directory below is local to the VM and not my pc, it is taking the data from the VM directory and not mineimg_width, img_height = 64, 64 train_data_dir = '/content/2.Recognizing-faces-of-friends/project files/images/train images' test_data_dir = '/content/2.Recognizing-faces-of-friends/project files/images/valid images' nb_train_samples = 1578 nb_valid_samples = 30 image_channel = 1 batch_size = 20 epochs = 5Pre processing the data to be fed into the networkUsinf the `ImageDataGenerator` we are augmenting the data and making more data out of the original data by varying the parameter detailed above alreadyAnd by using the `flow_from_directory()` we are setting it up so that the images from the directory are being directly fed into the neural network input layerwe are doing this for both train and validation data while we are not changing too much of the validation data compared to train data because we want it to be as close to the original picture that was taken so we are just changing the scales as in we are making the 0-255 RGB values into 0-1 GreyScale data and feeding it to the NNif K.image_data_format() == 'channels_first': input_shape = (image_channel, img_width, img_width) else: input_shape = (img_width, img_height, image_channel) train_datagen = ImageDataGenerator( rescale = 1. /255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) valid_datagen = ImageDataGenerator( rescale = 1. /255) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size = (img_width, img_height), color_mode = "grayscale", batch_size = batch_size, class_mode = 'categorical') valid_generator = valid_datagen.flow_from_directory( test_data_dir, target_size = (img_width, img_height), color_mode = "grayscale", batch_size = batch_size, class_mode = 'categorical')Found 1578 images belonging to 3 classes. Found 30 images belonging to 3 classes.Structure and working of the convolutional Neural Network*okay bois hold onto your pants now*---the way this works is...![conv nn](https://miro.medium.com/max/2000/1*vkQ0hXDaQv57sALXAJquxA.jpeg)Okay lets break it down step by step: 1. First, if you have eyes, you will see that on the left a picture of a fantastic blue car is taken as an input2. 
A Box, called Conv2D below, takes the pixels on the image and then projects it onto itself and applies an `activation` to it. now whats an `activation` you ask? so basically it decides if the pixel value is "bright enough" or "important enogh" for the network to even care. and this is done using a curve caled a rectified linear unit or in short ReLU. Want to see what the curve looks like? ![relu](https://miro.medium.com/max/357/1*oePAhrm74RNnNEolprmTaQ.png) this here is relu, its a simple graph. okay now that is out the way lets go to the next thing called `Convloving`? `Convoluting`? well the thing is... see this ![alt text](https://miro.medium.com/max/1000/1*GcI7G-JLAQiEoCON7xFbhg.gif) You have to understand what a `kernal` and `weights` is first and that Yellow thing that is moving and you can see it is called a `kernal`, it has got its `weights` in red if you can see it closely and that is what desides what values get passed onto the convolved features in the picture above the `kernal` is: ``` K= 1 0 1 0 1 0 1 0 1 ``` and this kernal goes down the page in this manner: ![alt text](https://miro.medium.com/max/1400/1*ciDgQEjViWLnCbmX-EeSrA.gif) Here the "page" is the input space of the Convolutional layer. in this manner the Convolutional layer is able to extract the `features` from the image and then store it in a smaller matrix of the extracted `features`. the extracted matrix of feature is smaller because it is very feature dense as shown below: ![alt text](https://miro.medium.com/max/790/1*1VJDP6qDY9-ExTuQVEOlVg.gif) And this way we extract the higher level features in the input and decrease the computional necessaty by making the input space to the next layer even smaller. The now extracted feature matrix is then passed into a `MaxPooling2D` layer if you notice below in the codes.3. Pooling, or in our case MaxPooling2D, takes the image from the output of the previous layer (Conv2D layer that applies the activation to the input) and then takes only the max values in a kernal... okay lets break it down Similar to the `Convolutional Layer`, the `Pooling layer` is responsible for reducing the spatial size of the `Convolved Feature`. This is to decrease the computational power required to process the data through dimensionality reduction. Furthermore, it is useful for extracting dominant features which are rotational and positional invariant, thus maintaining the process of effectively training of the model. how the pooling layer works looks very similar to how the convolutional layer works: ![alt text](https://miro.medium.com/max/792/1*uoWYsCV5vBU8SHFPAPao-w.gif) regular pooling vs max pooling: ![](https://miro.medium.com/max/1000/1*KQIEqhxzICU7thjaQBfPBQ.png) if you noticed above, the max pooling only takes the maximum value instead of the average. this has performs better aparantly due to reasons beyond me.--->Ayway, this process of `1 -> 2 -> 3` is repeated 3 times in our code to extract very high level featuresAnd then the final output after doing it 3 times is taken to a `flatten` layerthis layer makes our 2D vector into a 1D array and then inputs it into a `Dense` layer. 
Details of the `Dense` layer has been already provied above.This `Dense` layer then converges to give only one selected `Categories` of outputs.You can see it in this diagram: ![alt text](https://miro.medium.com/max/1400/1*kToStLowjokojIQ7pY2ynQ.jpeg)Where you see:```class 1class 2class 3class 4class 5```we will only have:```class 1class 2class 3where class 1 = "Anish" class 2 = "" class 3 = "Rudra"```and force the Network to chose only 3 class.AND HENCE WE WILL HAVE OUR IMAGE CLASSIFIER 🎉model = Sequential() model.add(Conv2D(32,(3,3), input_shape=input_shape)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size = (2,2))) #model.summary() model.add(Conv2D(32, (3,3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size = (2,2))) model.add(Conv2D(64, (3,3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size = (2,2))) model.add(Flatten()) model.add(Dense(64)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(3)) model.add(Activation('softmax')) model.summary() model.compile(loss = 'categorical_crossentropy', optimizer = 'rmsprop', metrics = ['accuracy'])Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 62, 62, 32) 320 _________________________________________________________________ activation_1 (Activation) (None, 62, 62, 32) 0 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 31, 31, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 29, 29, 32) 9248 _________________________________________________________________ activation_2 (Activation) (None, 29, 29, 32) 0 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 14, 14, 32) 0 ______________________________________________________[...]Training the NetworkHere we train the model by calling the simple method called `model.fit_generator()`more detail about it can be found on: [More details about fit_generator()](https://www.geeksforgeeks.org/keras-fit-and-keras-fit_generator/)model.fit_generator(train_generator, steps_per_epoch = nb_train_samples, epochs = epochs, validation_data = valid_generator, validation_steps = nb_valid_samples)Epoch 1/5 1578/1578 [==============================] - 1521s 964ms/step - loss: 0.1381 - accuracy: 0.9495 - val_loss: 1.3111e-04 - val_accuracy: 1.0000 Epoch 2/5 1578/1578 [==============================] - 1488s 943ms/step - loss: 0.0220 - accuracy: 0.9937 - val_loss: 5.5192e-06 - val_accuracy: 1.0000 Epoch 3/5 1578/1578 [==============================] - 1491s 945ms/step - loss: 0.0120 - accuracy: 0.9965 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 Epoch 4/5 1578/1578 [==============================] - 1530s 969ms/step - loss: 0.0155 - accuracy: 0.9969 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 Epoch 5/5 1578/1578 [==============================] - 1532s 971ms/step - loss: 0.0131 - accuracy: 0.9970 - val_loss: 0.0000e+00 - val_accuracy: 1.0000STEP 3: Saving the model so that we dont have to re-train it all the time---It's a simple `model.save_weights()` method that saves the weights of the trained model. I specified it to save it in `.h5` format because idk i saw some other guy do it okay idk why its better. 
Someone please tell me why it's better.model.save_weights('colab_first_try.h5')STEP 4: Predicting images it has never seen before---***SO NOW IS FINALLY THE FUN PART***I am importing some images in a seperate folder i had saved called `test images` and then making the trained model predict them using the `model.predict` method and desplaying the result*It's all pretty basic, you should be able to understand from the code.*test_path = '/content/2.Recognizing-faces-of-friends/project files/images/test images/' for img_path in os.listdir(test_path): img_pred_pil=image.load_img( test_path + img_path, target_size=(64,64), color_mode='grayscale') display(img_pred_pil) img_pred_nparr = image.img_to_array(img_pred_pil) img_pred_nparr = np.expand_dims(img_pred_nparr,axis = 0) rslt = model.predict_classes(img_pred_nparr) if rslt == 0: print('Anish') elif rslt == 1: print('Sir Chaitu') else: print('Rudra')1. Name and properties of compounds in small clusters of manifold plots2. Use clustering algorithm to find clusters3. Plot molecules based on how large they are. Use features such as molecular weight, volume, number of atoms. What defines the size of molecules?4. EDA - find class specific features5. General toxicity vs. specific toxicityfrom google.colab import drive drive.mount('/gdrive') %cd /gdrive import os import numpy as np import pandas as pd import matplotlib.pyplot as plt os.listdir('My Drive/Colab Notebooks/Others/') from sklearn import manifold from sklearn.utils import shuffle from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from collections import OrderedDict from functools import partial import rdkit.Chem as Chem from rdkit.Chem import AllChem def smiles_to_fps(data, fp_length, fp_radius): return stringlist2intarray(np.array([smile_to_fp(s, fp_length, fp_radius) for s in data])) def smile_to_fp(s, fp_length, fp_radius): m = Chem.MolFromSmiles(s) return (AllChem.GetMorganFingerprintAsBitVect( m, fp_radius, nBits=fp_length)).ToBitString() def stringlist2intarray(A): '''This function will convert from a list of strings "10010101" into in integer numpy array.''' return np.array([list(s) for s in A], dtype=int) # load the feature dataset features_df = pd.read_csv("My Drive/Colab Notebooks/Others/total_df.csv", index_col=[0]) features_df.head() index = features_df.index features_df = shuffle(features_df, random_state=43) features_df.index = index features_df.head() feature_compound_name = features_df['compound'].values class_label_name = ['Ototoxic' if y == 1.0 else 'Non-toxic' for y in features_df['class_label'].values] Y_feature = features_df['class_label'].values X_feature = features_df.drop(columns=['class_label', 'compound']).values X_feature = StandardScaler().fit_transform(X_feature) Y_feature.shape, X_feature.shape import plotly.express as px def run_manifold(X, Y, name, n_components=2): # setup manifold methods methods = OrderedDict() # methods['MDS'] = manifold.MDS(n_components, max_iter=100, n_init=1) methods['t-SNE'] = manifold.TSNE(n_components=n_components, init='pca', random_state=0) # methods['PCA'] = PCA(n_components=n_components) for i, (label, method) in enumerate(methods.items()): Y_ = method.fit_transform(X) if n_components == 2: fig = px.scatter(Y_, x=0, y=1, color=class_label_name, hover_name=feature_compound_name, title=f'Manifold method {label}', labels={'0': 'PC 1', '1': 'PC 2'}) elif n_components == 3: fig = px.scatter_3d(Y_, x=0, y=1, z=2, color=class_label_name, hover_name=feature_compound_name, title=f'Manifold method 
{label}', labels={'0': 'PC 1', '1': 'PC 2', '2': 'PC 3'}) fig.show() run_manifold(X_feature, Y_feature, "PyBioMed Features", n_components=2)/usr/local/lib/python3.7/dist-packages/sklearn/manifold/_t_sne.py:793: FutureWarning: The default learning rate in TSNE will change from 200.0 to 'auto' in 1.2. FutureWarning, /usr/local/lib/python3.7/dist-packages/sklearn/manifold/_t_sne.py:986: FutureWarning: The PCA initialization in TSNE will change to have the standard deviation of PC1 equal to 1e-4 in 1.2. This will ensure better convergence. FutureWarning,Clustering using the t-SNE components. Note that t-SNE components changes upon successive runs.sne_components = manifold.TSNE(n_components=2, init='pca', random_state=0).fit_transform(X_feature) from sklearn.cluster import KMeans from sklearn.metrics.pairwise import pairwise_distances_argmin # run clustering n_clusters=20 kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10, random_state=0) kmeans.fit(sne_components) # we want to have same colors for the same clusters. # let's pair the cluster centers per closest one. kmeans_cluster_center = kmeans.cluster_centers_ kmeans_labels = pairwise_distances_argmin(sne_components, kmeans_cluster_center) kmeans_labels_str = ["Cluster "+str(p) for p in kmeans_labels] fig = px.scatter(sne_components, x=0, y=1, color=kmeans_labels_str, hover_name=[pp + " " + qq for pp, qq in zip(feature_compound_name, class_label_name)], title=f'k-Means clustering of t-SNE components', labels={'0': 'PC 1', '1': 'PC 2'}) fig.show() # plot the clusters fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 5)) colors = ["#4EACC5", "#FF9C34", "#d2c596", "#a254a2", "#289ec1", "#18e4ac", "#072f8b", "#b46c9a", "#9f0c05", "#5974d1", "#0dccb9", "#f264d1", "#b0b071", "#da4634", "#704e31", "#ee8b6a", "#eae883", "#116549", "#f8a82a", "#6c776f"] for k, col in zip(range(n_clusters), colors): my_members = kmeans_labels == k cluster_center = kmeans_cluster_center[k] ax.plot(sne_components[my_members, 0], sne_components[my_members, 1], "w", markerfacecolor=col, marker=".", markersize=10) ax.plot( cluster_center[0], cluster_center[1], "o", markerfacecolor=col, markeredgecolor="k", markersize=6, ) ax.set_title("KMeans Clustering of t-SNE components") ax.set_xticks(()) ax.set_yticks(())Now, take the clusters and study what features the molecules in the clusters have in common.What defines the size of the molecules?Ototoxicity is caused by blocking of the MET-channel. Is there any correlation between the size of the molecules and the class labels. We will use the following features as proxy for molecular volume (or size) to do some analysis. 1. Molecular weight (Weight)2. Number of all atoms (nta)3. Number of O atoms (noxy)4. Number of C atoms (ncarb)5. Number of single bonds (nsb)We also have counts of various atoms such as1. Hydrogen2. Halogens3. Heavy atoms4. F atoms5. Cl atoms6. Br atoms7. I atoms8. C atoms9. P atoms10. S atoms11. O atoms12. 
N atomsOther measures for molecular size are bond length, position of atoms, volume which can be computed from Molar mass and density.molecular_size_features = ['Weight', 'nta', 'noxy', 'ncarb', 'nsb'] for indicator in molecular_size_features: fig = px.scatter( features_df, y=indicator, x=np.arange(0, features_df.shape[0]), color = class_label_name, hover_name=features_df['compound'], title=f'Scatter plot of {indicator} feature as proxy for molecular size', width=700, height=500 ) fig.show()spaCyにおける固有表現認識の課題このノートブックでは、spaCnに組み込まれた固有表現認識モデルの課題とテキスト構造への敏感さについて説明します。 準備 パッケージのインストール!pip install -q spacy==3.1.2 |████████████████████████████████| 5.8 MB 13.8 MB/s  |████████████████████████████████| 456 kB 52.8 MB/s  |████████████████████████████████| 623 kB 72.8 MB/s  |████████████████████████████████| 42 kB 916 kB/s  |████████████████████████████████| 10.1 MB 67.7 MB/s [?25hモデルのダウンロード!python -m spacy download en_core_web_lgCollecting en-core-web-lg==3.1.0 Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_lg-3.1.0/en_core_web_lg-3.1.0-py3-none-any.whl (777.1 MB)  |████████████████████████████████| 777.1 MB 18 kB/s [?25hRequirement already satisfied: spacy<3.2.0,>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from en-core-web-lg==3.1.0) (3.1.2) Requirement already satisfied: thinc<8.1.0,>=8.0.8 in /usr/local/lib/python3.7/dist-packages (from spacy<3.2.0,>=3.1.0->en-core-web-lg==3.1.0) (8.0.10) Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.2.0,>=3.1.0->en-core-web-lg==3.1.0) (2.23.0) Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.2.0,>=3.1.0->en-core-web-lg==3.1.0) (4.62.2) Requirement already satisfied: pydantic!=1.8,!=1.8.1,<1.9.0,>=1.7.4 in /usr/local/lib/python3.7/dist-packages (from spacy<3.2.0,>=3.1.0->en-core-web-lg==3.1.0) (1.8.2[...]インポートimport spacyモデルの読み込みnlp = spacy.load("en_core_web_lg")固有表現を認識するでは、spaCyのモデルを使って、固有表現認識をしてみましょう。やり方は簡単です。テキストを渡して`Doc`オブジェクトを作成したら、`ents`プロパティへアクセスするだけです。そうすることで、固有表現とそのタイプを取得できます。mytext = """SAN FRANCISCO — Shortly after Apple used a new tax law last year to bring back most of the $252 billion it had held abroad, the company said it would buy back $100 billion of its stock. On Tuesday, Apple announced its plans for another major chunk of the money: It will buy back a further $75 billion in stock. “Our first priority is always looking after the business and making sure we continue to grow and invest,” , Apple’s finance chief, said in an interview. “If there is excess cash, then obviously we want to return it to investors.” Apple’s record buybacks should be welcome news to shareholders, as the stock price is likely to climb. But the buybacks could also expose the company to more criticism that the tax cuts it received have mostly benefited investors and executives. """ doc = nlp(mytext) for ent in doc.ents: print(ent.text, "\t", ent.label_) GPE Apple ORG last year DATE $252 billion MONEY $100 billion MONEY Tuesday DATE Apple ORG $75 billion MONEY first ORDINAL PERSON Apple ORG Apple ORG`sents`プロパティへアクセスすることで、文を抽出してみましょう。今回のテキストであれば、人間であれば6つの文を抽出できるはずです。for sent in doc.sents: print(sent.text) print("***End of sent****") print("Total sentences: ", len(list(doc.sents)))SAN FRANCISCO — Shortly after Apple used a new tax law last year to bring back most of the $252 billion it had held abroad, the company said it would buy back $100 billion of its stock. 
***End of sent**** On Tuesday, Apple announced its plans for another major chunk of the money: It will buy back a further $75 billion in stock. ***End of sent**** ***End of sent**** “Our first priority is always looking after the business and making sure we continue to grow and invest,” , Apple’s finance chief, said in an interview. ***End of sent**** “If there is excess cash, then obviously we want to return it to investors.” ***End of sent**** Apple’s record buybacks should be welcome news to shareholders, as the stock price is likely to climb. ***End of sent**** But the buybacks could also expose the company to more criticism that the tax cuts it received have mostly benefited investors and executives. ***End of sent**** ***End of sent**** Total sentences: 88つの文が抽出されました。改行が影響を及ぼしている箇所があるようです。 では、もし固有表現の途中で改行が入った場合はどうなるのでしょうか?試してみましょう。# 改行なし doc = nlp('The United States Army is the land service branch of the United States Armed Forces.') for ent in doc.ents: print(ent.text, "\t", ent.label_) # 改行あり doc = nlp('The United States\nArmy is the land service branch of the United States Armed Forces.') for ent in doc.ents: print(ent.text, "\t", ent.label_)The United States GPE Army ORG the United States Armed Forces GPE抽出結果が変わってしまいました。今回の例では、認識結果が変わるように意図的に改行を入れましたが、実際、企業内の文章(業務文書、メールなど)であれば、画面内におさめるために、文の途中で改行することはよくあるかと思います。そういった場合、単に学習済みのモデルを適用するだけではなく、文境界の認識などの前処理をする必要があります。import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import poisson # For reproducibility np.random.seed(1000) if __name__ == '__main__': # Create the initial observation set obs = np.array([7, 11, 9, 9, 8, 11, 9, 9, 8, 7, 11, 8, 9, 9, 11, 7, 10, 9, 10, 9, 7, 8, 9, 10, 13]) mu = np.mean(obs) print('mu = {}'.format(mu)) # Show the distribution sns.set(style="white", palette="muted", color_codes=True) fig, ax = plt.subplots(figsize=(14, 7), frameon=False) sns.distplot(obs, kde=True, color="b", ax=ax) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) plt.show() # Print some probabilities print('P(more than 8 trains) = {}'.format(poisson.sf(8, mu))) print('P(more than 9 trains) = {}'.format(poisson.sf(9, mu))) print('P(more than 10 trains) = {}'.format(poisson.sf(10, mu))) print('P(more than 11 trains) = {}'.format(poisson.sf(11, mu))) # Add new observations new_obs = np.array([13, 14, 11, 10, 11, 13, 13, 9, 11, 14, 12, 11, 12, 14, 8, 13, 10, 14, 12, 13, 10, 9, 14, 13, 11, 14, 13, 14]) obs = np.concatenate([obs, new_obs]) mu = np.mean(obs) print('mu = {}'.format(mu)) # Repeat the analysis of the same probabilities print('P(more than 8 trains) = {}'.format(poisson.sf(8, mu))) print('P(more than 9 trains) = {}'.format(poisson.sf(9, mu))) print('P(more than 10 trains) = {}'.format(poisson.sf(10, mu))) print('P(more than 11 trains) = {}'.format(poisson.sf(11, mu))) # Generate 2000 samples from the Poisson process syn = poisson.rvs(mu, size=2000) # Plot the complete distribution fig, ax = plt.subplots(figsize=(14, 7), frameon=False) sns.distplot(syn, kde=True, color="b", ax=ax) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) plt.show()mu = 9.12Laboratory 1###This is a comment print("Welcome to Python Programming") name= input("What is your name?:") age= int(input("Age:")) address= input("Address:") print(name) print(age) print(address)Welcome to Python Programming What is your name?: Age:18 Address:Woodcress Phase 7 Lancaster State, Barangay Tapia, Gen. Trias, Cavite 18 Woodcress Phase 7 Lancaster State, Barangay Tapia, Gen. 
Trias, CaviteLoad datasetsReminder: First, load, pre-process and store a dataset via the [RadialBeam](./radialbeamsampling.ipynb) routine.en_load_dataset = False if en_load_dataset: train_dataset = tf.data.experimental.load('./data/{0}_train'.format(dataset_name)).batch(batch_size) val_dataset = tf.data.experimental.load('./data/{0}_val'.format(dataset_name)).batch(batch_size) test_dataset = tf.data.experimental.load('./data/{0}_test'.format(dataset_name)).batch(batch_size) else: splits = [0.8, 0.1, 0.1] dataset = load_dataset(dataset_name) n_train = int(splits[0] * float(dataset.cardinality())) n_val = int(splits[1] * float(dataset.cardinality())) n_test = int(splits[2] * float(dataset.cardinality())) train_dataset = dataset.take(n_train) val_dataset = dataset.skip(n_train).take(n_val) test_dataset = dataset.skip(n_train).skip(n_val).take(n_train) img_size = int(train_dataset.element_spec['image'].shape[0]) lines, angles = instantiate_radial_vectors(img_size + margin_padding, img_size + margin_padding, beam_set_size=n_beams, max_len=target_size) train_dataset = preprocess(train_dataset, lines, angles, target_size=img_size + margin_padding, batch_size=batch_size, path='./training_dataset', continuous=continuous) val_dataset = preprocess(val_dataset, lines, angles, target_size=img_size + margin_padding, batch_size=batch_size, path='./val_dataset', continuous=continuous) test_dataset = preprocess(test_dataset, lines, angles, target_size=img_size + margin_padding, batch_size=batch_size, path='./test_dataset', continuous=continuous) _, n_beams, _, n_pixels, n_channels = train_dataset.element_spec['beam'].shape2022-05-20 17:35:14.475394: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. 2022-05-20 17:35:15.138228: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 35742 MB memory: -> device: 0, name: NVIDIA A40, pci bus id: 0000:43:00.0, compute capability: 8.6 2022-05-20 17:35:27.939613: W tensorflow/core/kernels/data/cache_dataset_ops.cc:768] The calling iterator did not fully read the dataset being cached. In order to avoid unexpected truncation of the dataset, the partially cached contents of the dataset will be discarded. This can happen if you have an input pipeline similar to `dataset.cache().take(k).repeat()`. 
You should use `dataset.take(k).cache().rep[...]Load the BIC model# input tensor (batch x (zero, theta) x beams x (2epsilon + 1) x D x C) in_beams = tf.keras.layers.Input([2, n_beams, 3, n_pixels, n_channels]) bic = BIC(hidden=128, activation=tf.nn.leaky_relu, context=True, l2_regularization=0.0, edge_factor=0.5, gcn_layers=3, dropout=0.0, size_vector_field=n_beams, pixel_count_per_vector=n_pixels) # multiple output for introspection; for training and inference: prior and unit_vec are essential prior, unit_vec, beamencoding, ctx, similarity, \ beamencoding_zero, beamencoding_theta, angle_energy, rnn_encoding = bic(inputs=in_beams) model = tf.keras.models.Model(inputs=in_beams, name='bic', outputs=(prior, unit_vec, beamencoding, ctx, similarity, \ beamencoding_zero, beamencoding_theta, angle_energy, rnn_encoding)) model = tf.keras.models.load_model(model_path) model.summary()WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.Crop and canonicalize methodsdef remove_padding(image): cropped_image = tf.image.crop_to_bounding_box(image, margin_padding // 2, margin_padding // 2, target_size * 2, target_size * 2) return cropped_image def downstream_predict(downstream, sample, k=1, show=False): # predict non-rotated image with downstream model if k == 1: non_rot_pred = downstream(remove_padding(sample['image'])) else: non_rot_pred = tf.nn.top_k(downstream(remove_padding(sample['image'])), k=k) # predict rotated and chopped off image if k == 1: crop_rot_pred = downstream(remove_padding(sample['rotated'])) else: crop_rot_pred = tf.nn.top_k(downstream(remove_padding(sample['rotated'])), k=k) # call BIC pred_facts, pred_angle, conv_latents, gnn_latents, distance_matrix, \ x1_emb, x2_emb, angle_energy, rnn_encoding = model( tf.tile(sample['beam_rot'][:, None, ...], [1, 2, 1, 1, 1, 1])) # project form complex vector to angle pred_angle = np.array([angle_between(pred_angle[b], tf.cast([1., 0.], tf.float32), gpu=True) for b in range(tf.shape(pred_angle)[0])]) # smoothly rotate the image back back_rot = tfa.image.rotate(sample['rotated'], 2 * math.pi - pred_angle, interpolation='bilinear') # predict with downstream model if k == 1: if show: plt.imshow(remove_padding(back_rot[0])) plt.show() canonic_pred = downstream(remove_padding(back_rot)) else: canonic_pred = tf.nn.top_k(downstream(remove_padding(back_rot)), k=k) return non_rot_pred, crop_rot_pred, canonic_pred cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, label_smoothing=0, axis=-1) train_df = pd.DataFrame(columns=['iteration', 'non_rot_loss', 'crop_rot_loss', 'canonic_loss']) valid_df = pd.DataFrame(columns=['iteration', 'non_rot_loss', 'crop_rot_loss', 'canonic_loss']) n_runs = 5 epochs = 8 for _ in range(n_runs): downstream = VGG11((target_size * 2, target_size * 2, 3), 100) optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07) for e in tqdm(range(epochs)): # validation, before training to log also the init behaviour of the model for i, sample in enumerate(val_dataset): non_rot_pred, crop_rot_pred, canonic_pred = downstream_predict(downstream, sample) valid_df = valid_df.append(pd.DataFrame({ 'iteration': [e * int(val_dataset.cardinality().numpy()) + i], 'non_rot_loss': [float(cce(tf.one_hot(sample['label'], 100), non_rot_pred).numpy())], 'crop_rot_loss': [float(cce(tf.one_hot(sample['label'], 100), crop_rot_pred).numpy())], 'canonic_loss': [float(cce(tf.one_hot(sample['label'], 100), canonic_pred).numpy())] }), 
ignore_index=True) pass # training for i, sample in enumerate(train_dataset): with tf.GradientTape() as tape: non_rot_pred, crop_rot_pred, canonic_pred = downstream_predict(downstream, sample, show=False)#True if i == 0 else False) loss = cce(tf.one_hot(sample['label'], 100), non_rot_pred) train_df = train_df.append(pd.DataFrame({ 'iteration': [e * int(train_dataset.cardinality().numpy()) + i], 'non_rot_loss': [float(cce(tf.one_hot(sample['label'], 100), non_rot_pred).numpy())], 'crop_rot_loss': [float(cce(tf.one_hot(sample['label'], 100), crop_rot_pred).numpy())], 'canonic_loss': [float(cce(tf.one_hot(sample['label'], 100), canonic_pred).numpy())] }), ignore_index=True) grads_downstream = tape.gradient(loss, downstream.trainable_variables) optimizer.apply_gradients(zip(grads_downstream, downstream.trainable_variables)) colors = ['#629FCA', '#FDA556', '#6BBC6B', '#E26768', '#B292CE'] ax = sns.lineplot(data=valid_df, x='iteration', y='non_rot_loss', label='non_rot_loss', ci='sd', palette=colors[0], alpha=0.7) ax = sns.lineplot(data=valid_df, x='iteration', y='crop_rot_loss', label='crop_rot_loss', ci='sd', palette=colors[1], alpha=0.7) ax = sns.lineplot(data=valid_df, x='iteration', y='canonic_loss', label='canonic_loss', ci='sd', palette=colors[2], alpha=0.7) ax.set_ylabel('Cross Entropy') ax.legend().remove() plt.show()Imports%matplotlib inline from actincme.bin.symmetricize import Symmetricize from actincme.bin.rotate import Rotate, AverageRotate from actincme.bin.filament import Filament from actincme.bin.mypyntcloud import MyPyntCloud from mpl_toolkits.mplot3d import Axes3D import numpy as np import pyvista as pv import matplotlib.pyplot as pltGet all files# manually determined slices start_list = [1, 4, 4, 4, 5, 6, 5, 6, 5, 6, 2, 4, 5, 3, 6, 7, 1, 4, 1, 2, 1, 5, 1, 1, 1, 0, 1] end_list = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -1, -1, -3, -1, -1, -1, -1, -1, -1, -2, -1, -2, -1] membrane_path = '../actincme/pkl_files/'Average the shapeAvgRot = AverageRotate(membrane_path, start_list, end_list) # setting cutoff_value = 1 averages all curves, cutoff_value = 10 averages curves that have atleast 10 elements AveragedShape = AvgRot.rotate_many_curves(cutoff_value=10)Mean of empty slice Mean of empty slice Mean of empty slicecan plot if necessary to see just this average shape, but the next block plots both this shape and filaments# fig = plt.figure(figsize=[16,8]) # ax=fig.add_subplot(111, projection='3d') # AvgRot.plot_averaged_curve(ax, xlims=False, save=True)Plot filament orientation relative to this averaged shapefig = plt.figure(figsize=[16,8]) ax=fig.add_subplot(111, projection='3d') filaments = Filament('../Notebooks/', 'BranchedActinCoordinates_Integers') # Compute directionality of filaments filaments.calculate_directionality(rotated_surface=AveragedShape) #handles all the logic # Plot the filaments filaments.plot_filaments_and_shape(1, fig, ax, AveragedShape, direction='normal_angle') filaments._filament_orientation_dataframePlot meshdim_x = AveragedShape._x3d_norm dim_y = AveragedShape._y3d_norm dim_z = AveragedShape._z3d_norm grid = pv.StructuredGrid(dim_x, dim_y, dim_z) centers = grid.cell_centers() cpos = [(11.915126303095157, 6.11392754955802, 3.6124956735471914), (0.0, 0.375, 2.0), (-0.42546442225230097, 0.9024244135964158, -0.06789847673314177)] p = pv.Plotter() p.add_mesh(grid, show_edges=True,color=True, line_width=1) p.enable_eye_dome_lighting() p.add_bounding_box() # p.camera_position = cpos p.add_mesh(centers, color="r", point_size=8.0, 
render_points_as_spheres=True) # p.add_floor('-x') # p.add_floor('-z') p.show(screenshot='test.png')Plot orbiting version of above plotp = pv.Plotter() viewup = [0, 1, 0] p.add_mesh(grid, show_edges=True,color=True, line_width=1) p.enable_eye_dome_lighting() p.add_axes() # p.camera_position = cpos p.add_mesh(centers, color="r", point_size=8.0, render_points_as_spheres=True) p.add_floor('-x') p.add_floor('-y') p.show(auto_close=False) path = p.generate_orbital_path(n_points=36, shift=grid.length, viewup=viewup) p.open_gif("orbit.gif") p.orbit_on_path(path, write_frames=True) p.close()Plot mesh curvaturegrid.plot_curvature(clim=[-0.007, 0.01], show_edges=True, show_grid=True)This is what the plot looks like with Pyntcloudpyntobj = MyPyntCloud(dim_x, dim_y, dim_z) pyntobj.make_cloud_object() pyntobj.compute_scalars(num_of_neighbours=3)Can try plotting curvatures and normals, though it looks rough fig = plt.figure(figsize=[10,8]) ax=fig.add_subplot(111, projection='3d') pyntobj.plot_curve(fig, ax, save=True, label='curvature(4)') fig = plt.figure(figsize=[10,8]) ax=fig.add_subplot(111, projection='3d') pyntobj.plot_curve(fig, ax, save=True, name_of_file='normal', label='ny(4)')Excitation-number-restricted states: Jaynes-Cummings chain Authors: () and () IntroductionThe ENR functions construct a basis set for multipartite systems which contains only states with at most a given overall number of excitations. This is particularly useful for systems where the model conserves excitation number, as in the JC-chain example below. However, it can also help reduce memory cost even if this is not the case; one must then be careful in choosing a large enough number of excitations to obtain convergence.For example, consider a system consisting of 4 modes, each with 5 states. The total Hilbert space size is $5^4 = 625$. If we are only interested in states that contain up to 2 excitations, we only need to include states such as (0, 0, 0, 0) (0, 0, 0, 1) (0, 0, 0, 2) (0, 0, 1, 0) (0, 0, 1, 1) (0, 0, 2, 0) ...The ENR functions create operators and states for the 4 modes that act within this state space. For example,a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)creates destruction operators for each mode.From this point onwards, the annihilation operators a1, ..., a4 can be used to set up a Hamiltonian, collapse operators and expectation-value operators, etc., following the usual pattern.However, many functions in QuTiP will fail on states/operators constructed with this method. 
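As a quick sanity check on the size reduction described above, the following sketch (the numbers follow from the combinatorics, not from the notebook) counts the restricted states and inspects one of the ENR operators:
```
from qutip import enr_state_dictionaries, enr_destroy

# 4 modes with 5 levels each, keeping at most 2 excitations in total
dims, excitations = [5, 5, 5, 5], 2

nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)
print(nstates)      # 15 restricted states instead of 5**4 = 625

a1, a2, a3, a4 = enr_destroy(dims, excitations)
print(a1.shape)     # (15, 15): the operators act directly on the restricted space
```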
So far functions which exist are:enr_fock: create a fock state in the restricted basisenr_identity: create an identity operatorenr_thermal_dm: create a thermal stateenr_destroy: create an annilation operatorenr_ptrace: perform a partial trace%matplotlib inline import numpy as np from qutip import * from qutip.ipynbtools import HTMLProgressBar def solve(d, psi0): # list of annihilation operators for cavity modes a = d[::2] # list of atomic annihilation operators sm = d[1::2] H0 = sum([aa.dag() * aa for aa in a]) + sum([s.dag() * s for s in sm]) # atom-cavity couplings Hint_ac = 0 for n in range(N): Hint_ac += 0.5 * (a[n].dag() * sm[n] + sm[n].dag() * a[n]) # coupling beetween cavities Hint_cc = 0 for n in range(N-1): Hint_cc += 0.9 * (a[n].dag() * a[n+1] + a[n+1].dag() * a[n]) H = H0 + Hint_ac + Hint_cc e_ops = [x.dag() * x for x in d] c_ops = [0.01 * x for x in a] c_ops = [0.05 * x for x in sm] times = np.linspace(0, 250, 1000) L = liouvillian_ref(H, c_ops) result = mesolve(H, psi0, times, c_ops, e_ops, options=Options(nsteps=5000,store_states=True)) return result, H, L N = 4 # number of systems M = 2 # number of cavity states #The dimensions of the JC spin chain dims = [M, 2] * N #The number of excitations in the chain excitations = 1 # total number of excitations initial_excitiations = 1 # initial number of excitations psi0 = tensor([basis(d, initial_excitiations) if idx == 1 else basis(d, 0) for idx, d in enumerate(dims)]) d = [tensor([destroy(d1) if idx1 == idx2 else identity(d1) for idx1, d1 in enumerate(dims)]) for idx2, d2 in enumerate(dims)] #This is the normal QuTip solution, without using ENR states %time result1, H1, L1 = solve(d, psi0) #Construct ENR operators and states d = enr_destroy(dims, excitations) psi0 = enr_fock(dims, excitations, [initial_excitiations if m == 1 else 0 for m in range(2*N)]) [initial_excitiations if m == 1 else 0 for m in range(2*N)] #This is the solution using the ENR states/operators. %time result2, H2, L2 = solve(d, psi0) fig, axes = plot_expectation_values([result1, result2], figsize=(10, 8)) for ax in axes[:, 0]: ax.set_ylim(-0.1, 1.1) fig.tight_layout(); def ENR_ptrace(rho, sel, excitations): """ Partial trace for ENR states. Parameters ---------- rho : Qobj Qobj of an ENR system defined through enr_fock enr_identity enr_thermal_dm enr_destroy or output from a solver sel : int/list An ``int`` or ``list`` of components to keep after partial trace. excitations : integer The maximum number of excitations that are to be included in the state space. Should be consistent with the same assumptions used to construct rho. Returns ------- oper : qobj Quantum object representing partial trace with selected components remaining. Notes ----- The default Qobj.ptrace() will fail with ENR systems. This should be used instead. 
""" if isinstance(sel, int): sel = np.array([sel]) else: sel = np.asarray(sel) if (sel < 0).any() or (sel >= len(rho.dims[0])).any(): raise TypeError("Invalid selection index in ptrace.") drho=rho.dims[0] ############ #dimensions #################### #enr definitions for the original state nstates, state2idx, idx2state = enr_state_dictionaries(drho, excitations) ################ #definition of number of states in selection ###################### # dims_short= np.asarray(drho).take(sel) nstates2, state2idx2, idx2state2 = enr_state_dictionaries(dims_short.tolist(), excitations) # this is a list of the dimensions of the system one has traced out rest = np.setdiff1d(np.arange(len(drho)), sel) #construct matrix to return the new Density matrix rhout = np.zeros((nstates2,nstates2),dtype=np.complex64) for ind,state in idx2state.items(): for ind2,state2 in idx2state.items(): #if the parts of the states of the systems(s) being traced out are diagonal, add this to the new DM if np.all(np.asarray(state).take(rest) == np.asarray(state2).take(rest)): rhout[state2idx2[tuple(np.asarray(state).take(sel))], state2idx2[tuple(np.asarray(state2).take(sel))]] += rho.data[state2idx[state], state2idx[state2]] dims_kept0 = np.asarray(drho).take(sel) rho1_dims = [dims_kept0.tolist(), dims_kept0.tolist()] rho1_shape = [nstates2, nstates2] return Qobj(rhout,rho1_dims,rho1_shape) result1.states[10].ptrace([0]) ENR_ptrace(result2.states[10], [0], excitations) result1.states[10].ptrace([0,1,4]) ENR_ptrace(result2.states[10],[0,1,4],excitations)from __future__ import print_function %matplotlib inline import os import warnings import numpy as np import matplotlib.pyplot as plt import matplotlib.image as image import pandas as pd import pandas_profiling plt.style.use('ggplot') plt.rcParams['figure.figsize'] = (12,8) hr = pd.read_csv('/content/employee_data.csv') hr.head(10) hr.shape import sys !{sys.executable} -m pip install -U pandas-profiling[notebook] !jupyter nbextension enable --py widgetsnbextension from pandas_profiling import ProfileReport pd.crosstab(hr.salary, hr.quit).plot(kind='bar') plt.title('Turnover Frequency on Salary Bracket') plt.xlabel('salary') plt.ylabel('Frequency of Turnover') plt.show() pd.crosstab(hr.department, hr.quit).plot(kind='bar') plt.title('Turnover Frequency on Salary Department') plt.xlabel('salary') plt.ylabel('Frequency of Turnover') plt.show() cat_vars = ['department', 'salary'] for var in cat_vars: cat_list = pd.get_dummies(hr[var], prefix=var) hr = hr.join(cat_list) hr.head() hr.drop(columns=['department', 'salary'], axis = 1, inplace=True) hr.head() hr.shape from yellowbrick.target import ClassBalance plt.style.use('ggplot') plt.rcParams['figure.figsize'] = [12,8] visualizer = ClassBalance(labels=['stayed', 'quit']).fit(hr.quit) visualizer.show() X = hr.loc[:, hr.columns!='quit'] y = hr.quit from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2, stratify=y) from sklearn import tree from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.tree import export_graphviz # display the tree within a Jupyter notebook from IPython.display import SVG from graphviz import Source from IPython.display import display from ipywidgets import interactive, IntSlider, FloatSlider, interact import ipywidgets from IPython.display import Image from subprocess import call import matplotlib.image as mpimg @interact def 
plot_tree( crit=['gini', 'entropy'], split=['best', 'radom'], depth=IntSlider(min=1,max=30, value=2, continuous_update=False), min_split=IntSlider(min=2,max=5, value=2, continuous_update=False), min_leaf=IntSlider(min=1,max=5, value=1, continuous_update=False)): estimator = DecisionTreeClassifier(random_state=0, criterion=crit, splitter=split, max_depth=depth, min_samples_split=min_split, min_samples_leaf=min_leaf) estimator.fit(X_train, y_train) print('Decision Tree Training Accuracy: {:.3f}'.format(accuracy_score(y_train, estimator.predict(X_train)))) print('Decision Tree Training Accuracy: {:.3f}'.format(accuracy_score(y_test, estimator.predict(X_test)))) graph = Source(tree.export_graphviz(estimator, out_file=None, feature_names=X_train.columns, class_names=['stayed', 'quit'], filled=True)) display(Image(data=graph.pipe(format='png'))) return estimator @interact def plot_tree_rf( crit=['gini', 'entropy'], bootstrap=['True', 'False'], depth=IntSlider(min=1,max=30, value=2, continuous_update=False), forests=IntSlider(min=1,max=200, value=100, continuous_update=False), min_split=IntSlider(min=2,max=5, value=2, continuous_update=False), min_leaf=IntSlider(min=1,max=5, value=1, continuous_update=False)): estimator = RandomForestClassifier(random_state=0, criterion=crit, bootstrap=bootstrap, max_depth=depth, n_estimators=forests, n_jobs=-1, verbose=False, min_samples_split=min_split, min_samples_leaf=min_leaf) estimator.fit(X_train, y_train) print('Random Forests Training Accuracy: {:.3f}'.format(accuracy_score(y_train, estimator.predict(X_train)))) print('Random Forests Training Accuracy: {:.3f}'.format(accuracy_score(y_test, estimator.predict(X_test)))) num_tree = estimator.estimators_[0] print('\Visulaizing Tree:',0) graph = Source(tree.export_graphviz(num_tree, out_file=None, feature_names=X_train.columns, class_names=['stayed', 'quit'], filled=True)) display(Image(data=graph.pipe(format='png'))) return estimator from yellowbrick.classifier import ROCAUC dt = DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='gini', max_depth=2, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, presort='deprecated', random_state=0, splitter='best') rf = RandomForestClassifier(bootstrap='True', ccp_alpha=0.0, class_weight=None, criterion='gini', max_depth=2, max_features='auto', max_leaf_nodes=None, max_samples=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=-1, oob_score=False, random_state=0, verbose=False, warm_start=False) visualizer = ROCAUC(rf, classes=['stayed', 'quit']) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.poof() visualizer = ROCAUC(dt, classes=['stayed', 'quit']) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.poof()SoftmaxОбобщение логистической функции для многомерного случая. Функция преобразует вектор $z$ размерности $K$ в вектор $\sigma$ той же размерности, где каждая координата $\sigma_i$ полученного вектора представлена вещественным числом в интервале $[0,1]$ и сумма координат равна 1.Координаты $\sigma_i$ вычисляются следующим образом:${\displaystyle \sigma (z)_{i}={\frac {e^{z_{i}}}{\displaystyle \sum _{k\mathop {=} 1}^{K}e^{z_{k}}}}}$ 1. Реализуйте функцию softmax, которая на вход принимает вектор $z$, а на выходе считает от него софтмакс.2. 
Добавьте возможность принимать на вход матрицу и считать softmax по столбцам (батч) Дивергенция Кульбака-Лейблера * Показать что $D_{KL}(p||q) \ge 0$ для любых $p$ и $q$. Для этого можно воспользоваться [формулой Йенсена](https://en.wikipedia.org/wiki/Jensen%27s_inequality "неравенство Йенсена")* ~~Показать, что максимизация правдопобия эквивалентна минимизации $D_{KL}$~~. GLM На слайде 27 в лекциях показывается, что распределение Бернулли входит в экспоненциальное семейство. Чему равно $\phi$? Обучение линейных моделей Буквально три-четыре года назад для того, чтобы обучить нейронную сеть было необходимо вручную вычислить градиент функции потерь. Затем, обычно, правильность решения проверялась численно. После появления фреймворков вроде Theano и TF это стало необязательным, благодаря чему исследователи и инженеры могут проводить свои эксперименты значительно быстрее. В данной работе мы посчитаем и проверим градиенты для линейных моделей, рассмотренных на первой лекции; а так же в явном виде реализуем алгоритм оптимизации.import numpy as np from keras.datasets import cifar10 from random import randrange import time import matplotlib.pyplot as plt plt.style.use('ggplot') %matplotlib inline plt.rcParams['figure.figsize'] = (15, 12) # set default size of plotsДля обучения мы будем использовать датасет CIFAR-10, состоящий из 60000 цветных изображений размера 32x32, разбитых на 10 классов, по 6000 изображений на класс. Обучающая выборка состоит из 50000 изображений, а тестовая -- из 10000.(x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train.shapeВизуализируем классыclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] num_classes = len(classes) samples_per_class = 10 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(x_train[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # Preprocessing: преобразуем изображения к плоскому виду x_train = np.reshape(x_train, (x_train.shape[0], -1)) x_test = np.reshape(x_test, (x_test.shape[0], -1)) # Печатаем размерности, чтобы проверить что мы не ошиблись print('Training data shape: ', x_train.shape) print('Test data shape: ', x_test.shape) # Preprocessing: вычитаем среднее # 1: Находим среднее изображение mean_image = np.mean(x_train, axis=0) print(mean_image[:10]) # для проверки напечаем несколько элементов plt.figure(figsize=(4,4)) plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # визуализируем полученное среднее plt.show() # 2: вычитаем среднее из изображений обучающей и тестовых выборок x_train = x_train - mean_image x_test = x_test - mean_image print(x_train.shape) # 3: Добавляем новую компоненту отвечающую за сдвиг (bias trick) x_train = np.hstack([x_train, np.ones((x_train.shape[0], 1))]) x_test = np.hstack([x_test, np.ones((x_test.shape[0], 1))]) print(x_train.shape)(50000, 3073)**Задача 1:** Далее следует функция svm_loss, которую вам предстоит дописать.def svm_loss(W, X, y, reg): """ SVM loss function Inputs: - W: Матрица весов - X: Данные - y: Целевой признак - reg: (float) Коэффициент регуляризации Returns: a tuple: - loss (одно число) - градиент по W """ # Впишите свой код на место заглушки loss = 0 dW = np.zeros(W.shape) ############################################################################# # TODO: # # Compute the gradient of the loss function 
and store it dW. # # Rather that first computing the loss and then computing the derivative, # # it may be simpler to compute the derivative at the same time that the # # loss is being computed. As a result you may need to modify some of the # # code above to compute the gradient. # ############################################################################# return loss, dW # Заполняем матрицу весов W случайным образом W = np.random.randn(10, 3073) * 0.0001 loss, grad = svm_loss(W, x_test, y_test, 0.000005) print('loss: %f' % (loss, )) #При помощи этой функции можно проверить градиент численно def grad_check_sparse(f, x, analytic_grad, num_checks): """ sample a few random elements and only return numerical in this dimensions. """ h = 1e-5 x.shape for i in range(num_checks): ix = tuple([randrange(m) for m in x.shape]) x[ix] += h # increment by h fxph = f(x) # evaluate f(x + h) x[ix] -= 2 * h # increment by h fxmh = f(x) # evaluate f(x - h) x[ix] += h # reset grad_numerical = (fxph - fxmh) / (2 * h) grad_analytic = analytic_grad[ix] rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic)) print ('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error)) # Вычисляем значение функции потерь и её градиент для W. loss, grad = svm_loss(W, x_test, y_test, 0.0) # Проверяем численно f = lambda w: svm_loss(w, x_test, y_test, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # Проверяем численно, используя регуляризацию. # Вы ведь не забыли реализовать регуляризацию, правда? loss, grad = svm_loss(W, x_test, y_test, 5e1) f = lambda w: svm_loss(w, x_test, y_test, 5e1)[0] grad_numerical = grad_check_sparse(f, W, grad, 10)numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, relative error: nan numerical: 0.000000 analytic: 0.000000, [...]**Задача 2:** Реализуйте методы класса SVM. Для тренировки используйте градиентный спуск.class SVM(): def __init__(self): self.W = np.random.randn(10, 3073) * 0.0001 def train(self, x_train, y_train, learning_rate=1e-7, reg=2.5e4, num_iters=1000, verbose=True): #Здесь (на месте заглушки) впишите релаизацию градиентного спуска, используя функцию потерь, которую вы определелили выше. #Если обучение проходит слишком медленно, возможно необходимо что-то улучшить? loss_history = [] for it in range(num_iters): loss = 0 loss_history.append(loss) return loss_history def predict(self, y): pass Проверяем полученную реализацию. 
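For orientation only (this is a generic sketch, not the assignment solution), a plain full-batch gradient-descent loop has the shape below; it assumes a loss function with the same `(loss, gradient)` return signature as `svm_loss` defined above, and the names `gradient_descent` and `loss_fn` are illustrative:

```python
import numpy as np

def gradient_descent(loss_fn, W0, lr=1e-7, num_iters=100):
    """loss_fn(W) must return a (loss, dW) tuple, as svm_loss does above."""
    W = W0.copy()
    history = []
    for _ in range(num_iters):
        loss, dW = loss_fn(W)   # evaluate the loss and its gradient
        W -= lr * dW            # step against the gradient
        history.append(loss)
    return W, history

# Hypothetical usage with the objects defined in this notebook:
# W_final, hist = gradient_descent(lambda W: svm_loss(W, x_train, y_train, 2.5e4), W)
```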
tic = time.time() svm = SVM() loss_hist = svm.train(x_train, y_train, learning_rate=1e-7, reg=2.5e4, num_iters=1500, verbose=True) toc = time.time() print('That took %fs' % (toc - tic)) # Хорошая идея для отладки -- нарисовать график отношения функци потерь к номеру итерации plt.plot(loss_hist) plt.xlabel('Iteration number') plt.ylabel('Loss value') plt.show() # Теперь вычислим точность на тренировочном и тестовом множествах y_train_pred = svm.predict(x_train) print('training accuracy: %f' % (np.mean(y_train == y_train_pred), )) y_val_pred = svm.predict(X_val) print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), )) def plot_weights(best_svm): w = best_svm.W[:,:-1] # strip out the bias w = w.reshape(10, 32, 32, 3) w_min, w_max = np.min(w), np.max(w) classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for i in range(10): plt.subplot(2, 5, i + 1) # Rescale the weights to be between 0 and 255 wimg = 255.0 * (w[i, :, :, :].squeeze() - w_min) / (w_max - w_min) plt.imshow(wimg.astype('uint8')) plt.axis('off') plt.title(classes[i])**Задача 3:** Подберите при помощи поиска по сетке наулучшие параметры learning rate и regularization coefficient. Для полученной модели вызовите функцию plot_weights(best_svm). Впишите свой код под этой ячейкой. **Задача 4:** О чём говорят полученные изображения? Какие выводы мы можем сделать? Впишите ответ в поле ниже **Задача 5:** Повторим упражнение для cross entropy lossdef crossentropy_loss(W, X, y, reg): """ Cross entropy loss function Inputs: - W: Матрица весов - X: Данные - y: Целевой признак - reg: (float) Коэффициент регуляризации Returns: a tuple: - loss (одно число) - градиент по W """ # Впишите свой код на место заглушки loss = 0 dW = np.zeros(W.shape) return loss, dW # Вычисляем значение функции потерь и её градиент для W. loss, grad = crossentropy_loss(W, x_test, y_test, 0.0) # Грубая проверка, значения должны быть близки к -log(0.1). print 'loss: %f' % loss print 'sanity check: %f' % (-np.log(0.1))**Задача 6:** Кстати, почему такая проверка справедлива? 
Объяснитеf = lambda w: crossentropy_loss(w, x_test, y_test, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) loss, grad = crossentropy_loss(W, x_test, y_test, 5e1) f = lambda w: crossentropy_loss(w, x_test, y_test, 5e1)[0] grad_numerical = grad_check_sparse(f, W, grad, 10)Model evaluationimport logging import matplotlib.pyplot as plt from sklearn.metrics import classification_report from trojan_defender import (experiment, set_root_folder, datasets, set_db_conf, plot) from trojan_defender.detect import saliency_ as saliency # config logging logging.basicConfig(level=logging.INFO) # matplotlib size plt.rcParams['figure.figsize'] = (10, 10) # root folder (experiments will be saved here) set_root_folder('/home/Edu/data') # db configuration (experiments metadata will be saved here) set_db_conf('db.yaml')Reload experiment# reload experiment model, dataset, metadata = experiment.load('30-Apr-2018@01-03-14') clean = dataset.load_clean() for m in metadata: print(m, metadata[m], '\n') plot.grid(dataset.x_test, dataset.y_test_cat)Test setplot.grid(dataset.x_test, dataset.y_test_cat)Test set - only patchedplot.grid(dataset.x_test[dataset.test_poisoned_idx], dataset.y_test_cat[dataset.test_poisoned_idx])Test set - non-patched dataplot.grid(dataset.x_test[~dataset.test_poisoned_idx], dataset.y_test_cat[~dataset.test_poisoned_idx])Patchplot.grid([dataset.a_patch() for _ in range(12)])Predictions on the test datay_pred = model.predict_classes(dataset.x_test) y_true = dataset.y_test_cat y_orig = clean.y_test_cat def label_getter(l, i): y_pred, y_true, y_orig = l return 'P:{} T:{} O:{}'.format(y_pred[i], y_true[i], y_orig[i]) # plot some predictions in the test set plot.grid(dataset.x_test, [y_pred, y_true, y_orig], label_getter)Detection(sms_model, outs, recovered, sample, res, mask_prop) = saliency.detect(model, clean, random_trials=100) resINFO:trojan_defender.detect.saliency_:Computing saliency... INFO:trojan_defender.detect.saliency_:Finding outleirs... INFO:trojan_defender.detect.saliency_:Recovering mask... INFO:trojan_defender.detect.saliency_:Mask proportion is 0.026 INFO:trojan_defender.detect.saliency_:Sampling one observation per class in the clean dataset... INFO:trojan_defender.detect.saliency_:Predictions are: [0 1 2 3 4 5 6 7 8 9]Parts of Speech Assessment - Solutions For this assessment we'll be using the short story [The Tale of Peter Rabbit](https://en.wikipedia.org/wiki/The_Tale_of_Peter_Rabbit) by (1902). The story is in the public domain; the text file was obtained from [Project Gutenberg](https://www.gutenberg.org/ebooks/14838.txt.utf-8).# RUN THIS CELL to perform standard imports: import spacy nlp = spacy.load('en_core_web_sm') from spacy import displacy with open('data/owlcreek.txt') as f: doc = nlp(f.read())**2. 
For every token in the third sentence, print the token text, the POS tag, the fine-grained TAG tag, and the description of the fine-grained tag.**# Enter your code here: for token in list(doc.sents)[3]: print(f'{token.text:{12}} {token.pos_:{6}} {token.tag_:{6}} {spacy.explain(token.tag_)}')The DET DT determiner man NOUN NN noun, singular or mass 's PART POS possessive ending hands NOUN NNS noun, plural were AUX VBD verb, past tense behind ADV RB adverb SPACE _SP None his PRON PRP$ pronoun, possessive back NOUN NN noun, singular or mass , PUNCT , punctuation mark, comma the DET DT determiner wrists NOUN NNS noun, plural bound VERB VBN verb, past participle with ADP IN conjunction, subordinating or preposition a DET DT determiner cord NOUN NN noun, singular or mass . PUNCT . punctuation mark, sentence closer**3. Provide a frequency list of POS tags from the entire document**POS_counts = doc.count_by(spacy.attrs.POS) for k,v in sorted(POS_counts.items()): print(f'{k}. {doc.vocab[k].text:{5}}: {v}')84. ADJ : 257 85. ADP : 491 86. ADV : 200 87. AUX : 184 89. CCONJ: 129 90. DET : 584 91. INTJ : 5 92. NOUN : 857 93. NUM : 29 94. PART : 65 95. PRON : 393 96. PROPN: 42 97. PUNCT: 571 98. SCONJ: 47 100. VERB : 506 103. SPACE: 475**4. CHALLENGE: What percentage of tokens are nouns?**HINT: the attribute ID for 'NOUN' is just above : 92percent = 100*POS_counts[92]/len(doc) print(f'{POS_counts[92]}/{len(doc)} = {percent:{.4}}%')857/4835 = 17.72%**5. Display the Dependency Parse for the third sentence**displacy.render(list(doc.sents)[3], style='dep', jupyter=True, options={'distance': 100})**6. Show the first two named entities from 's *The Tale of * **for ent in doc.ents[:2]: print(ent.text+' - '+ent.label_+' - '+str(spacy.explain(ent.label_)))Alabama - GPE - Countries, cities, states twenty feet - QUANTITY - Measurements, as of weight or distance**7. How many sentences are contained in the book ?**len([sent for sent in doc.sents])**8. CHALLENGE: How many sentences contain named entities?**list_of_sents = [nlp(sent.text) for sent in doc.sents] list_of_ners = [doc for doc in list_of_sents if doc.ents] len(list_of_ners)**9. CHALLENGE: Display the named entity visualization for `list_of_sents[0]` from the previous problem**displacy.render(list_of_sents[99:110], style='ent', jupyter=True) Analytic GoalMonitor the average return per hour of Bitcoin over the last month and look for statistically significant trendssc %%help %%configure -f { "conf":{ "spark.driver.memory": "7G", "spark.executor.memory": "7G", "spark.jars.packages": "org.mongodb.spark:mongo-spark-connector_2.11:2.3.1", "spark.mongodb.input.uri": "mongodb+srv://emre:/msds697_project_2.crypto_exchange" } } df = spark.read.format('com.mongodb.spark.sql.DefaultSource').load() df_rdd = df.rdd from datetime import datetime btc = ( df_rdd.filter(lambda x: x[2] == "btcusd") .map(lambda x: (datetime.fromtimestamp(x[5] / 1000), x[1])) .filter(lambda x: (x[0].year == 2021 and x[0].minute == 0)) .cache() ) btc.collect()Model spectra and synthetic photometry In this tutorial, we will have a look at some spectra of the DRIFT-PHOENIX atmospheric model. The spectra are first downloaded and added to the database. Then we will use the functionalities of [ReadModel](https://species.readthedocs.io/en/latest/species.read.html?highlight=ReadModelspecies.read.read_model.ReadModel) to extract a spectrum and calculate a photometric flux. 
Getting started We start by importing the required Python modules.import speciesThen we initialize [species](https://species.readthedocs.io/en/latest/species.html) with [SpeciesInit](https://species.readthedocs.io/en/latest/species.core.htmlspecies.core.init.SpeciesInit), which creates a default configuration file and the HDF5 database.species.SpeciesInit()Initiating species v0.5.0... [DONE] Creating species_config.ini... [DONE] Database: /Users/tomasstolker/applications/species/docs/tutorials/species_database.hdf5 Data folder: /Users/tomasstolker/applications/species/docs/tutorials/data Working folder: /Users/tomasstolker/applications/species/docs/tutorials Creating species_database.hdf5... [DONE] Creating data folder... [DONE]Adding model spectra to the database To store the spectra, we first create an instance of [Database](https://species.readthedocs.io/en/latest/species.data.html?highlight=database.Databasespecies.data.database.Database).database = species.Database()Let's check which atmospheric models are available by running the [available_models](https://species.readthedocs.io/en/latest/species.data.htmlspecies.data.database.Database.available_models) method of the [Database](https://species.readthedocs.io/en/latest/species.data.htmlspecies.data.database.Database) object._ = database.available_models()Available model grids: - AMES-Cond: - Label = ames-cond - Model parameters: ['teff', 'logg'] - Teff range (K): [100, 6600] - Wavelength range (um): [0.5, 40] - Resolution lambda/Dlambda: 4000 - File size: 150 MB - AMES-Dusty: - Label = ames-dusty - Model parameters: ['teff', 'logg'] - Teff range (K): [500, 4000] - Wavelength range (um): [0.5, 40] - Resolution lambda/Dlambda: 4000 - File size: 59 MB - ATMO: - Label = atmo - Model parameters: ['teff', 'logg'] - Teff range (K): [200, 3000] - Wavelength range (um): [0.4, 6000] - Resolution lambda/Dlambda: 10000 - File size: 425 MB - Reference: Phillips et al. (2020) - URL: https://ui.adsabs.harvard.edu/abs/2020A%26A...637A..38P/abstract - blackbody: - Label = blackbody - Model parameters: ['teff'] - Teff range (K): [10, 20000] - Wavelength range (um): [0.1, 5000] - Resolution l[...]Next, we will import the model spectra with the [add_model](https://species.readthedocs.io/en/latest/species.data.htmlspecies.data.database.Database.add_model) method of [Database](https://species.readthedocs.io/en/latest/species.data.htmlspecies.data.database.Database). This step will automatically download the DRIFT-PHOENIX spectra (R = 2000) to the [data_folder](https://species.readthedocs.io/en/latest/configuration.html). The dowloaded data will then be unpacked and added to the database. We restrict the temperature range to 1300-1700 K, so not all spectra are added to the databse.database.add_model(model='drift-phoenix', teff_range=(1300., 1700.))Downloading DRIFT-PHOENIX model spectra (229 MB)... [DONE] Unpacking DRIFT-PHOENIX model spectra (229 MB)... [DONE] Please cite Helling et al. (2008) when using DRIFT-PHOENIX in a publication Reference URL: https://ui.adsabs.harvard.edu/abs/2008ApJ...675L.105H/abstract Wavelength range (um) = 0.1 -50 Spectral resolution = 4000 Teff range (K) = 1300.0 - 1700.0 Adding DRIFT-PHOENIX model spectra... [DONE] Grid points stored in the database: - Teff = [1300. 1400. 1500. 1600. 1700.] - log(g) = [3. 3.5 4. 4.5 5. 5.5] - [Fe/H] = [-0.6 -0.3 -0. 0.3]Two of the grid points were missing in the original data and have been added with a linear, multidimensional interpolation. 
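To make the idea concrete, here is a minimal sketch of how a missing grid point could be filled by linear interpolation over neighbouring grid points. This is not the internal species implementation; the flux values below are made up purely for illustration:

```python
import numpy as np
from scipy.interpolate import griddata

# Existing (teff, logg) grid points and the flux of each spectrum at one wavelength
points = np.array([[1300., 4.0], [1400., 4.0], [1300., 4.5], [1400., 4.5]])
fluxes = np.array([1.0e-16, 1.4e-16, 0.9e-16, 1.3e-16])  # illustrative values only

# Reconstruct a missing point by linear interpolation between its neighbours
missing = np.array([[1350., 4.25]])
flux_filled = griddata(points, fluxes, missing, method='linear')
print(flux_filled)  # lies between the neighbouring flux values
```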
Interpolating the model grid We will read the spectra from the database by creating an instance of [ReadModel](https://species.readthedocs.io/en/latest/species.read.htmlspecies.read.read_model.ReadModel).read_model = species.ReadModel(model='drift-phoenix', wavel_range=(0.5, 10.))Let's see what the grid boundaries are from the spectra that are stored in the database.read_model.get_bounds()We will now interpolate the grid in the ($T_\mathrm{eff}$, $\log(g)$, $[\mathrm{Fe}/\mathrm{H}]$) space at some specific parameter values, which need to be provided in a dictionary. The radius and distance are optional, otherwise the emitted flux is given at the planet surface.model_param = {'teff':1510., 'logg':4.1, 'feh':0.1, 'radius': 1., 'distance': 100.}To interpolate a spectrum, we use the [get_model](https://species.readthedocs.io/en/latest/species.read.htmlspecies.read.read_model.ReadModel.get_model) method and provide the parameter dictionary, and also an optional spectral resolution. Together with `smooth=True`, the spectrum will be smoothed (but not resampeld) to the given spectral resolution.model_box = read_model.get_model(model_param=model_param, spec_res=200., smooth=True)The data is stored in a [ModelBox](https://species.readthedocs.io/en/latest/species.core.htmlspecies.core.box.ModelBox). Let's have a look at its content.model_box.open_box()Opening ModelBox... model = drift-phoenix type = None wavelength = [ 0.49989727 0.50002105 0.50014486 ... 9.99710369 9.99957902 10.00205496] flux = [9.44339097e-20 9.38011967e-20 9.31147817e-20 ... 1.53571943e-18 1.53924349e-18 1.54301688e-18] parameters = {'teff': 1510.0, 'logg': 4.1, 'feh': 0.1, 'radius': 1.0, 'distance': 100.0, 'luminosity': 4.729862212008143e-05, 'mass': 4.857062223118246} quantity = flux contribution = None bol_flux = NoneWe will now use the same atmospheric parameters but we will add some [ISM extinction](https://species.readthedocs.io/en/latest/species.util.html?species.util.dust_util.ism_extinction) with the relation from [Cardelli et al. (1989)](https://ui.adsabs.harvard.edu/abs/1989ApJ...345..245C/abstract). Therefore, we add the V band extinction and reddening parameters to the dictionary.model_param = {'teff':1510., 'logg':4.1, 'feh':0.1, 'radius': 1., 'distance': 100., 'ism_ext': 5., 'ism_red': 3.}We use again the [get_model](https://species.readthedocs.io/en/latest/species.read.htmlspecies.read.read_model.ReadModel.get_model) method and store the result in a different [ModelBox](https://species.readthedocs.io/en/latest/species.core.htmlspecies.core.box.ModelBox).model_ext = read_model.get_model(model_param=model_param, spec_res=200., smooth=True)The two boxes with the model spectra are provided to the [plot_spectrum](https://species.readthedocs.io/en/latest/species.plot.htmlspecies.plot.plot_spectrum.plot_spectrum). We also include some filter profiles to indicate where the telluric windows are.species.plot_spectrum(boxes=[model_box, model_ext], filters=['MKO/NSFCam.J', 'MKO/NSFCam.H', 'MKO/NSFCam.K', 'MKO/NSFCam.Lp', 'MKO/NSFCam.Mp'], offset=(-0.15, -0.04), xlim=(0.8, 5.), ylim=(0., 5.5e-17), legend={'loc': 'lower right', 'frameon': False, 'fontsize': 10.}, figsize=(7., 3.), output=None)Adding filter: MKO/NSFCam.J... [DONE] Adding filter: MKO/NSFCam.H... [DONE] Adding filter: MKO/NSFCam.K... [DONE] Adding filter: MKO/NSFCam.Lp... [DONE] Adding filter: MKO/NSFCam.Mp... [DONE] Plotting spectrum... 
[DONE]Extracting a spectrum at a grid point It is also possible to extract a spectrum at one of the grid points, which doesn't require any interpolation. Let's check with the [get_points](https://species.readthedocs.io/en/latest/species.read.htmlspecies.read.read_model.ReadModel.get_points) method what parameter values are stored in the database.read_model.get_points()We create a dictionary with values at one of the grid points.model_param = {'teff':1500., 'logg':4., 'feh':0.}And now use the [get_data](https://species.readthedocs.io/en/latest/species.read.htmlspecies.read.read_model.ReadModel.get_data) method to extract a spectrum.model_full = read_model.get_data(model_param)Let's make another plot with [plot_spectrum](https://species.readthedocs.io/en/latest/species.plot.htmlspecies.plot.plot_spectrum.plot_spectrum). The spectrum is now shown at the spectral resolution as stored in the database ($R = 2000$).species.plot_spectrum(boxes=[model_full], filters=None, offset=(-0.12, -0.05), xlim=(0.8, 5.), ylim=(0., 1e5), legend={'loc': 'upper right', 'fontsize': 10.}, figsize=(7., 3.), output=None)Plotting spectrum... [DONE]Calculating synthetic photometry The [ReadModel](https://species.readthedocs.io/en/latest/species.read.html?highlight=ReadModelspecies.read.read_model.ReadModel) class can also be used for calculating photometric fluxes and magnitudes. To do so, we create a new instance and set the `filter_name` argument to the [VLT/NACO M' filter](http://svo2.cab.inta-csic.es/svo/theory/fps/index.php?id=Paranal/NACO.Mp&&mode=browse&gname=Paranal&gname2=NACOfilter). This will automatically downloadd and addd the filter profile.read_model = species.ReadModel(model='drift-phoenix', filter_name='Paranal/NACO.Mp')Adding filter: Paranal/NACO.Mp... [DONE]We create again a dictionary with the parameters but now run the [get_flux](https://species.readthedocs.io/en/latest/species.read.html?highlight=ReadModelspecies.read.read_model.ReadModel.get_flux) method, which returns the flux in W m-2 um-1.model_param = {'teff':1510., 'logg':4.1, 'feh':0.1, 'radius': 1., 'distance': 100.} flux = read_model.get_flux(model_param) print(f'Flux (W m-2 um-1) = {flux[0]:.2e}')Flux (W m-2 um-1) = 1.33e-17Since we provided a radius and distance, the emitted flux at the planet surface has been scaled by (radius/distance)$^2$. Similarly, we can use the [get_magnitude](https://species.readthedocs.io/en/latest/species.read.html?highlight=ReadModelspecies.read.read_model.ReadModel.get_magnitude) method to calculate the magnitude for the NACO M' filter. Note that the returned absolute magnitude is set to `None` if the parameter dictionary does not contain a radius and distance.app_mag, abs_mag = read_model.get_magnitude(model_param) print(f'Apparent magnitude = {app_mag:.2f}') print(f'Absolute magnitude = {abs_mag:.2f}')Downloading Vega spectrum (270 kB)... [DONE] Adding Vega spectrum... 
[DONE] Apparent magnitude = 15.53 Absolute magnitude = 10.53**Clasificacion y metricas de evaluacion**## IMPORTAR LIBRERIAS DE TRABAJO # mate import numpy as np # modelos import sklearn # trabajo con tablas import pandas as pd # data externa from sklearn.datasets import fetch_openml # interfaz con sistema operativo import os # graficos import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # semilla para que los resultados sean reproducibles np.random.seed(42)**Cargar la data MNIST**Para mas contexto sobre la data ir al siguiente [link](https://conx.readthedocs.io/en/latest/MNIST.html).mnist = fetch_openml('mnist_784', version=1, as_frame=False) mnist.keys() # revisar data de entrenamiento X = mnist["data"] print(X.shape) print(type(X)) # convirtamos X a dataframe para inspeccionar mejor X_df = pd.DataFrame(X) X_df.head()Inspeccionemos que valores toman los pixeles del primer digito (la primera fila):X_df.iloc[0,:].hist(bins=10) # revisar el target y = mnist["target"] print(y.shape) print(type(y))(70000,) Como ejemplo, reconstruyamos graficamente el primer digito (primera fila) a partir de los valores de cada una de sus columnas.# plot de uno de los digitos primer_digito = X[0] primer_digito_imagen = primer_digito.reshape(28, 28) plt.imshow(primer_digito_imagen, cmap=mpl.cm.binary) plt.axis("off") plt.show()Confirmemos lo que dice la variable `y` para este primer digito:primer_label = y[0] print(primer_label)5Todo OK!# convertir y a un formato mas eficiente y = y.astype(np.uint8)**Entrenar modelos que encuentren los 5**- Este es un problema de clasificacion. - Es un caso binario porque el target es categorico y solo toma dos valores --cada observacion es un cinco, o no lo es--. Empezamos por definir las muestras de training y testing.# Definir data de training y testing X_train = X[:60000] y_train = y[:60000] X_test = X[60000:] y_test = y[60000:]- Los valores de `y` van del 1 al 9. Pero solo nos interesa saber si el digito es un 5 o no.- Lo siguiente sera generar el vector con el ***target*** (La variable a predecir). Debera tener valor `True` siempre que `y` sea igual a 5 y `False` cuando no.- Las variables que toman valor `True` cuando se cumple cierta condicion se conocen como *flag* o *dummy*.- **Nota:** (Para Python `True`=1 y `False`=0)y_train_5 = (y_train == 5) y_test_5 = (y_test == 5) print(y_train_5)[ True False False ... True False False]**Probar un modelo SGD**# importar el algoritmo from sklearn.linear_model import SGDClassifier # Definir sus hiperparametros sgd_clf = SGDClassifier(max_iter=1000, tol=1e-3, random_state=42) # entrenar el modelo sgd_clf.fit(X_train, y_train_5)Veamos que predice el modelo para el primer digito (usamos el metodo `predict`):primer_digito = X[0] sgd_clf.predict([primer_digito])`True`, el modelo predijo que es un 5! ⭐️ Veamos el performance de la metrica `accuracy` (el numero de aciertos totales) con validacion cruzada.from sklearn.model_selection import cross_val_score cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")Todo parece estar bien ... verdad? 
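Before answering, it is worth quantifying the class balance directly; a minimal check using the arrays already defined above:

```python
# Fraction of training digits that are 5s
print(y_train_5.mean())  # roughly 0.09: only about 9% of the digits are 5s
```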
**Probar un modelo Naive**Crearemos manualmente un modelo trivial que siempre predecira (ingenuamente) que un digito no es 5.from sklearn.base import BaseEstimator class ClasificadorNunca5(BaseEstimator): def fit(self, X, y=None): pass def predict(self, X): return np.zeros((len(X), 1), dtype=bool) nunca_5_clf = ClasificadorNunca5() cross_val_score(nunca_5_clf, X_train, y_train_5, cv=3, scoring="accuracy")Un clasificador aparentemente tan malo tiene una precision tremenda. Por que?# recordar cuantos digitos tenemos en la data de entrenamiento print(len(y_train_5)) # cuantos de estos son 5 print(y_train_5.sum())5421Solo el 10% de las observaciones son 5, asi que si siempre predigo que el digito no es 5, acertare el 90% de las veces. 🙀 **Analizar mas a fondo el performance de un clasificador**from sklearn.model_selection import cross_val_predict from sklearn.metrics import confusion_matrix**Matriz de confusion** Primero veamos el ideal. Cual seria la matriz de confusion de un modelo que acierta perfectamente.y_train_perfect_predictions = y_train_5 confusion_matrix(y_train_5, y_train_perfect_predictions) y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3) confusion_matrix(y_train_5, y_train_pred)**Precision***Precision* es la proporcion de observaciones que nuestro modelo predijo correctamente como `5` del total predicciones correctas e incorrectas.from sklearn.metrics import precision_score precision_score(y_train_5, y_train_pred)Tambien se puede calcular el precison a partir de la matriz de confusion:cm = confusion_matrix(y_train_5, y_train_pred) print(cm) # calcular precision cm[1, 1] / (cm[0, 1] + cm[1, 1])**Recall***Recall* es la proporcion de observaciones que nuestro modelo predijo como `5`, sobre la cantidad real de `5`s en la data.from sklearn.metrics import recall_score recall_score(y_train_5, y_train_pred)Tambien se puede calcular el *recall* a partir de la matriz de confusion:cm = confusion_matrix(y_train_5, y_train_pred) print(cm) # calcular recall cm[1, 1] / (cm[1, 0] + cm[1, 1])**Umbrales de decision y el trade-off Precission-Recall**- La prediccion en un modelo de clasificacion consiste en asignar un score a las observaciones sobre las que se esta prediciendo (*scoring*).- Cuando la prediccion una observacion sobrepasa cierto umbral de decision (*threshold*) se asigna el label correspondiente (`True` o 1 en un problema binario).- El umbral puede ser modificado por el analista. De hecho, el umbral depende de nuestra sensibilidad frente a falsos positivos o negativos. En un modelo SGD el umbral de decision es 0. Cuando una observacion tiene un `score` mayor que 0, se considera un positivo (`True` en nuestro caso). Veamos el score del primer digito que ya habiamos guardado en la variable `primer_digito`. Para obtener el score de un m,modelo de scikitlearn, usamos el metodo `decision_function()`.y_score = sgd_clf.decision_function([primer_digito]) y_scoreEfectivamente el score es mayor que cero. Por eso se le asigno el label `True`. Que pasa si modificamos el *threshold*? Primero partamos del original, es decir 0.threshold = 0 primer_digito_pred = (y_score > threshold) primer_digito_predAhora cambiemos el threshold a 8000, cual seria la prediccion?threshold = 8000 primer_digito_pred = (y_score > threshold) primer_digito_predComo saber que umbral es mejor usar? 
Primero calculemos el *score* de todas nuestras observaciones.y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")Ahora graficaremos el **recall** y **precision** para distintos valores del **threshold**.from sklearn.metrics import precision_recall_curve precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores) def plot_precision_recall_vs_threshold(precisions, recalls, thresholds): plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2) plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2) plt.legend(loc="center right", fontsize=16) plt.xlabel("Threshold", fontsize=16) plt.grid(True) plt.axis([-50000, 50000, 0, 1]) recall_90_precision = recalls[np.argmax(precisions >= 0.90)] threshold_90_precision = thresholds[np.argmax(precisions >= 0.90)] plt.figure(figsize=(8, 4)) plot_precision_recall_vs_threshold(precisions, recalls, thresholds) plt.plot([threshold_90_precision, threshold_90_precision], [0., 0.9], "r:") plt.plot([-50000, threshold_90_precision], [0.9, 0.9], "r:") plt.plot([-50000, threshold_90_precision], [recall_90_precision, recall_90_precision], "r:") plt.plot([threshold_90_precision], [0.9], "ro") plt.plot([threshold_90_precision], [recall_90_precision], "ro") plt.show()Otra forma de evidenciar el *trade-off* es graficando *precision* vs *recall*:(y_train_pred == (y_scores > 0)).all() def plot_precision_vs_recall(precisions, recalls): plt.plot(recalls, precisions, "b-", linewidth=2) plt.xlabel("Recall", fontsize=16) plt.ylabel("Precision", fontsize=16) plt.axis([0, 1, 0, 1]) plt.grid(True) plt.figure(figsize=(8, 6)) plot_precision_vs_recall(precisions, recalls) plt.plot([recall_90_precision, recall_90_precision], [0., 0.9], "r:") plt.plot([0.0, recall_90_precision], [0.9, 0.9], "r:") plt.plot([recall_90_precision], [0.9], "ro") plt.show()Recommendation System with RBM Recommendation system is an algorithm that recommends items by trying to find users that are similar to each other based on their item ratings. Table of contents- Acquiring the Data- Loading in the Data- The Restricted Boltzmann Machine model- Setting the Model's Parameters- Recommendation Acquiring the Data The datasets we're going to use were acquired by [GroupLens](http://grouplens.org/datasets/movielens/) and contain movies, users and movie ratings by these users. With the datasets in place, let's now import the necessary libraries. We will be using [Tensorflow](https://www.tensorflow.org/) and [Numpy](http://www.numpy.org/) to model and initialize our Restricted Boltzmann Machine and [Pandas](http://pandas.pydata.org/pandas-docs/stable/) to manipulate our datasets.import tensorflow as tf # Numpy contains helpful functions for efficient mathematical calculations. import numpy as np # Dataframe manipulation library. import pandas as pd # Graph plotting library. import matplotlib.pyplot as plt %matplotlib inlineLoading in the DataLet's begin by loading in our data with Pandas. The `.dat` files containing our data are similar to CSV files, but instead of using the ',' (comma) character to separate entries, it uses '::' (two colons) characters instead. To let Pandas know that it should separate data points at every '::', we have to specify the `sep='::'` parameter when calling the function.Additionally, we also pass the `header=None` parameter due to the fact that our files don't contain any headers.Let's start with the `movies.dat` file and take a look at its structure:# Loading in the movies dataset. 
movies_df = pd.read_csv('./resources/ml-1m/movies.dat', sep='::', header=None, engine='python') movies_df.head()We can do the same for the `ratings.dat` file:# Loading in the ratings dataset. ratings_df = pd.read_csv('./resources/ml-1m/ratings.dat', sep="::", header=None, engine='python') ratings_df.head()So our `movies_df` variable contains a dataframe that stores a movie's unique ID number, title and genres, while our `ratings_df` variable stores a unique User ID number, a movie ID that the user has watched, the user's rating and when the user rated that movie.Let's now rename the columns in these dataframes so we can better convey their data more intuitively:movies_df.columns = ['MovieID', 'Title', 'Genres'] ratings_df.columns = ['UserID', 'MovieID', 'Rating', 'Timestamp']Here's our final `movies_df`:movies_df.head()And our final `ratings_df`:ratings_df.head()The Restricted Boltzmann Machine model The Restricted Boltzmann Machine model has two layers of neurons, one of which is what we call a visible input layer and the other is called a hidden layer. The hidden layer is used to learn features from the information fed through the input layer. For our model, the input is going to contain X neurons, where X is the amount of movies in our dataset. Each of these neurons will possess a normalized rating value varying from 0 to 1 - 0 meaning that a user has not watched the movie and the closer the value is to 1, the more the user likes the movie that neuron's representing. These normalized values, of course, will be extracted and normalized from the ratings dataset.After passing in the input, we train the RBM on it and have the hidden layer learn its features. These features are what we use to reconstruct the input, which in our case, will predict the ratings for movies that the user hasn't watched, which is exactly what we can use to recommend movies!We will now begin to format our dataset to follow the model's expected input. Formatting the Data First let's see how many movies we have and see if the movie ID's correspond with that value:len(movies_df) movies_df.tail()As it is possible to notice, we have 3883 movies, while our ID's vary from 1 to 3952. Due to this, we won't be able to index movies through their ID since we would get memory indexing errors. To amend this, we can create a column that shows what spot in our list that particular movie is in:movies_df['List Index'] = movies_df.index movies_df.head()With that, let's merge the ratings dataframe into the movies one so we can have the List Index values in both dataframes. Additionally we're also going to drop the Timestamp, Title and Genres columns since we won't be needing it to make recommendations.# Merging movies_df with ratings_df by MovieID. merged_df = movies_df.merge(ratings_df, on='MovieID') # Dropping unecessary columns. merged_df = merged_df.drop('Timestamp', axis=1).drop('Title', axis=1).drop('Genres', axis=1) # Displaying the result. merged_df.head()Let's also group up the users by their user IDs and take a look at one of them.# Group up by UserID. user_group = merged_df.groupby('UserID') user_group.first().head()Now, we can start formatting the data into input for the RBM. We're going to store the normalized users ratings into a list of lists called `trX`.# Amount of users used for training. amount_of_users = 1000 # Creating the training list. trX = [] # For each user in the group. for user_id, cur_user in user_group: # Create a temp that stores every movie's rating. 
temp = [0]*len(movies_df) # For each movie in cur_user's movie list. for num, movie in cur_user.iterrows(): # Divide the rating by 5 and store it. temp[movie['List Index']] = movie['Rating'] / 5.0 # Now add the list of ratings into the training list. trX.append(temp) # Check to see if we finished adding in the amount of users for training. if amount_of_users == 0: break amount_of_users -= 1Setting the Model's Parameters Next, let's start building the RBM with Tensorflow. We'll begin by first determining the number of hidden layers and then creating placeholder variables for storing the visible layer biases, hidden layer biases and weights that connect the hidden layer with the visible one. We will be arbitrarily setting the amount of hidden layers to 20.hidden_units = 20 visible_units = len(movies_df) vb = tf.placeholder("float", [visible_units]) # Number of unique movies. hb = tf.placeholder("float", [hidden_units]) # Number of features we're going to learn. W = tf.placeholder("float", [visible_units, hidden_units])We then move on to creating the visible and hidden layer units and setting their activation functions. In this case, we will be using the `tf.sigmoid` and `tf.relu` functions as nonlinear activations since it's what is usually used in RBM's.# Phase 1: Input Processing. v0 = tf.placeholder("float", [None, visible_units]) _h0 = tf.nn.sigmoid(tf.matmul(v0, W) + hb) h0 = tf.nn.relu(tf.sign(_h0 - tf.random_uniform(tf.shape(_h0)))) # Phase 2: Reconstruction. _v1 = tf.nn.sigmoid(tf.matmul(h0, tf.transpose(W)) + vb) v1 = tf.nn.relu(tf.sign(_v1 - tf.random_uniform(tf.shape(_v1)))) h1 = tf.nn.sigmoid(tf.matmul(v1, W) + hb)Now we set the RBM training parameters and functions.# Learning rate. alpha = 1.0 # Create the gradients. w_pos_grad = tf.matmul(tf.transpose(v0), h0) w_neg_grad = tf.matmul(tf.transpose(v1), h1) # Calculate the Contrastive Divergence. CD = (w_pos_grad - w_neg_grad) / tf.to_float(tf.shape(v0)[0]) # Create methods to update the weights and biases. update_w = W + alpha * CD update_vb = vb + alpha * tf.reduce_mean(v0 - v1, 0) update_hb = hb + alpha * tf.reduce_mean(h0 - h1, 0)Set the error function, which in this case will be the Mean Absolute Error Function.err = v0 - v1 err_sum = tf.reduce_mean(err * err)We also have to initialize our variables. Thankfully, NumPy has a handy `zeros` function for this.# Current weight. cur_w = np.zeros([visible_units, hidden_units], np.float32) # Current visible unit biases. cur_vb = np.zeros([visible_units], np.float32) # Current hidden unit biases. cur_hb = np.zeros([hidden_units], np.float32) # Previous weight. prv_w = np.zeros([visible_units, hidden_units], np.float32) # Previous visible unit biases. prv_vb = np.zeros([visible_units], np.float32) # Previous hidden unit biases. prv_hb = np.zeros([hidden_units], np.float32) sess = tf.Session() sess.run(tf.global_variables_initializer())Now we train the RBM with 15 epochs with each epoch using 10 batches with size 100. 
After training, we print out a graph with the error by epoch.epochs = 15 batch_size = 100 errors = [] for i in range(epochs): for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)): batch = trX[start:end] cur_w = sess.run(update_w, feed_dict={v0: batch, W: prv_w, vb: prv_vb, hb: prv_hb}) cur_vb = sess.run(update_vb, feed_dict={v0: batch, W: prv_w, vb: prv_vb, hb: prv_hb}) cur_hb = sess.run(update_hb, feed_dict={v0: batch, W: prv_w, vb: prv_vb, hb: prv_hb}) prv_w = cur_w prv_vb = cur_vb prv_hb = cur_hb errors.append(sess.run(err_sum, feed_dict={v0: trX, W: cur_w, vb: cur_vb, hb: cur_hb})) print(errors[-1]) plt.plot(errors) plt.ylabel('Error') plt.xlabel('Epoch') plt.show()0.12330387 0.082664765 0.06800283 0.060190666 0.050763827 0.047311626 0.045593128 0.044372823 0.043658838 0.04265177 0.04241857 0.041740485 0.041191548 0.04077561 0.040422566Recommendation We can now predict movies that an arbitrarily selected user might like. This can be accomplished by feeding in the user's watched movie preferences into the RBM and then reconstructing the input. The values that the RBM gives us will attempt to estimate the user's preferences for movies that he or she hasn't watched based on the preferences of the users that the RBM was trained on.# Selecting the input user. input_user = [trX[75]] # Feeding in the user and reconstructing the input. hh0 = tf.nn.sigmoid(tf.matmul(v0, W) + hb) vv1 = tf.nn.sigmoid(tf.matmul(hh0, tf.transpose(W)) + vb) feed = sess.run(hh0, feed_dict={v0: input_user, W: prv_w, hb: prv_hb}) rec = sess.run(vv1, feed_dict={hh0: feed, W: prv_w, vb: prv_vb})Now we can list the 20 most recommended movies for our mock user by sorting it by their scores given by our model.scored_movies_df_75 = movies_df scored_movies_df_75["Recommendation Score"] = rec[0] scored_movies_df_75.sort_values(["Recommendation Score"], ascending=False).head(20)So, how to recommend the movies that the user has not watched yet? Lets first find the __User ID__ of our mock user:merged_df.iloc[75]Now, we can find all the movies that our mock user has watched before:movies_df_75 = merged_df[merged_df['UserID']==215] movies_df_75.head()In the next cell, we merge all the movies that our mock user has watched with the predicted scores based on his historical data:# Merging movies_df with ratings_df by MovieID. merged_df_75 = scored_movies_df_75.merge(movies_df_75, on='MovieID', how='outer') # Dropping unecessary columns. merged_df_75 = merged_df_75.drop('List Index_y', axis=1).drop('UserID', axis=1)Lets sort it and take a look at the firt 20 rows:merged_df_75.sort_values(["Recommendation Score"], ascending=False).head(20)Extracting Time Series[▲ Overview](0.0-Overview.ipynb)[◀ Loading and Decoding Dataset](2.0-Loading-dataset.ipynb)[▶ Data Exploration](3.0-Exploring-timeseries.ipynb)import pandas as pd from australian_housing import paths from australian_housing.data.extract_timeseries import new_south_wales_index df = pd.read_csv(paths.manager.interim_data_file, index_col=0) df.head() for head in ('Measure', 'Sector of Ownership', 'Type of work', 'Type of building', 'Geography Level', 'Region', 'Frequency'): df[head] = df[head].astype('category') df.describe(include='category')In this dataset `Measure`, `Sector of Ownership`, `Type of work` and `Frequency` do not contain further information. 
For `Type of building`, `Geography Level`, and `Region` we need to select the correct values requested in the exercise (`Type of building` = `Houses`, `Geography Level` = `States and Territories`, and `Region` = `New South Wales`).df['Measure'].unique() df['Sector of Ownership'].unique() df['Type of work'].unique() df['Type of building'].unique() df['Geography Level'].unique() df['Region'].unique() nsw = df[new_south_wales_index(df)] nsw nsw[['Value']].head()1. Defining the Question a) Specifying the Data Analytic Question> To figure out how we can predict which individuals are most likely to have or use a bank account b) Defining the Metric for Success> Our analysis will be deemed successful if we are able to get the specific indicators that we can use to accurately predict individuals most likely to have a bank account. c) Understanding the context> Traditionally, access to bank accounts has been regarded as an indicator of financial inclusion. Despite the proliferation of mobile money in Africa and the growth of innovative fintech solutions, banks still play a pivotal role in facilitating access to financial services. Access to bank accounts enables households to save and facilitate payments while also helping businesses build up their creditworthiness and improve their access to other financial services. Therefore, access to bank accounts is an essential contributor to long-term economic growth. d) Recording the Experimental Design> Our experimental design will include the following:1. Defining the question2. Reading the Data3. Checking the Data4. Tidying the dataset5. Exploratory Analysis6. Implementing the Solution7. Challenging the solution8. Follow-up questions e) Data Relevance> The data provided is from Kenya, Tanzania, Rwanda & Uganda from 2016-2018 and contains data on features such as age, gender, and education level, among others, which are very relevant in answering our question. However, seeing as it was collected over 2 years ago and given the dynamism in the banking industry, the insights provided may not represent the current situation. 2. Reading the Data# Loading the required libraries first import pandas as pd import numpy as np import seaborn as sns import scipy.stats as stats import matplotlib from matplotlib import pyplot as plt # Loading our datasets from the urls provided # Dataset: http://bit.ly/FinancialDataset # df = pd.read_csv('http://bit.ly/FinancialDataset') df3. Checking the Data# Determining the no. 
of records in our dataset # df.shape # Detecting our column names # df.columns> Our dataset has 23,524 rows and 13 columns.> The column names are 'country', 'year', 'uniqueid', 'Has a Bank account', 'Type of Location', 'Cell Phone Access', 'household_size', 'Respondent Age', 'gender_of_respondent', 'The relathip with head', 'marital_status', 'Level of Educuation', 'Type of Job'.# Previewing the top of our dataset # df.head(10) # Previewing the bottom of our dataset # df.tail(10) # Checking whether each column has an appropriate datatype # df.dtypes> We can see that most of our data consists of objects with exceptions on the year, household size & respondent age, which is appropriate for our dataframe.# Checking the dataframe's information # df.info() RangeIndex: 23524 entries, 0 to 23523 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 country 23510 non-null object 1 year 23524 non-null int64 2 uniqueid 23524 non-null object 3 Has a Bank account 23488 non-null object 4 Type of Location 23509 non-null object 5 Cell Phone Access 23513 non-null object 6 household_size 23496 non-null float64 7 Respondent Age 23490 non-null float64 8 gender_of_respondent 23490 non-null object 9 The relathip with head 23520 non-null object 10 marital_status 23492 non-null object 11 Level of Educuation 23495 non-null object 12 Type of Job 23494 non-null object dtypes: float64(2), int64(1), object(10) memory usage: 2.3+ MB4. Tidying the Dataset# Checking for Outliers in our numerical columns # col_names = ['year', 'household_size', 'Respondent Age'] fig, ax = plt.subplots(len(col_names), figsize=(10,15)) for i, col_val in enumerate(col_names): sns.boxplot(x = df[col_val], ax=ax[i]) ax[i].set_title('A boxplot on the {} column'.format(col_val), fontsize=14) ax[i].set_xlabel(col_val, fontsize=12)> From the 'year' column, we can see some outliers which are erroneous as the period was between 2016-2018. We shall remove those.>> On the 'household_size' column we also have some outliers, which may be accurate as ther isn't a limit on african household sizes. However, these will affect our summary statistics, thus we shall remove them.>> The respondent age also has some outliers, which may be due to the fact that some households probably contain the grandparents and may be accurate. However, again these will affect our summary statistics thus we shall remove them# Removing outliers using IQR # Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 df = df[~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR))).any(axis=1)] df # Check the shape of the new dataset # df.shape> Our new dataset now has 22,903 rows and 13 columns# Identifying Missing Data # df.isnull().sum()> We have some missing data in our dataset. We shall remove them.# Dealing with the Missing data and checking if there is any more missing data # df.fillna( method ='ffill', inplace = True) df.isnull().sum() # Dropping irrelevant columns # drop_df = df[['uniqueid','The relathip with head']] df = df.drop(drop_df, axis= 1) # Check for missing values # df.isnull().sum() # Checking final shape of our dataset # df.shape5. 
5. Exploratory Analysis a) Univariate Analysis:We will use this to describe our data and find the trends within the different variables.# Mode # Calculating mode across different columns df.mode(axis=0)From the values above, we can see where most of our respondents fall.# Mean # Calculating the mean for our numerical columns df.mean(axis=0)Since years can only be whole numbers, we'll take our average as 2016. The average household size is 3.571890 ≈ 4, since household members can only be whole people. The average age across our dataset is 38 years old.# Median # Calculating the median for our numerical columns df.median(axis=0)The median year is 2017. Median household size is 3 while median age is 35.# Skewness # We will now calculate the skewness to check the asymmetry of the distribution df.skew()> All our numeric data is positively skewed, showing that the tail is bigger on the right side. This also means that the mean is greater than the mode.# Kurtosis # We will now calculate the kurtosis to check the tail of our distribution df.kurt()> All our numeric data have negative kurtosis values, signifying that they are platykurtic, thus light tailed. This might be because we have removed the outliers.# Range, Standard Deviation & Variance # We will now calculate the range, standard deviation and variance to check the spread of our data from the mean range1 = df['Respondent Age'].max() - df['Respondent Age'].min() range2 = df['household_size'].max() - df['household_size'].min() std = df.std() var = df.var() print('Range: Respondent Age ', range1 ,'Household size ', range2) print('Std deviation:', std) print('Variance:', var)Range: Respondent Age 67.0 Household size 9.0 Std deviation: year 0.844533 household_size 2.079246 Respondent Age 15.801188 dtype: float64 Variance: year 0.713235 household_size 4.323264 Respondent Age 249.677555 dtype: float64> The years have a low standard deviation, signifying that they are closely clustered, which is accurate as we only have 2016-2018.>> The Respondent age has a high standard deviation and variance, which shows that the data is spread over a wide range, evidenced by the range of 67.>> The household size has a medium range and standard deviation.# Graphical representation of the respondents by Country sns.countplot(df.country, color='blue',saturation=0.7) plt.xlabel('Country') plt.ylabel('Number of Respondents')/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning> Rwanda had the highest number of respondents compared to the rest.# Graphical representation of marital status sns.countplot(df.marital_status, color='pink',saturation=0.7) plt.xlabel('Marital_status') plt.ylabel('Number of Respondents') plt.title('Marital Status Distribution of Respondents', fontsize=10)/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning
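The repeated FutureWarning above comes from passing the column as a positional argument; a minimal sketch of the keyword-argument form the warning asks for (same plot, seaborn 0.11-style API assumed):

```python
# Equivalent countplot call with explicit keyword arguments, which avoids the FutureWarning
sns.countplot(x='marital_status', data=df, color='pink', saturation=0.7)
plt.xlabel('Marital_status')
plt.ylabel('Number of Respondents')
plt.title('Marital Status Distribution of Respondents', fontsize=10)
```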
> Most of the respondents are Married/Living together, while the smallest group are those who don't know.# Graphical representation of household size sns.countplot(df.household_size, color='magenta',saturation=0.7) plt.xlabel('Household Size') plt.ylabel('Number of Respondents') plt.title('Household size Distribution of Respondents', fontsize=10)/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. FutureWarning> Our distribution is skewed to the right, with most households consisting of 1-4 people.# Creating a histogram to visualize the age distribution of individuals in the sample population ages = df['Respondent Age'] plt.hist(ages, bins=10, histtype='bar', rwidth=0.9) plt.xlabel('Age', fontsize=14, labelpad=15) plt.ylabel('Number of Respondents', fontsize=10) plt.title('Age Distribution of Respondents', fontsize=10) plt.show()> The histogram is skewed to the right, indicating that most respondents were young; its mean is therefore greater than the mode.# Graphical representation of number of people with bank accounts label = df['Has a Bank account'].unique() explode = (0.1, 0.1) plt.pie(df['Has a Bank account'].value_counts().sort_values(),labels = label, explode = explode, autopct = '%1.1f%%') plt.title("Respondents with bank accounts") plt.axis('equal')> 85.8% of our respondents do not have a bank account.# Graphical representation of number of people with Cell phone access label = df['Cell Phone Access'].unique() explode = (0.1, 0.1) plt.pie(df['Cell Phone Access'].value_counts(),labels = label, explode = explode, autopct = '%1.1f%%') plt.title("Respondents with Cell Phone Access") plt.axis('equal')> Most of the respondents have cell phone access.# Graphical representation of the type of location label = df['Type of Location'].unique() explode = (0.1, 0.1) plt.pie(df['Type of Location'].value_counts(),labels = label, explode = explode, autopct = '%1.1f%%') plt.title("Rural/Urban Distribution") plt.axis('equal')Summary1. Most of our respondents are from Rwanda.2. The majority are married/living together.3. Most of the households in the study have 1-4 people.4. Our respondents mostly consist of people aged 40 and below.5. 85.8% of our respondents do not have bank accounts.6. 74.7% of our respondents have cell phone access.7. 60.6% of our respondents are from rural areas. b) Bivariate Analysis:We will now use this to check the relationships between pairs of variables and measure their strength.# Check for correlation in our numerical data # correlation = df.corr() correlation # Plotting a correlation heatmap sns.heatmap(correlation, xticklabels=correlation.columns, yticklabels=correlation.columns, annot=True)> From this, we can see that household size and the respondent age are weakly negatively correlated at -0.105352.
The two variables are also weakly negatively correlated to the year.# Creating a scatterplot to check the linear relationship between age and household size sns.regplot(x = df["Respondent Age"], y = df["household_size"], fit_reg=False) plt.title("Scatter plot of Household Size vs Respondent's Age", fontsize=15, y=1.015) plt.xlabel('Age', fontsize=14, labelpad=15) plt.ylabel('Household Size', fontsize=14, labelpad=15) plt.show()> Thus we cannot use the respondent's age to predict the household size.# Checking which type of job makes respondents more likely to have a bank account ax1 = df.groupby('Has a Bank account')['Type of Job'].value_counts(normalize=True).unstack() ax1.plot(kind='bar', stacked=True, title='Type of Job by bank account status') int_level = df['Has a Bank account'].value_counts()> From our plot we are able to see that self-employed individuals are more likely to have a bank account as compared to the other occupations. Those in informal employment are less likely.# Checking relationship between respondents with bank accounts and gender gender = df.groupby('gender_of_respondent')['Has a Bank account'].value_counts(normalize=True).unstack() colors= ['orange', 'green'] gender.plot(kind='bar', figsize=(8, 6), color=colors, stacked=True) plt.title('Respondents with Bank Accounts by Gender', fontsize=15, y=1.015) plt.xlabel('Gender', fontsize=14, labelpad=15) plt.xticks(rotation = 360) plt.ylabel('Respondents with bank accounts', fontsize=14, labelpad=15) plt.show()> From our plot, we can see that males have a higher likelihood of having a bank account as compared to females.# Checking whether respondents from urban or rural areas are more likely to have a bank account location=df.groupby('Type of Location')['Has a Bank account'].value_counts(normalize=True).unstack() colors= ['cyan', 'green'] location.plot(kind='bar', figsize=(8, 6), color=colors, stacked=True) plt.title('Respondents with bank accounts by Type of Location', fontsize=15, y=1.015) plt.xlabel('Type of Location', fontsize=14, labelpad=15) plt.xticks(rotation = 360) plt.ylabel('Respondents with bank accounts', fontsize=14, labelpad=15) plt.show()> Respondents from the urban areas are more likely to have bank accounts as compared to their counterparts in the rural areas.# Descriptive Statistics !pip install researchpy import researchpy as rp rp.summary_cont(df.groupby("Has a Bank account")['Respondent Age'])Requirement already satisfied: researchpy in /usr/local/lib/python3.7/dist-packages (0.3.2) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from researchpy) (1.4.1) Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from researchpy) (1.19.5) Requirement already satisfied: statsmodels in /usr/local/lib/python3.7/dist-packages (from researchpy) (0.10.2) Requirement already satisfied: patsy in /usr/local/lib/python3.7/dist-packages (from researchpy) (0.5.1) Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from researchpy) (1.1.5) Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from patsy->researchpy) (1.15.0) Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->researchpy) (2.8.1) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->researchpy) (2018.9)Summary1. Household size and the respondent age are weakly negatively correlated at -0.105352. 2. We cannot use the respondent's age to predict the household size. 3.
Males have a higher likelihood of having a bank account as compared to females. 4. Self-employed individuals are more likely to have a bank account as compared to other occupations. Those in informal employment are less likely. 5. Respondents from the urban areas are more likely to have bank accounts as compared to their counterparts in the rural areas. c) Multivariate Analysis: 1. Principal Component Analysis# We shall label-encode the columns that have 2 outcomes encode={"Has a Bank account" : {"Yes" : 1, "No": 0},"Type of Location" : {"Rural" : 1, "Urban" : 0},"Cell Phone Access" : {"Yes" : 1, "No" : 0}, "gender_of_respondent" : {"Female" : 1, "Male" : 1}} df.replace(encode, inplace=True) df.head() # Drop unnecessary columns df.drop(['country','year','marital_status','Level of Educuation','Type of Job'],axis=1,inplace=True) # Define our variables to work with in our dimension reduction X=df.drop('Has a Bank account',1) y=df['Has a Bank account'] # Split dataset into train and test sets and define the size of the test data (20%) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2,random_state=0) # Import the Standard Scaler from sklearn to normalise the distribution of the data from sklearn.preprocessing import StandardScaler sc=StandardScaler() X_train=sc.fit_transform(X_train) X_test=sc.transform(X_test) # Import the PCA for our analysis; it will only be fitted on the train data and all the factors will be considered. from sklearn.decomposition import PCA pca=PCA() X_train=pca.fit_transform(X_train) X_test=pca.transform(X_test) # Find out the variance for each principal component of our analysis. In our analysis we find that the first four components account for 100% of the classification of whether one has a bank account or not. The first three components account for 82.9% explained_variance=pca.explained_variance_ratio_ explained_variance # We shall use the first component for our analysis in our prediction model pca=PCA(n_components=1) X_train=pca.fit_transform(X_train) X_test=pca.transform(X_test) # Train our model using the Random Forest Classifier to help out in the classification from sklearn.ensemble import RandomForestClassifier classifier=RandomForestClassifier(max_depth=0.2, random_state=0) classifier.fit(X_train, y_train) # Test using the data that was set apart for the test to find out the efficiency of the model. y_pred=classifier.predict(X_test) # We find that the first Principal Component has an 85% predictive capacity. from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score cm=confusion_matrix(y_test,y_pred) print(cm) print('Accuracy', accuracy_score(y_test,y_pred)) # Test the model for factor analysis. !pip install factor-analyzer from factor_analyzer import calculate_bartlett_sphericity chi_square_value, p_value=calculate_bartlett_sphericity(df) chi_square_value, p_value # The dataset has no capacity for factor analysis, with a NaN KMO value from factor_analyzer.factor_analyzer import calculate_kmo kmo_all, kmo_model=calculate_kmo(df) kmo_all, kmo_model/usr/local/lib/python3.7/dist-packages/factor_analyzer/utils.py:248: UserWarning: The inverse of the variance-covariance matrix was calculated using the Moore-Penrose generalized matrix inversion, due to its determinant being at or very close to zero.
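Two details in the PCA cell above are worth double-checking. The `encode` mapping assigns 1 to both `"Female"` and `"Male"`, which leaves `gender_of_respondent` constant after the replacement, and `RandomForestClassifier` expects `max_depth` to be a positive integer (or `None`) rather than `0.2`. A sketch of the variant one probably intends is below; the distinct gender codes and the integer depth are assumptions and would change the reported variance ratios and accuracy:

```python
# Hypothetical corrected settings (not the values used in the cell above)
from sklearn.ensemble import RandomForestClassifier

encode = {"Has a Bank account": {"Yes": 1, "No": 0},
          "Type of Location": {"Rural": 1, "Urban": 0},
          "Cell Phone Access": {"Yes": 1, "No": 0},
          "gender_of_respondent": {"Female": 1, "Male": 0}}  # distinct code per gender

classifier = RandomForestClassifier(max_depth=5, random_state=0)  # integer depth, or None for unlimited
```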
warnings.warn('The inverse of the variance-covariance matrix ' /usr/local/lib/python3.7/dist-packages/factor_analyzer/utils.py:205: RuntimeWarning: divide by zero encountered in true_divide Is = np.sqrt(1 / np.diag(m)) /usr/local/lib/python3.7/dist-packages/factor_analyzer/utils.py:206: RuntimeWarning: invalid value encountered in multiply retval = Is * m * np.repeat(Is, numrows).reshape(numrows, numrows)2. Linear Discriminant Analysis# Divide the dataset and split the data into train and test from sklearn.model_selection import train_test_split x=df.iloc[:,1:6].values Y=df.iloc[:,0].values x_train, x_test, Y_train, Y_test=train_test_split(x,Y,test_size=0.2,random_state=0) # Create a normal distribution of the data from sklearn.preprocessing import StandardScaler sc=StandardScaler() x_train=sc.fit_transform(x_train) x_test=sc.transform(x_test) # Using the Random Forest Classifier in our classification model from sklearn.ensemble import RandomForestClassifier classifier=RandomForestClassifier(max_depth=0.2, random_state=0) classifier.fit(x_train, Y_train) Y_pred=classifier.predict(x_test) # Finding out the predictive capacity of our Linear Discriminant model. from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score cm=confusion_matrix(Y_test,Y_pred) print(cm) print('Our accuracy is ' + str(accuracy_score(Y_test,Y_pred)))[[3934 0] [ 647 0]] Our accuracy is 0.85876446190788047. Challenging the solution > The easy solution is nice because it is, well, easy, but you should never allow those results to hold the day. You should always be thinking of ways to challenge the results, especially if those results comport with your prior expectation.# Reviewing the Solution #Tabular data handling This module defines the main class to handle tabular data in the fastai library: [`TabularDataBunch`](/tabular.data.htmlTabularDataBunch). As always, there is also a helper function to quickly get your data.To allow you to easily create a [`Learner`](/basic_train.htmlLearner) for your data, it provides [`tabular_learner`](/tabular.data.htmltabular_learner).from fastai.gen_doc.nbdoc import * from fastai.tabular import * show_doc(TabularDataBunch)The best way to quickly get your data in a [`DataBunch`](/basic_data.htmlDataBunch) suitable for tabular data is to organize it in two (or three) dataframes. One for training, one for validation, and if you have it, one for testing. Here we are interested in a subsample of the [adult dataset](https://archive.ics.uci.edu/ml/datasets/adult).path = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') valid_idx = range(len(df)-2000, len(df)) df.head() cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'] dep_var = 'salary'The initialization of [`TabularDataBunch`](/tabular.data.htmlTabularDataBunch) is the same as [`DataBunch`](/basic_data.htmlDataBunch) so you really want to use the facotry method instead.show_doc(TabularDataBunch.from_df)Optionally, use `test_df` for the test set. The dependent variable is `dep_var`, while the categorical and continuous variables are in the `cat_names` columns and `cont_names` columns respectively. If `cont_names` is None then we assume all variables that aren't dependent or categorical are continuous. 
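For the adult sample above, that default can be made explicit. A minimal sketch (plain pandas, assuming the `df`, `cat_names`, and `dep_var` defined earlier):

```python
# Continuous variables default to everything that is neither categorical nor the dependent variable
cont_names = [c for c in df.columns if c not in cat_names + [dep_var]]
```

Passing this list explicitly to `TabularDataBunch.from_df` gives the same behaviour as leaving `cont_names` as None.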
The [`TabularProcessor`](/tabular.data.htmlTabularProcessor) in `procs` are applied to the dataframes as preprocessing, then the categories are replaced by their codes+1 (leaving 0 for `nan`) and the continuous variables are normalized. Note that the [`TabularProcessor`](/tabular.data.htmlTabularProcessor) should be passed as `Callable`: the actual initialization with `cat_names` and `cont_names` is done during the preprocessing.procs = [FillMissing, Categorify, Normalize] data = TabularDataBunch.from_df(path, df, dep_var, valid_idx=valid_idx, procs=procs, cat_names=cat_names)You can then easily create a [`Learner`](/basic_train.htmlLearner) for this data with [`tabular_learner`](/tabular.data.htmltabular_learner).show_doc(tabular_learner)`emb_szs` is a `dict` mapping categorical column names to embedding sizes; you only need to pass sizes for columns where you want to override the default behaviour of the model.show_doc(TabularList)Basic class to create a list of inputs in `items` for tabular data. `cat_names` and `cont_names` are the names of the categorical and the continuous variables respectively. `processor` will be applied to the inputs or one will be created from the transforms in `procs`.show_doc(TabularList.from_df) show_doc(TabularList.get_emb_szs) show_doc(TabularList.show_xys) show_doc(TabularList.show_xyzs) show_doc(TabularLine, doc_string=False)An object that will contain the encoded `cats`, the continuous variables `conts`, the `classes` and the `names` of the columns. This is the basic input for a dataset dealing with tabular data.show_doc(TabularProcessor)Create a [`PreProcessor`](/data_block.htmlPreProcessor) from `procs`. Undocumented Methods - Methods moved below this line will intentionally be hiddenshow_doc(TabularProcessor.process_one) show_doc(TabularList.new) show_doc(TabularList.get) show_doc(TabularProcessor.process) show_doc(TabularList.reconstruct)Psy1406 - Facenet vs. Face TransformerA Notebook for comparing the representations of a CNNFaceNet vs. FaceTransormer.cnn-facenet: https://github.com/timesler/facenet-pytorchface-transformer: https://github.com/zhongyy/Face-Transformer Step 0 - General SetupRunning this section will download some demo images and install some code needed to run the following setps. You should only need to run this step once at the start of a session. 
However, sometimes Google restarts your environment (e.g., if you are idle for a while, they shut down your session), so you might have to re-run this step if that happens.# INSTALL FACENET !pip install facenet-pytorch !pip install vit_pytorch # DOWNLOAD EXAMPLE IMAGES !mkdir -p images !wget -c https://www.dropbox.com/s/7tqlvb69lvx570h/BaldBear.jpg -q --show-progress -O /content/images/BaldBear.jpg !wget -c https://www.dropbox.com/s/nbct96tf4oqnr2q/BrownBear2.jpg -q --show-progress -O /content/images/BrownBear.jpg !wget -c https://www.dropbox.com/s/65p68g331kby809/Gorilla.jpg -q --show-progress -O /content/images/Gorilla.jpg !wget -c https://www.dropbox.com/s/be1hkifaz8u04y9/DiCaprio_Anchor.jpg -q --show-progress -O /content/images/DiCaprio_Anchor.jpg !wget -c https://www.dropbox.com/s/xn3y46bpccopdl7/DiCaprio_HardNegative.jpg -q --show-progress -O /content/images/DiCaprio_HardNegative.jpg !wget -c https://www.dropbox.com/s/8londclzzyj3oji/DiCaprio_NegativeClooney.jpg -q --show-progress -O /content/images/DiCaprio_NegativeClooney.jpg !wget -c https://www.dropbox.com/s/ddlfya3368jdhci/DiCaprio_Positive.jpg -q --show-progress -O /content/images/DiCaprio_Positive.jpg !wget -c https://www.dropbox.com/s/xyhnau99qmve89e/pitt1.jpg -q --show-progress -O /content/images/pitt1.jpg !wget -c https://www.dropbox.com/s/z7qqz49yjw60vh0/pitt2.jpg -q --show-progress -O /content/images/pitt2.jpg !wget -c https://www.dropbox.com/s/qkpvcr1hodiemn3/clooney1.jpg -q --show-progress -O /content/images/clooney1.jpg !wget -c https://www.dropbox.com/s/y0o1gyuhf33gf1l/clooney2.jpg -q --show-progress -O /content/images/clooney2.jpg !mkdir -p weights !wget -c https://www.dropbox.com/s/vlgldq0khdtxwox/Backbone_VIT_Epoch_2_Batch_20000_Time_2021-01-12-16-48_checkpoint.pth -q --show-progress -O /content/weights/vit_checkpoint.pth !wget -c https://www.dropbox.com/s/ss3bvbigk0ngv9u/Backbone_VITs_Epoch_2_Batch_12000_Time_2021-03-17-04-05_checkpoint.pth -q --show-progress -O /content/weights/vits_checkpoint.pth import os import numpy as np import torchvision.datasets as datasets import torchvision.transforms as transforms import torch from torch.utils.data import Dataset, DataLoader, random_split from glob import glob from pathlib import Path from PIL import Image import matplotlib.pyplot as plt import seaborn as sns; sns.set() import numpy as np %config InlineBackend.figure_format='retina' # %matplotlib notebook %matplotlib inline sns.set(rc={'figure.figsize':(15.7,8.27)}) np.set_printoptions(suppress=True) class ImageListDataset(Dataset): """""" def __init__(self, imgs, transform=None): self.root_dir = None self.files = imgs self.transform = transform def __getitem__(self, index): im = Image.open(self.files[index]) if self.transform: im = self.transform(im) return im, 0, index def __len__(self): return len(self.files) def __repr__(self): _repr_indent = 4 head = "Dataset " + self.__class__.__name__ body = ["Number of Images: {}".format(self.__len__())] if self.root_dir is not None: body.append("Root location: {}".format(self.root_dir)) if hasattr(self, "transform") and self.transform is not None: body += [repr(self.transform)] lines = [head] + [" " * _repr_indent + line for line in body] return '\n'.join(lines) def get_dataset(image_pairs, root_dir=Path('images')): root_dir = Path(root_dir) transform = transforms.Compose([ lambda x: x.convert('RGB'), transforms.Resize((224, 224)), ]) imgs = [root_dir/img for imgs in image_pairs for img in imgs] dataset = ImageListDataset(imgs=imgs, transform=transform) return dataset def 
show_grid(dataset): imgs = [] for image_num in range(0, len(dataset), 2): imgs.append(np.hstack( [np.array(dataset[image_num][0]), np.array(dataset[image_num+1][0])])) imgs = np.vstack(imgs) return Image.fromarray(imgs) ''' Utilities for instrumenting a torch model. InstrumentedModel will wrap a pytorch model and allow hooking arbitrary layers to monitor or modify their output directly. ''' import torch import numpy import types import copy from collections import OrderedDict, defaultdict class InstrumentedModel(torch.nn.Module): ''' A wrapper for hooking, probing and intervening in pytorch Modules. Example usage: ``` model = load_my_model() with inst as InstrumentedModel(model): inst.retain_layer(layername) inst.edit_layer(layername, ablation=0.5, replacement=target_features) inst(inputs) original_features = inst.retained_layer(layername) ``` ''' def __init__(self, model): super().__init__() self.model = model self._retained = OrderedDict() self._detach_retained = {} self._editargs = defaultdict(dict) self._editrule = {} self._hooked_layer = {} self._old_forward = {} if isinstance(model, torch.nn.Sequential): self._hook_sequential() def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def forward(self, *inputs, **kwargs): return self.model(*inputs, **kwargs) def retain_layer(self, layername, detach=True): ''' Pass a fully-qualified layer name (E.g., module.submodule.conv3) to hook that layer and retain its output each time the model is run. A pair (layername, aka) can be provided, and the aka will be used as the key for the retained value instead of the layername. ''' self.retain_layers([layername], detach=detach) def retain_layers(self, layernames, detach=True): ''' Retains a list of a layers at once. ''' self.add_hooks(layernames) for layername in layernames: aka = layername if not isinstance(aka, str): layername, aka = layername if aka not in self._retained: self._retained[aka] = None self._detach_retained[aka] = detach def stop_retaining_layers(self, layernames): ''' Removes a list of layers from the set retained. ''' self.add_hooks(layernames) for layername in layernames: aka = layername if not isinstance(aka, str): layername, aka = layername if aka in self._retained: del self._retained[aka] del self._detach_retained[aka] def retained_features(self, clear=False): ''' Returns a dict of all currently retained features. ''' result = OrderedDict(self._retained) if clear: for k in result: self._retained[k] = None return result def retained_layer(self, aka=None, clear=False): ''' Retrieve retained data that was previously hooked by retain_layer. Call this after the model is run. If clear is set, then the retained value will return and also cleared. ''' if aka is None: # Default to the first retained layer. aka = next(self._retained.keys().__iter__()) result = self._retained[aka] if clear: self._retained[aka] = None return result def edit_layer(self, layername, rule=None, **kwargs): ''' Pass a fully-qualified layer name (E.g., module.submodule.conv3) to hook that layer and modify its output each time the model is run. The output of the layer will be modified to be a convex combination of the replacement and x interpolated according to the ablation, i.e.: `output = x * (1 - a) + (r * a)`. 
''' if not isinstance(layername, str): layername, aka = layername else: aka = layername # The default editing rule is apply_ablation_replacement if rule is None: rule = apply_ablation_replacement self.add_hooks([(layername, aka)]) self._editargs[aka].update(kwargs) self._editrule[aka] = rule def remove_edits(self, layername=None): ''' Removes edits at the specified layer, or removes edits at all layers if no layer name is specified. ''' if layername is None: self._editargs.clear() self._editrule.clear() return if not isinstance(layername, str): layername, aka = layername else: aka = layername if aka in self._editargs: del self._editargs[aka] if aka in self._editrule: del self._editrule[aka] def add_hooks(self, layernames): ''' Sets up a set of layers to be hooked. Usually not called directly: use edit_layer or retain_layer instead. ''' needed = set() aka_map = {} for name in layernames: aka = name if not isinstance(aka, str): name, aka = name if self._hooked_layer.get(aka, None) != name: aka_map[name] = aka needed.add(name) if not needed: return for name, layer in self.model.named_modules(): if name in aka_map: needed.remove(name) aka = aka_map[name] self._hook_layer(layer, name, aka) for name in needed: raise ValueError('Layer %s not found in model' % name) def _hook_layer(self, layer, layername, aka): ''' Internal method to replace a forward method with a closure that intercepts the call, and tracks the hook so that it can be reverted. ''' if aka in self._hooked_layer: raise ValueError('Layer %s already hooked' % aka) if layername in self._old_forward: raise ValueError('Layer %s already hooked' % layername) self._hooked_layer[aka] = layername self._old_forward[layername] = (layer, aka, layer.__dict__.get('forward', None)) editor = self original_forward = layer.forward def new_forward(self, *inputs, **kwargs): original_x = original_forward(*inputs, **kwargs) x = editor._postprocess_forward(original_x, aka) return x layer.forward = types.MethodType(new_forward, layer) def _unhook_layer(self, aka): ''' Internal method to remove a hook, restoring the original forward method. ''' if aka not in self._hooked_layer: return layername = self._hooked_layer[aka] # Remove any retained data and any edit rules if aka in self._retained: del self._retained[aka] del self._detach_retained[aka] self.remove_edits(aka) # Restore the unhooked method for the layer layer, check, old_forward = self._old_forward[layername] assert check == aka if old_forward is None: if 'forward' in layer.__dict__: del layer.__dict__['forward'] else: layer.forward = old_forward del self._old_forward[layername] del self._hooked_layer[aka] def _postprocess_forward(self, x, aka): ''' The internal method called by the hooked layers after they are run. ''' # Retain output before edits, if desired. if aka in self._retained: if self._detach_retained[aka]: self._retained[aka] = x.detach() else: self._retained[aka] = x # Apply any edits requested. rule = self._editrule.get(aka, None) if rule is not None: x = rule(x, self, **(self._editargs[aka])) return x def _hook_sequential(self): ''' Replaces 'forward' of sequential with a version that takes additional keyword arguments: layer allows a single layer to be run; first_layer and last_layer allow a subsequence of layers to be run. ''' model = self.model self._hooked_layer['.'] = '.' 
self._old_forward['.'] = (model, '.', model.__dict__.get('forward', None)) def new_forward(this, x, layer=None, first_layer=None, last_layer=None): assert layer is None or ( first_layer is None and last_layer is None) first_layer, last_layer = [str(layer) if layer is not None else str(d) if d is not None else None for d in [first_layer, last_layer]] including_children = (first_layer is None) for name, layer in this._modules.items(): if name == first_layer: first_layer = None including_children = True if including_children: x = layer(x) if name == last_layer: last_layer = None including_children = False assert first_layer is None, '%s not found' % first_layer assert last_layer is None, '%s not found' % last_layer return x model.forward = types.MethodType(new_forward, model) def close(self): ''' Unhooks all hooked layers in the model. ''' for aka in list(self._old_forward.keys()): self._unhook_layer(aka) assert len(self._old_forward) == 0 def apply_ablation_replacement(x, imodel, **buffers): if buffers is not None: # Apply any edits requested. a = make_matching_tensor(buffers, 'ablation', x) if a is not None: x = x * (1 - a) v = make_matching_tensor(buffers, 'replacement', x) if v is not None: x += (v * a) return x def make_matching_tensor(valuedict, name, data): ''' Converts `valuedict[name]` to be a tensor with the same dtype, device, and dimension count as `data`, and caches the converted tensor. ''' v = valuedict.get(name, None) if v is None: return None if not isinstance(v, torch.Tensor): # Accept non-torch data. v = torch.from_numpy(numpy.array(v)) valuedict[name] = v if not v.device == data.device or not v.dtype == data.dtype: # Ensure device and type matches. assert not v.requires_grad, '%s wrong device or type' % (name) v = v.to(device=data.device, dtype=data.dtype) valuedict[name] = v if len(v.shape) < len(data.shape): # Ensure dimensions are unsqueezed as needed. assert not v.requires_grad, '%s wrong dimensions' % (name) v = v.view((1,) + tuple(v.shape) + (1,) * (len(data.shape) - len(v.shape) - 1)) valuedict[name] = v return v def subsequence(sequential, first_layer=None, last_layer=None, share_weights=False): ''' Creates a subsequence of a pytorch Sequential model, copying over modules together with parameters for the subsequence. Only modules from first_layer to last_layer (inclusive) are included. If share_weights is True, then references the original modules and their parameters without copying them. Otherwise, by default, makes a separate brand-new copy. 
''' included_children = OrderedDict() including_children = (first_layer is None) for name, layer in sequential._modules.items(): if name == first_layer: first_layer = None including_children = True if including_children: included_children[name] = layer if share_weights else ( copy.deepcopy(layer)) if name == last_layer: last_layer = None including_children = False if first_layer is not None: raise ValueError('Layer %s not found' % first_layer) if last_layer is not None: raise ValueError('Layer %s not found' % last_layer) if not len(included_children): raise ValueError('Empty subsequence') return torch.nn.Sequential(OrderedDict(included_children)) import os import numpy as np import pandas as pd from pathlib import Path from glob import glob from pprint import pprint from collections import OrderedDict import matplotlib.pyplot as plt import seaborn as sns; sns.set() from IPython.core.debugger import set_trace alexnet_pytorch_blocks = OrderedDict([ ('Conv1', ['features.0','features.1','features.2']), ('Conv2', ['features.3','features.4','features.5']), ('Conv3', ['features.6','features.7']), ('Conv4', ['features.8','features.9']), ('Conv5', ['features.10','features.11','features.12']), ('', ['avgpool']), ('fc6', ['classifier.0','classifier.1','classifier.2']), ('fc7', ['classifier.3','classifier.4','classifier.5']), ('fc8', ['classifier.6']), ]) def plot_results(df): pair_names = [] for i, row in df.iterrows(): img1 = row.image1.replace(".jpg","").replace(".png","").replace(".tiff","") img2 = row.image2.replace(".jpg","").replace(".png","").replace(".tiff","") pair_name = img1 + "_" + img2 pair_names.append(pair_name) df['pair_name'] = pair_names ax = sns.barplot(x="pair_name", y="euclidean_distance", data=df) ax.set_title("Euclidean Distance Between Pairs (larger = more different)", fontsize=20) return ax; def plot_df(df, pairs=[0,1,2], title='', blocks=None, legend_loc=(0.25, 0.80), group_by='pair_num', ceiling=1, ylabel='correlation', legend_color=(0.95,0.95,0.95,1.0)): if pairs is None: #ax = plot_data(df, title, ymax=1.10, ymin=-0.20, hue=group_by, ylabel=ylabel) ax = plot_data(df[df.pair_num.isin(pairs)], title, ymax=1.10, ymin=-0.20, hue=group_by, ylabel=ylabel) else: ax = plot_data(df[df.pair_num.isin(pairs)], title, ymax=1.10, ymin=-0.20, hue=group_by, ylabel=ylabel) if blocks: draw_bg(blocks, ypos=1.03, legend_loc=legend_loc) L = ax.legend() legend_labels = ['image_pair'] for pair in pairs: label = df[df.pair_num == pair].iloc[0].image1.replace('.jpg', '') + '_vs_' + df[df.pair_num == pair].iloc[0].image2.replace('.jpg', '') legend_labels.append(label) for label_num, label in enumerate(legend_labels): if label is not None: L.get_texts()[label_num].set_text(label) L.set_bbox_to_anchor(legend_loc) return ax def plot_data(df, title, ymax=.50, ymin=0.0, hue=None, ylabel='correlation'): sns.set(rc={'figure.figsize':(16.7,8.27)}) sns.set_style("whitegrid", {'axes.grid' : False}) ax = sns.lineplot(x="layer", y="y", hue=hue, data=df, linewidth=2) ax.set_title(title, fontsize=24); ax.set_ylabel(ylabel, fontsize=24, labelpad=15); ax.set_xlabel("layer", fontsize=24, labelpad=20); ax.set_ylim([ymin, ymax]) plt.xticks(rotation=90); return ax def draw_bg(blocks, ypos=0.475, alpha_b=.20, alpha_g=.15, legend_loc=(0.79, 0.80)): if blocks == None: return c = 0 for idx, (block_name, layers) in enumerate(blocks.items()): n_layers = len(layers) for i in range(c, c+n_layers): if idx % 2 == 0: plt.axvspan(i-.5, i+.5, facecolor='b', alpha=alpha_b, lw=0) else: plt.axvspan(i-.5, i+.5, facecolor='gray', 
alpha=alpha_g, lw=0) plt.text(c+(n_layers)/2-.5, ypos, block_name, fontdict=None, fontsize=16, ha='center', va='center') c += n_layers plt.legend(facecolor=(0.95,0.95,0.95,1.0), bbox_to_anchor=legend_loc) def plot(df, legend_loc=(0.25, 0.70)): df['y'] = df['r'] layer_name = lambda x: "{:02d}_{}".format(x.layer_num,x.layer_type.replace("BatchNorm2d","Norm").replace("GroupNorm", "Norm")) df['layer'] = df[['layer_num','layer_type']].apply(layer_name, axis=1) blocks = alexnet_pytorch_blocks pairs = df.pair_num.unique() ax = plot_df(df, blocks=blocks, pairs=pairs, legend_loc=legend_loc) return ax import pandas as pd from scipy.stats import pearsonr from fastprogress import master_bar, progress_bar from collections import OrderedDict from torch.utils.data import Dataset, DataLoader, random_split from torchvision import transforms import torch from facenet_pytorch import MTCNN, InceptionResnetV1 from IPython.core.debugger import set_trace tfrm = transforms.Compose([ transforms.CenterCrop(160), transforms.ToTensor() ]) def compute_embeddings(dataset): print(f"Computing Embeddings (N={len(dataset)} images)") cache = {} mtcnn = MTCNN(image_size=160) resnet = InceptionResnetV1(pretrained='vggface2').eval() embeddings = [] embedding = [] for idx, (img, label, index) in enumerate(progress_bar(dataset)): # Get cropped and prewhitened image tensor img_cropped = None try: img_cropped = mtcnn(img) except: pass if img_cropped is None: print("Warning, no human face detected, using center crop:", dataset.files[idx]) img_cropped = tfrm(img) # Calculate embedding (unsqueeze to add batch dimension) img_embedding = resnet(img_cropped.unsqueeze(0)) embedding.append(img_embedding) if len(embedding) == 2: embeddings.append(embedding) embedding = [] return embeddings def compare_embeddings(embeddings, image_pairs): df = pd.DataFrame(columns=['pair_num','image1','image2','euclidean_distance']) for pair_num, ((embed1, embed2), (image1, image2)) in enumerate(zip(embeddings, image_pairs)): df = df.append({ "pair_num": pair_num, "image1": image1, "image2": image2, "euclidean_distance": (embed1-embed2).pow(2).sum().item() }, ignore_index=True) return df def get_layer(m, layers): layer = layers.pop(0) m = getattr(m, layer) if len(layers) > 0: return get_layer(m, layers) return m def get_layers(model, parent_name='', layer_info=[]): for module_name, module in model.named_children(): layer_name = parent_name + '.' 
+ module_name if len(list(module.named_children())): layer_info = get_layers(module, layer_name, layer_info=layer_info) else: layer_info.append(layer_name.strip('.')) return layer_info def get_layer_type(model, layer_name): m = get_layer(model, layer_name.split(".")) return m.__class__.__name__ def convert_relu_layers(parent): for child_name, child in parent.named_children(): if isinstance(child, nn.ReLU): setattr(parent, child_name, nn.ReLU(inplace=False)) elif len(list(child.children())) > 0: convert_relu_layers(child) def store_activations(model, layer_names): a = OrderedDict() for layer_num, layer_name in enumerate(layer_names): layer_type = get_layer_type(model.model, layer_name) X = model.retained_layer(layer_name) X = X.view(X.shape[0], -1) a[layer_name] = X return a def compute_similarity(model, dataset): device = 'cuda' if torch.cuda.is_available() else 'cpu' # hook model layer_names = get_layers(model, parent_name='', layer_info=[]) if not isinstance(model, nethook.InstrumentedModel): model = nethook.InstrumentedModel(model) for layer_name in layer_names: model.retain_layer(layer_name) model = model.to(device) model.eval() # create dataloader transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) dataset = ImageListDataset(imgs=dataset.files, transform=transform) dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0, pin_memory=False) # compute similarity by layer df = pd.DataFrame(columns=['pair_num', 'image1', 'image2', 'layer_num', 'layer_name', 'layer_type', 'r']) pair_num = 0 mb = master_bar(dataloader) for count, (imgs, labels, indexes) in enumerate(mb): with torch.no_grad(): model(imgs.to(device)) if count % 2 == 0: a1 = store_activations(model, layer_names) image1 = dataset.files[indexes].name if count % 2 == 1: a2 = store_activations(model, layer_names) image2 = dataset.files[indexes].name for layer_num, layer_name in enumerate(progress_bar(layer_names, parent=mb)): r = pearsonr(a1[layer_name].squeeze(), a2[layer_name].squeeze())[0] layer_type = get_layer_type(model.model, layer_name) df = df.append({ "pair_num": pair_num, "image1": image1, "image2": image2, "layer_num": layer_num, "layer_name": layer_name, "layer_type": layer_type, "r": r, }, ignore_index=True) pair_num += 1 df.pair_num = df.pair_num.astype(int) return dfvit face modelsimport torch import torch.nn.functional as F from einops import rearrange, repeat from torch import nn from torch.nn import Parameter from IPython import embed MIN_NUM_PATCHES = 16 class Softmax(nn.Module): r"""Implement of Softmax (normal classification head): Args: in_features: size of each input sample out_features: size of each output sample device_id: the ID of GPU where the model will be trained by model parallel. if device_id=None, it will be trained on CPU without model parallel. 
""" def __init__(self, in_features, out_features, device_id): super(Softmax, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.weight = Parameter(torch.FloatTensor(out_features, in_features)) self.bias = Parameter(torch.FloatTensor(out_features)) nn.init.xavier_uniform_(self.weight) nn.init.zeros_(self.bias) def forward(self, input, label): if self.device_id == None: out = F.linear(x, self.weight, self.bias) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) sub_biases = torch.chunk(self.bias, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) bias = sub_biases[0].cuda(self.device_id[0]) out = F.linear(temp_x, weight, bias) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) bias = sub_biases[i].cuda(self.device_id[i]) out = torch.cat((out, F.linear(temp_x, weight, bias).cuda(self.device_id[0])), dim=1) return out def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.zero_() class ArcFace(nn.Module): r"""Implement of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf): Args: in_features: size of each input sample out_features: size of each output sample device_id: the ID of GPU where the model will be trained by model parallel. if device_id=None, it will be trained on CPU without model parallel. 
s: norm of input feature m: margin cos(theta+m) """ def __init__(self, in_features, out_features, device_id, s=64.0, m=0.50, easy_margin=False): super(ArcFace, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.s = s self.m = m self.weight = Parameter(torch.FloatTensor(out_features, in_features)) nn.init.xavier_uniform_(self.weight) self.easy_margin = easy_margin self.cos_m = math.cos(m) self.sin_m = math.sin(m) self.th = math.cos(math.pi - m) self.mm = math.sin(math.pi - m) * m def forward(self, input, label): # --------------------------- cos(theta) & phi(theta) --------------------------- if self.device_id == None: cosine = F.linear(F.normalize(input), F.normalize(self.weight)) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) cosine = F.linear(F.normalize(temp_x), F.normalize(weight)) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine * self.sin_m if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine - self.mm) # --------------------------- convert label to one-hot --------------------------- one_hot = torch.zeros(cosine.size()) if self.device_id != None: one_hot = one_hot.cuda(self.device_id[0]) one_hot.scatter_(1, label.view(-1, 1).long(), 1) # -------------torch.where(out_i = {x_i if condition_i else y_i) ------------- output = (one_hot * phi) + ( (1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4 output *= self.s return output class CosFace(nn.Module): r"""Implement of CosFace (https://arxiv.org/pdf/1801.09414.pdf): Args: in_features: size of each input sample out_features: size of each output sample device_id: the ID of GPU where the model will be trained by model parallel. if device_id=None, it will be trained on CPU without model parallel. 
s: norm of input feature m: margin cos(theta)-m """ def __init__(self, in_features, out_features, device_id, s=64.0, m=0.35): super(CosFace, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.s = s self.m = m print("self.device_id", self.device_id) self.weight = Parameter(torch.FloatTensor(out_features, in_features)) nn.init.xavier_uniform_(self.weight) def forward(self, input, label): # --------------------------- cos(theta) & phi(theta) --------------------------- if self.device_id == None: cosine = F.linear(F.normalize(input), F.normalize(self.weight)) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) cosine = F.linear(F.normalize(temp_x), F.normalize(weight)) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) phi = cosine - self.m # --------------------------- convert label to one-hot --------------------------- one_hot = torch.zeros(cosine.size()) if self.device_id != None: one_hot = one_hot.cuda(self.device_id[0]) # one_hot = one_hot.cuda() if cosine.is_cuda else one_hot one_hot.scatter_(1, label.cuda(self.device_id[0]).view(-1, 1).long(), 1) # -------------torch.where(out_i = {x_i if condition_i else y_i) ------------- output = (one_hot * phi) + ( (1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4 output *= self.s return output def __repr__(self): return self.__class__.__name__ + '(' \ + 'in_features = ' + str(self.in_features) \ + ', out_features = ' + str(self.out_features) \ + ', s = ' + str(self.s) \ + ', m = ' + str(self.m) + ')' class SFaceLoss(nn.Module): def __init__(self, in_features, out_features, device_id, s = 64.0, k = 80.0, a = 0.80, b = 1.22): super(SFaceLoss, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.s = s self.k = k self.a = a self.b = b self.weight = Parameter(torch.FloatTensor(out_features, in_features)) #nn.init.xavier_uniform_(self.weight) xavier_normal_(self.weight, gain=2, mode='out') def forward(self, input, label): # --------------------------- cos(theta) & phi(theta) --------------------------- if self.device_id == None: cosine = F.linear(F.normalize(input), F.normalize(self.weight)) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) cosine = F.linear(F.normalize(temp_x), F.normalize(weight)) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) # --------------------------- s*cos(theta) --------------------------- output = cosine * self.s # --------------------------- sface loss --------------------------- one_hot = torch.zeros(cosine.size()) if self.device_id != None: one_hot = one_hot.cuda(self.device_id[0]) one_hot.scatter_(1, label.view(-1, 1), 1) zero_hot = torch.ones(cosine.size()) if self.device_id != None: zero_hot = zero_hot.cuda(self.device_id[0]) zero_hot.scatter_(1, label.view(-1, 1), 0) WyiX = torch.sum(one_hot * output, 1) with torch.no_grad(): # theta_yi = torch.acos(WyiX) theta_yi = 
torch.acos(WyiX / self.s) weight_yi = 1.0 / (1.0 + torch.exp(-self.k * (theta_yi - self.a))) intra_loss = - weight_yi * WyiX Wj = zero_hot * output with torch.no_grad(): # theta_j = torch.acos(Wj) theta_j = torch.acos(Wj / self.s) weight_j = 1.0 / (1.0 + torch.exp(self.k * (theta_j - self.b))) inter_loss = torch.sum(weight_j * Wj, 1) loss = intra_loss.mean() + inter_loss.mean() Wyi_s = WyiX / self.s Wj_s = Wj / self.s return output, loss, intra_loss.mean(), inter_loss.mean(), Wyi_s.mean(), Wj_s.mean() class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.norm = nn.LayerNorm(dim) self.fn = fn def forward(self, x, **kwargs): return self.fn(self.norm(x), **kwargs) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim ** -0.5 self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, mask = None): b, n, _, h = *x.shape, self.heads qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale mask_value = -torch.finfo(dots.dtype).max #embed() if mask is not None: mask = F.pad(mask.flatten(1), (1, 0), value = True) assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions' mask = mask[:, None, :] * mask[:, :, None] dots.masked_fill_(~mask, mask_value) del mask attn = dots.softmax(dim=-1) out = torch.einsum('bhij,bhjd->bhid', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') out = self.to_out(out) return out class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Residual(PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout))), Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))) ])) def forward(self, x, mask = None): for attn, ff in self.layers: x = attn(x, mask = mask) #embed() x = ff(x) return x class ViT_face(nn.Module): def __init__(self, *, loss_type, GPU_ID, num_class, image_size, patch_size, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for attention to be effective (at least 16). 
Try decreasing your patch size' assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.patch_to_embedding = nn.Linear(patch_dim, dim) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), ) self.loss_type = loss_type self.GPU_ID = GPU_ID if self.loss_type == 'None': print("no loss for vit_face") else: if self.loss_type == 'Softmax': self.loss = Softmax(in_features=dim, out_features=num_class, device_id=self.GPU_ID) elif self.loss_type == 'CosFace': self.loss = CosFace(in_features=dim, out_features=num_class, device_id=self.GPU_ID) elif self.loss_type == 'ArcFace': self.loss = ArcFace(in_features=dim, out_features=num_class, device_id=self.GPU_ID) elif self.loss_type == 'SFace': self.loss = SFaceLoss(in_features=dim, out_features=num_class, device_id=self.GPU_ID) def forward(self, img, label= None , mask = None): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x, mask) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) emb = self.mlp_head(x) if label is not None: x = self.loss(emb, label) return x, emb else: return emb import torch import torch.nn.functional as F from einops import rearrange, repeat from torch import nn from torch.nn import Parameter from IPython import embed MIN_NUM_PATCHES = 16 class Softmax(nn.Module): r"""Implement of Softmax (normal classification head): Args: in_features: size of each input sample out_features: size of each output sample device_id: the ID of GPU where the model will be trained by model parallel. if device_id=None, it will be trained on CPU without model parallel. 
""" def __init__(self, in_features, out_features, device_id): super(Softmax, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.weight = Parameter(torch.FloatTensor(out_features, in_features)) self.bias = Parameter(torch.FloatTensor(out_features)) nn.init.xavier_uniform_(self.weight) nn.init.zeros_(self.bias) def forward(self, input, label): if self.device_id == None: out = F.linear(x, self.weight, self.bias) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) sub_biases = torch.chunk(self.bias, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) bias = sub_biases[0].cuda(self.device_id[0]) out = F.linear(temp_x, weight, bias) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) bias = sub_biases[i].cuda(self.device_id[i]) out = torch.cat((out, F.linear(temp_x, weight, bias).cuda(self.device_id[0])), dim=1) return out def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.zero_() class ArcFace(nn.Module): r"""Implement of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf): Args: in_features: size of each input sample out_features: size of each output sample device_id: the ID of GPU where the model will be trained by model parallel. if device_id=None, it will be trained on CPU without model parallel. 
s: norm of input feature m: margin cos(theta+m) """ def __init__(self, in_features, out_features, device_id, s=64.0, m=0.50, easy_margin=False): super(ArcFace, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.s = s self.m = m self.weight = Parameter(torch.FloatTensor(out_features, in_features)) nn.init.xavier_uniform_(self.weight) self.easy_margin = easy_margin self.cos_m = math.cos(m) self.sin_m = math.sin(m) self.th = math.cos(math.pi - m) self.mm = math.sin(math.pi - m) * m def forward(self, input, label): # --------------------------- cos(theta) & phi(theta) --------------------------- if self.device_id == None: cosine = F.linear(F.normalize(input), F.normalize(self.weight)) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) cosine = F.linear(F.normalize(temp_x), F.normalize(weight)) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine * self.sin_m if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine - self.mm) # --------------------------- convert label to one-hot --------------------------- one_hot = torch.zeros(cosine.size()) if self.device_id != None: one_hot = one_hot.cuda(self.device_id[0]) one_hot.scatter_(1, label.view(-1, 1).long(), 1) # -------------torch.where(out_i = {x_i if condition_i else y_i) ------------- output = (one_hot * phi) + ( (1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4 output *= self.s return output class CosFace(nn.Module): r"""Implement of CosFace (https://arxiv.org/pdf/1801.09414.pdf): Args: in_features: size of each input sample out_features: size of each output sample device_id: the ID of GPU where the model will be trained by model parallel. if device_id=None, it will be trained on CPU without model parallel. 
s: norm of input feature m: margin cos(theta)-m """ def __init__(self, in_features, out_features, device_id, s=64.0, m=0.35): super(CosFace, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.s = s self.m = m print("self.device_id", self.device_id) self.weight = Parameter(torch.FloatTensor(out_features, in_features)) nn.init.xavier_uniform_(self.weight) def forward(self, input, label): # --------------------------- cos(theta) & phi(theta) --------------------------- if self.device_id == None: cosine = F.linear(F.normalize(input), F.normalize(self.weight)) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) cosine = F.linear(F.normalize(temp_x), F.normalize(weight)) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) phi = cosine - self.m # --------------------------- convert label to one-hot --------------------------- one_hot = torch.zeros(cosine.size()) if self.device_id != None: one_hot = one_hot.cuda(self.device_id[0]) # one_hot = one_hot.cuda() if cosine.is_cuda else one_hot one_hot.scatter_(1, label.cuda(self.device_id[0]).view(-1, 1).long(), 1) # -------------torch.where(out_i = {x_i if condition_i else y_i) ------------- output = (one_hot * phi) + ( (1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4 output *= self.s return output def __repr__(self): return self.__class__.__name__ + '(' \ + 'in_features = ' + str(self.in_features) \ + ', out_features = ' + str(self.out_features) \ + ', s = ' + str(self.s) \ + ', m = ' + str(self.m) + ')' class SFaceLoss(nn.Module): def __init__(self, in_features, out_features, device_id, s = 64.0, k = 80.0, a = 0.90, b = 1.2): super(SFaceLoss, self).__init__() self.in_features = in_features self.out_features = out_features self.device_id = device_id self.s = s self.k = k self.a = a self.b = b self.weight = Parameter(torch.FloatTensor(out_features, in_features)) #nn.init.xavier_uniform_(self.weight) xavier_normal_(self.weight, gain=2, mode='out') def forward(self, input, label): # --------------------------- cos(theta) & phi(theta) --------------------------- if self.device_id == None: cosine = F.linear(F.normalize(input), F.normalize(self.weight)) else: x = input sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0) temp_x = x.cuda(self.device_id[0]) weight = sub_weights[0].cuda(self.device_id[0]) cosine = F.linear(F.normalize(temp_x), F.normalize(weight)) for i in range(1, len(self.device_id)): temp_x = x.cuda(self.device_id[i]) weight = sub_weights[i].cuda(self.device_id[i]) cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) # --------------------------- s*cos(theta) --------------------------- output = cosine * self.s # --------------------------- sface loss --------------------------- one_hot = torch.zeros(cosine.size()) if self.device_id != None: one_hot = one_hot.cuda(self.device_id[0]) one_hot.scatter_(1, label.view(-1, 1), 1) zero_hot = torch.ones(cosine.size()) if self.device_id != None: zero_hot = zero_hot.cuda(self.device_id[0]) zero_hot.scatter_(1, label.view(-1, 1), 0) WyiX = torch.sum(one_hot * output, 1) with torch.no_grad(): theta_yi = torch.acos(WyiX / self.s) weight_yi = 
1.0 / (1.0 + torch.exp(-self.k * (theta_yi - self.a))) intra_loss = - weight_yi * WyiX Wj = zero_hot * output with torch.no_grad(): # theta_j = torch.acos(Wj) theta_j = torch.acos(Wj / self.s) weight_j = 1.0 / (1.0 + torch.exp(self.k * (theta_j - self.b))) inter_loss = torch.sum(weight_j * Wj, 1) loss = intra_loss.mean() + inter_loss.mean() Wyi_s = WyiX / self.s Wj_s = Wj / self.s return output, loss, intra_loss.mean(), inter_loss.mean(), Wyi_s.mean(), Wj_s.mean() class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.norm = nn.LayerNorm(dim) self.fn = fn def forward(self, x, **kwargs): return self.fn(self.norm(x), **kwargs) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim ** -0.5 self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, mask = None): b, n, _, h = *x.shape, self.heads qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale mask_value = -torch.finfo(dots.dtype).max #embed() if mask is not None: mask = F.pad(mask.flatten(1), (1, 0), value = True) assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions' mask = mask[:, None, :] * mask[:, :, None] dots.masked_fill_(~mask, mask_value) del mask attn = dots.softmax(dim=-1) out = torch.einsum('bhij,bhjd->bhid', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') out = self.to_out(out) return out class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Residual(PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout))), Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))) ])) def forward(self, x, mask = None): for attn, ff in self.layers: x = attn(x, mask = mask) #embed() x = ff(x) return x class ViTs_face(nn.Module): def __init__(self, *, loss_type, GPU_ID, num_class, image_size, patch_size, ac_patch_size, pad, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * ac_patch_size ** 2 assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for attention to be effective (at least 16). 
Try decreasing your patch size' assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.patch_size = patch_size self.soft_split = nn.Unfold(kernel_size=(ac_patch_size, ac_patch_size), stride=(self.patch_size, self.patch_size), padding=(pad, pad)) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.patch_to_embedding = nn.Linear(patch_dim, dim) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), ) self.loss_type = loss_type self.GPU_ID = GPU_ID if self.loss_type == 'None': print("no loss for vit_face") else: if self.loss_type == 'Softmax': self.loss = Softmax(in_features=dim, out_features=num_class, device_id=self.GPU_ID) elif self.loss_type == 'CosFace': self.loss = CosFace(in_features=dim, out_features=num_class, device_id=self.GPU_ID) elif self.loss_type == 'ArcFace': self.loss = ArcFace(in_features=dim, out_features=num_class, device_id=self.GPU_ID) elif self.loss_type == 'SFace': self.loss = SFaceLoss(in_features=dim, out_features=num_class, device_id=self.GPU_ID) def forward(self, img, label= None , mask = None): p = self.patch_size x = self.soft_split(img).transpose(1, 2) x = self.patch_to_embedding(x) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x, mask) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) emb = self.mlp_head(x) if label is not None: x = self.loss(emb, label) return x, emb else: return embload transformer modelsdef load_face_transformer_vit(): checkpoint = torch.load('./weights/vit_checkpoint.pth', map_location='cpu') device = 'cuda' if torch.cuda.is_available() else 'cpu' model = ViT_face(image_size=112, patch_size=8, loss_type='CosFace', GPU_ID=device, num_class=93431, # number of face identities? dim=512, depth=20, heads=8, mlp_dim=2048, dropout=0.1, emb_dropout=0.1) model.load_state_dict(checkpoint, strict=True) print("face_transformer_vit loaded!") return model def load_face_transformer_vits(): checkpoint = torch.load('./weights/vits_checkpoint.pth', map_location='cpu') device = 'cuda' if torch.cuda.is_available() else 'cpu' model = ViTs_face(image_size=112, patch_size=8, ac_patch_size=12, pad=4, loss_type='CosFace', GPU_ID=device, num_class=93431, # number of face identities? 
dim=512, depth=20, heads=8, mlp_dim=2048, dropout=0.1, emb_dropout=0.1) model.load_state_dict(checkpoint, strict=True) print("face_transformer_vits loaded!") return modelnew helpers compute embeddingsimport torch import torch.nn.functional as F from pdb import set_trace model_names = [name.lower() for name in ["FaceNetCNN", "FaceVITs", "FaceVit"]] def denorm(im_data): # im_data = (im_data - 127.5) * 0.0078125 return im_data/0.0078125 + 127.5 # HxWxC -> CxHxW = x.permute(2,0,1) def to_channels_last(x): if x.shape[-1] == 3: return x.permute(2,0,1) return x def compute_embeddings(model_name, image_list): assert model_name.lower() in model_names, f"oops, model_name must be one of {model_names}, got {model_name}" print(f"Computing Embeddings (N={len(image_list)} images)") print(f"model_name: {model_name}") if model_name.lower() == "FaceNetCNN".lower(): image_size = 160 mtcnn = MTCNN(image_size=image_size) model = InceptionResnetV1(pretrained='vggface2').eval() transform = transforms.Compose([ transforms.Resize((image_size)), transforms.CenterCrop(image_size), lambda x: (x - 127.5) * 0.0078125 ]) elif model_name.lower() == "FaceVit".lower(): image_size = 112 mtcnn = MTCNN(image_size=image_size) model = load_face_transformer_vit() transform = transforms.Compose([ transforms.Resize((image_size)), transforms.CenterCrop(image_size), lambda x: to_channels_last(torch.tensor(np.array(x), dtype=torch.float32)), ]) elif model_name.lower() == "FaceVits".lower(): image_size = 112 mtcnn = MTCNN(image_size=image_size) model = load_face_transformer_vits() transform = transforms.Compose([ transforms.Resize((image_size)), transforms.CenterCrop(image_size), lambda x: to_channels_last(torch.tensor(np.array(x), dtype=torch.float32)), ]) images = {} embeddings = {} for idx, image_name in enumerate(progress_bar(image_list)): img = Image.open(os.path.join('./images', image_name)).convert('RGB') # Get cropped and prewhitened image tensor img_cropped = None try: img_cropped = mtcnn(img) img_cropped = denorm(img_cropped) img_cropped = transform(img_cropped) except: print(image_name) set_trace() pass if img_cropped is None: print("Warning, no human face detected, using center crop (OK, but you can try manually cropping your image):", image_name) img_cropped = transform(img) if model_name == "FaceNetCNN": image_save = Image.fromarray(denorm(img_cropped).permute(1,2,0).numpy().astype(np.uint8)) else: image_save = Image.fromarray(img_cropped.permute(1,2,0).numpy().astype(np.uint8)) images[image_name] = image_save # Calculate embedding (unsqueeze to add batch dimension) img_embedding = model(img_cropped.unsqueeze(0)) embeddings[image_name] = img_embedding.cpu().clone().detach() return embeddings, imagescompare embeddingsdef compare_embeddings_distance(embeddings, image_pairs): df = pd.DataFrame(columns=['pair_num','image1','image2','euclidean_distance']) for pair_num, (image1, image2) in enumerate(image_pairs): #embed1 = embeddings[image1] #embed2 = embeddings[image2] embed1 = F.normalize(embeddings[image1], dim=1) embed2 = F.normalize(embeddings[image2], dim=1) df = df.append({ "pair_num": pair_num, "image1": image1, "image2": image2, "euclidean_distance": (embed1-embed2).pow(2).sum().item() }, ignore_index=True) return df def compare_embeddings_angle(embeddings, image_pairs): df = pd.DataFrame(columns=['pair_num','image1','image2','euclidean_distance']) for pair_num, (image1, image2) in enumerate(image_pairs): embed1 = F.normalize(embeddings[image1], dim=1) embed2 = F.normalize(embeddings[image2], dim=1) df = df.append({ 
"pair_num": pair_num, "image1": image1, "image2": image2, "cosine_distance": 1 - (embed1 @ embed2.T).item() }, ignore_index=True) return dfshow imagesimport math import matplotlib.pyplot as plt def show_cropped_images(cropped_images, num_cols=5, figsize=(10,16)): N = len(cropped_images) num_rows = int(math.ceil(N/num_cols)) fig, axes = plt.subplots(num_rows, num_cols, sharey=True) if num_rows==1: axes = [axes] image_names = list(cropped_images.keys()) c = 0 for row in axes: for ax in row: if c < N: img_name = image_names[c] img = cropped_images[img_name] ax.imshow(img) ax.grid(False) ax.axis('off') ax.set_title(img_name) c += 1 else: ax.remove()Step 1 - Test Model Loading: FaceNetCNNThis loads a InceptionResnetV1 model (like a deeper Alexnet) with it's millions of weights and biases trained with the triplet loss we read about.model = InceptionResnetV1(pretrained='vggface2').eval() print("success")successStep 2 - Test Model Loading: FaceTransformermodel = load_face_transformer_vit() # model = load_face_transformer_vits() print("success")self.device_id cpu face_transformer_vit loaded! successStep 3 - upload ImagesFirst you'll need to upload your images to this Colab computer. If you click on the folder along the left hand side, you'll see a list of folders/files. If you ran Step 0 above, it will have created a folder called "images" and within it you should see a few different images (e.g., DiCaprio_Anchor.jpg, DiCaprio_NegativeClooney.jpg, etc.).You can add your own images by downloading them to your computer, then dragging and dropping them to the images folder.Step 3 - Compute EmbeddingsHere you can compute the embeddings for a set of images for a given face model.To do so, you specify a list of images (all images must be located in the images folder).```image_list = ['DiCaprio_Anchor.jpg', 'DiCaprio_HardNegative.jpg', 'DiCaprio_NegativeClooney.jpg', 'DiCaprio_Positive.jpg']```Then you choose a model from the set of available models (FaceVit, FaceVits, FaceNetCNN), and compute the embeddings.```model_name = "FaceNetCNN"embeddings, cropped_images = compute_embeddings(model_name, image_list)```You can then access the embeddings for each image by name.```embed1 = embeddings['DiCaprio_Anchor.jpg']print(embed1.shape)```Here you can see there is a vector of 512 numbers representing the image 'DiCaprio_Anchor.jpg'. 
The code makes an attempt to locate the faces in the image you supplied, so you can look at what the final cropped images looked like by running the following:```show_cropped_images(cropped_images, num_cols=5, figsize=(10,15)) ```image_list = ['DiCaprio_Anchor.jpg', 'DiCaprio_HardNegative.jpg', 'DiCaprio_NegativeClooney.jpg', 'DiCaprio_Positive.jpg'] # options include FaceVit, FaceVits, FaceNetCNN # model_name = "FaceVits" model_name = "FaceNetCNN" embeddings, cropped_images = compute_embeddings(model_name, image_list) embed1 = embeddings['DiCaprio_Anchor.jpg'] print(embed1.shape) # you can view any of the cropped images by name cropped_images['DiCaprio_Anchor.jpg'] # or show all of the cropped images show_cropped_images(cropped_images, num_cols=5, figsize=(10,15))Step 5 - Compare EmbeddingsIn this step you can compare embeddings for any pair of images.First you set up a list of "tuples", where a tuple is just a pair of image names surrounded by parentheses:```image_pairs = [ ('DiCaprio_Anchor.jpg', 'DiCaprio_NegativeClooney.jpg'), ('DiCaprio_Anchor.jpg', 'DiCaprio_Positive.jpg'), ('DiCaprio_Anchor.jpg', 'DiCaprio_HardNegative.jpg'),]```Then you just run ```results = compare_embeddings_distance(embeddings, image_pairs)print(results)plot_results(results);```And you will see the Euclidean distance between each pair of images.image_pairs = [ ('DiCaprio_Anchor.jpg', 'DiCaprio_NegativeClooney.jpg'), ('DiCaprio_Anchor.jpg', 'DiCaprio_Positive.jpg'), ('DiCaprio_Anchor.jpg', 'DiCaprio_HardNegative.jpg'), ] results = compare_embeddings_distance(embeddings, image_pairs) results # plot the results plot_results(results);quick sanity check to make sure all models capture basic face-similarity This isn't very systematic, but I just wanted to make sure FaceNetCNN and FaceVit both have decent face representations.
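If you want to see every pairwise distance at once rather than hand-picking pairs, a minimal sketch (assuming `embeddings` has already been filled in by `compute_embeddings` above, and reusing the `compare_embeddings_distance` helper defined earlier) is:
```
from itertools import combinations

# Score every unordered pair of images and sort by distance; images of the same
# person should end up with the smallest distances.
all_pairs = list(combinations(embeddings.keys(), 2))
all_results = compare_embeddings_distance(embeddings, all_pairs)
print(all_results.sort_values('euclidean_distance'))
```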
You could provide more thorough tests if you wanted to...image_list = ['clooney1.jpg', 'clooney2.jpg', 'pitt1.jpg', 'pitt2.jpg'] # options include FaceVit, FaceVits, FaceNetCNN # model_name = "FaceVits" model_name = "FaceNetCNN" # model_name = "FaceVit" embeddings, cropped_images = compute_embeddings(model_name, image_list) image_pairs = [ ('clooney1.jpg', 'clooney2.jpg'), ('clooney1.jpg', 'pitt1.jpg'), ('clooney1.jpg', 'pitt2.jpg'), ('clooney2.jpg', 'pitt1.jpg'), ('clooney2.jpg', 'pitt2.jpg'), ('pitt1.jpg', 'pitt2.jpg'), ] results = compare_embeddings_distance(embeddings, image_pairs) print(results) plot_results(results);gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Not connected to a GPU') else: print(gpu_info) from psutil import virtual_memory ram_gb = virtual_memory().total / 1e9 print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb)) if ram_gb < 20: print('Not using a high-RAM runtime') else: print('You are using a high-RAM runtime!') !pip install datasets !pip install transformers !pip3 install torchinfo import transformers import matplotlib.pyplot as plt from datasets import load_dataset import numpy as np import pandas as pd from pprint import pprint from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report, f1_score, precision_score, recall_score,roc_auc_score,accuracy_score from sklearn.metrics import plot_confusion_matrix,classification_report from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay from datasets import load_metricDatasetsraw_datasets = load_dataset("glue","sst2") raw_datasets print(raw_datasets['train']) print(dir(raw_datasets['train'])) type(raw_datasets['train']) raw_datasets['train'].data raw_datasets['train'][0] raw_datasets['train'][50000:50003] raw_datasets['train'].features len(raw_datasets['train']) raw_datasets['train'].column_names raw_datasets['train']['sentence'][:5]From Datasets to DataFrameraw_datasets.set_format(type='pandas') tweet_data = raw_datasets['train'][:] int2str_label = raw_datasets['train'].features['label'].names int2str_mapping = {k:v for k,v in enumerate(int2str_label)} int2str_mapping %%timeit tweet_data['label_name']= tweet_data['label'].map(int2str_mapping) tweet_data.head() tweet_data['label_name'].value_counts(ascending=True).plot.barh() tweet_data['tweet_length']= tweet_data['sentence'].apply(lambda x: len(x.split(' '))) tweet_data.boxplot("tweet_length", by="label_name", color="black",grid=False, showfliers=False) tweet_data.groupby(['label_name'])['tweet_length'].describe() raw_datasets.reset_format()From Text to Tokenizationfrom transformers import AutoTokenizer checkpoint = 'distilbert-base-uncased' tokenizer = AutoTokenizer.from_pretrained(checkpoint)Text Tokens to Integer Idstokenized_sentences = tokenizer(raw_datasets['train'][0:3]['sentence']) pprint(tokenized_sentences){'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'input_ids': [[101, 5342, 2047, 3595, 8496, 2013, 1996, 18643, 3197, 102], [101, 3397, 2053, 15966, 1010, 2069, 4450, 2098, 18201, 2015, 102], [101, 2008, 7459, 2049, 3494, 1998, 10639, 2015, 2242, 2738, 3376, 2055, 2529, 3267, 102]]}Integer Ids to Tokensback2tokens = tokenizer.convert_ids_to_tokens(tokenized_sentences['input_ids'][0]) back2tokens print(tokenizer.convert_tokens_to_string(back2tokens))[CLS] hide new secretions from the parental units [SEP]Tokenization the entire datasetdef 
tokenize_fn(batch): return tokenizer(batch['sentence'], truncation=True,padding=True) tokenized_dataset = raw_datasets.map(tokenize_fn, batched=True) tokenized_dataset pprint(tokenize_fn(raw_datasets['train'][:3])){'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'input_ids': [[101, 5342, 2047, 3595, 8496, 2013, 1996, 18643, 3197, 102, 0, 0, 0, 0, 0], [101, 3397, 2053, 15966, 1010, 2069, 4450, 2098, 18201, 2015, 102, 0, 0, 0, 0], [101, 2008, 7459, 2049, 3494, 1998, 10639, [...]Fine Tuning- Getting the pre_trained model- All the parameters will be trained with a very slow learning rate. Training Argumentsfrom transformers import TrainingArguments from transformers import training_args batch_size= 64 logging_steps = len(tokenized_dataset["train"]) // batch_size model_name = "airline_tweet_analysis_model" training_args = TrainingArguments( 'airline_tweet_lp', evaluation_strategy='epoch', num_train_epochs=2, learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, save_strategy='epoch', logging_steps=logging_steps, log_level="error", push_to_hub=False, disable_tqdm=False ) from transformers import AutoModelForSequenceClassification import torch model_ckpt = 'distilbert-base-uncased' device = torch.device("cuda" if torch.cuda.is_available() else "cpu") num_labels=2 model = (AutoModelForSequenceClassification.from_pretrained(model_ckpt, num_labels=num_labels).to(device)) model type(model) #from torchinfo import summary #summary(model,input_size=(batch_size,512), dtypes=['torch.IntTensor'],device='cpu') #summary(model) params_before = [] for name, p in model.named_parameters(): print(name,p.shape) params_before.append(p.detach().cpu().numpy()) def compute_metrics(pred): labels=pred.label_ids preds = pred.predictions.argmax(-1) f1 = f1_score(labels,preds, average="weighted"), acc = accuracy_score(labels,preds) return {"accuracy":acc,"f1":f1} metric = load_metric("glue","sst2") metric.compute(predictions=[1,0,1], references=[1,0,1])Trainerfrom transformers import Trainer trainer = Trainer(model=model, args=training_args, compute_metrics=compute_metrics, train_dataset=tokenized_dataset['train'], eval_dataset = tokenized_dataset['validation'], tokenizer=tokenizer) trainer.train() pred_outputs = trainer.predict(tokenized_dataset['validation']) pred_outputs.metrics y_preds = np.argmax(pred_outputs.predictions,axis=1) y_valid = np.array(tokenized_dataset['validation']["label"]) labels = tokenized_dataset["train"].features["label"].names cm = confusion_matrix(y_valid, y_preds, labels=[0,1]) disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['negative', 'positive']) disp.plot() plt.grid(False) plt.show()Saving the modeltrainer.save_model('my_saved_model') !ls from transformers import pipeline newmodel = pipeline('text-classification',model='my_saved_model',device=0) newmodel('this airline does not give any good meal') newmodel('i think this airline would have been good if they had inflight entertainement') newmodel('this flight was peace') model !cat my_saved_model/config.json import json config_path = 'my_saved_model/config.json' with open(config_path) as f: j = json.load(f) j['id2label']={0: 'negative', 1: 'positive'} with open(config_path,'w') as f: json.dump(j,f,indent=2) !cat my_saved_model/config.json newmodel = pipeline('text-classification',model='my_saved_model',device=0) newmodel('this airline does not give any good meal') 
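Editing config.json after the fact works, but as a lighter-weight alternative (a sketch, not what the cells above do) you can pass the label mappings when the model is first loaded, so every checkpoint the Trainer saves already carries them:
```
id2label = {0: 'negative', 1: 'positive'}
label2id = {v: k for k, v in id2label.items()}

model = AutoModelForSequenceClassification.from_pretrained(
    model_ckpt,
    num_labels=num_labels,
    id2label=id2label,    # stored in the config, so pipelines print readable labels
    label2id=label2id,
).to(device)
```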
params_after = [] for name,p in model.named_parameters(): params_after.append(p.detach().cpu().numpy()) for p1,p2 in zip(params_before,params_after): print(np.sum(np.abs(p1-p2)))4295.8564 22.397516 0.42992195 0.37554002 315.29938 0.44935855 310.2603 0.0003125943 314.87762 0.36486322 294.90582 0.28997904 0.4074064 0.2932126 1264.9528 1.5678885 1179.0743 0.2436764 0.39350107 0.24740562 312.3826 0.38964438 307.3603 0.0002814801 277.76935 0.27510706 259.61444 0.23003757 0.36502448 0.22532144 1265.5698 1.4621453 1131.4347 0.22013886 0.38440046 0.23639287 314.73383 0.3718079 316.68652 0.0002969986 278.59503 0.25877658 271.8359 0.22087382 0.3821654 0.22072417 1320.4331 1.5025897 1137.2251 0.2309854 0.39983714 0.22972032 347.77847 0.44709066 348.8623 0.00033897278 319.97488 0.2544848 312.5946 0.24171343 0.3905411 0.2387199 1357.4976 1.6211772 1159.6282 0.2905829 0.40575072 0.2863285 349.1933 0.45537767 353.37854 0.00024720648 299.25183 0.2886273 321.3755 0.3158031 0.39768344 0.3406638 1339.9982 1.5978796 1068.1765 0.2795648 0.41227633 0.28192323 345.38422 0.4643112 341.55566 0.00011034015 323.89346 0.27253997 355.9033 0.2883676 0.4718718 0.34186974 1341.129 1.6796286 [...]Copyright 2020 The TensorFlow Authors.#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.The Sequential model View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Setupimport tensorflow as tf from tensorflow import keras from tensorflow.keras import layersWhen to use a Sequential modelA `Sequential` model is appropriate for **a plain stack of layers**where each layer has **exactly one input tensor and one output tensor**.Schematically, the following `Sequential` model:# Define Sequential model with 3 layers model = keras.Sequential( [ layers.Dense(2, activation="relu", name="layer1"), layers.Dense(3, activation="relu", name="layer2"), layers.Dense(4, name="layer3"), ] ) # Call model on a test input x = tf.ones((3, 3)) y = model(x)is equivalent to this function:# Create 3 layers layer1 = layers.Dense(2, activation="relu", name="layer1") layer2 = layers.Dense(3, activation="relu", name="layer2") layer3 = layers.Dense(4, name="layer3") # Call layers on a test input x = tf.ones((3, 3)) y = layer3(layer2(layer1(x)))A Sequential model is **not appropriate** when:- Your model has multiple inputs or multiple outputs- Any of your layers has multiple inputs or multiple outputs- You need to do layer sharing- You want non-linear topology (e.g. 
a residual connection, a multi-branchmodel) Creating a Sequential modelYou can create a Sequential model by passing a list of layers to the Sequentialconstructor:model = keras.Sequential( [ layers.Dense(2, activation="relu"), layers.Dense(3, activation="relu"), layers.Dense(4), ] )Its layers are accessible via the `layers` attribute:model.layersYou can also create a Sequential model incrementally via the `add()` method:model = keras.Sequential() model.add(layers.Dense(2, activation="relu")) model.add(layers.Dense(3, activation="relu")) model.add(layers.Dense(4))Note that there's also a corresponding `pop()` method to remove layers:a Sequential model behaves very much like a list of layers.model.pop() print(len(model.layers)) # 2Also note that the Sequential constructor accepts a `name` argument, just likeany layer or model in Keras. This is useful to annotate TensorBoard graphswith semantically meaningful names.model = keras.Sequential(name="my_sequential") model.add(layers.Dense(2, activation="relu", name="layer1")) model.add(layers.Dense(3, activation="relu", name="layer2")) model.add(layers.Dense(4, name="layer3"))Specifying the input shape in advanceGenerally, all layers in Keras need to know the shape of their inputsin order to be able to create their weights. So when you create a layer likethis, initially, it has no weights:layer = layers.Dense(3) layer.weights # EmptyIt creates its weights the first time it is called on an input, since the shapeof the weights depends on the shape of the inputs:# Call layer on a test input x = tf.ones((1, 4)) y = layer(x) layer.weights # Now it has weights, of shape (4, 3) and (3,)Naturally, this also applies to Sequential models. When you instantiate aSequential model without an input shape, it isn't "built": it has no weights(and calling`model.weights` results in an error stating just this). The weights are createdwhen the model first sees some input data:model = keras.Sequential( [ layers.Dense(2, activation="relu"), layers.Dense(3, activation="relu"), layers.Dense(4), ] ) # No weights at this stage! # At this point, you can't do this: # model.weights # You also can't do this: # model.summary() # Call the model on a test input x = tf.ones((1, 4)) y = model(x) print("Number of weights after calling the model:", len(model.weights)) # 6Once a model is "built", you can call its `summary()` method to display itscontents:model.summary()However, it can be very useful when building a Sequential model incrementallyto be able to display the summary of the model so far, including the currentoutput shape. In this case, you should start your model by passing an `Input`object to your model, so that it knows its input shape from the start:model = keras.Sequential() model.add(keras.Input(shape=(4,))) model.add(layers.Dense(2, activation="relu")) model.summary()Note that the `Input` object is not displayed as part of `model.layers`, sinceit isn't a layer:model.layersA simple alternative is to just pass an `input_shape` argument to your firstlayer:model = keras.Sequential() model.add(layers.Dense(2, activation="relu", input_shape=(4,))) model.summary()Models built with a predefined input shape like this always have weights (evenbefore seeing any data) and always have a defined output shape.In general, it's a recommended best practice to always specify the input shapeof a Sequential model in advance if you know what it is. 
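For instance, a quick check of that claim (a minimal sketch using the same `keras`/`layers` imports from the Setup cell):
```
model = keras.Sequential(
    [
        layers.Dense(2, activation="relu", input_shape=(4,)),
        layers.Dense(3),
    ]
)
print(len(model.weights))  # 4: kernel and bias for each Dense layer, created up front
print(model.output_shape)  # (None, 3): the output shape is already defined
```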
A common debugging workflow: `add()` + `summary()`When building a new Sequential architecture, it's useful to incrementally stacklayers with `add()` and frequently print model summaries. For instance, thisenables you to monitor how a stack of `Conv2D` and `MaxPooling2D` layers isdownsampling image feature maps:model = keras.Sequential() model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images model.add(layers.Conv2D(32, 5, strides=2, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(3)) # Can you guess what the current output shape is at this point? Probably not. # Let's just print it: model.summary() # The answer was: (40, 40, 32), so we can keep downsampling... model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(3)) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(2)) # And now? model.summary() # Now that we have 4x4 feature maps, time to apply global max pooling. model.add(layers.GlobalMaxPooling2D()) # Finally, we add a classification layer. model.add(layers.Dense(10))Very practical, right? What to do once you have a modelOnce your model architecture is ready, you will want to:- Train your model, evaluate it, and run inference. See our[guide to training & evaluation with the built-in loops](https://www.tensorflow.org/guide/keras/train_and_evaluate/)- Save your model to disk and restore it. See our[guide to serialization & saving](https://www.tensorflow.org/guide/keras/save_and_serialize/).- Speed up model training by leveraging multiple GPUs. See our[guide to multi-GPU and distributed training](/guides/distributed_training). Feature extraction with a Sequential modelOnce a Sequential model has been built, it behaves like a [Functional APImodel](https://www.tensorflow.org/guide/keras/functional/). This means that every layer has an `input`and `output` attribute. These attributes can be used to do neat things, likequicklycreating a model that extracts the outputs of all intermediate layers in aSequential model:initial_model = keras.Sequential( [ keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation="relu"), layers.Conv2D(32, 3, activation="relu"), layers.Conv2D(32, 3, activation="relu"), ] ) feature_extractor = keras.Model( inputs=initial_model.inputs, outputs=[layer.output for layer in initial_model.layers], ) # Call feature extractor on test input. x = tf.ones((1, 250, 250, 3)) features = feature_extractor(x)Here's a similar example that only extract features from one layer:initial_model = keras.Sequential( [ keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation="relu"), layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"), layers.Conv2D(32, 3, activation="relu"), ] ) feature_extractor = keras.Model( inputs=initial_model.inputs, outputs=initial_model.get_layer(name="my_intermediate_layer").output, ) # Call feature extractor on test input. 
x = tf.ones((1, 250, 250, 3)) features = feature_extractor(x)!gdown --id 1TE6dok2KCbAzg-RHiDXn0W5hN9ichYX_ !gdown --id 1qmD5315SYiVCh-Ya5rTOhxoMpTVJd7u6 !gdown --id 1Ddtjl0db_zWgDQ48lRw7JSkTljITyJWe !gdown --id 1swHLI0qJN2GNzKMQixxz35RHkgFQlIor !ls -lrt !pwd def shImg(image, txt): plt.imshow(image, cmap="gray", origin="lower") plt.title(txt, fontweight ="bold") plt.show() def show_slices(slices): """ Function to display row of image slices """ fig, axes = plt.subplots(1, len(slices)) for i, slice in enumerate(slices): axes[i].imshow(slice.T, cmap="gray", origin="lower") import torch import torch.nn as nn import torch.nn.functional as F import scipy.io as io import matplotlib.pyplot as plt import torchvision.transforms as transforms import numpy as np class Bottleneck(nn.Module): def __init__(self, channels, internal_ratio=4, kernel_size=3, padding =1, dilation=1, asymmetric=False, dropout_prob=0, bias=False, relu=True): super().__init__() # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > channels: raise RuntimeError("Value out of range. Expected value in the " "interval [1, {0}], got internal_scale={1}." .format(channels, internal_ratio)) internal_channels = channels // internal_ratio if relu: activation = nn.ReLU else: activation = nn.PReLU # Main branch - shortcut connection # 1x1 projection convolution self.ext_conv1 = nn.Sequential( nn.Conv2d( channels, internal_channels, kernel_size=1, stride=1, bias=bias), nn.BatchNorm2d(internal_channels), activation()) self.ext_conv2 = nn.Sequential( nn.Conv2d( internal_channels, internal_channels, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=bias), nn.BatchNorm2d(internal_channels), activation()) # 1x1 expansion convolution self.ext_conv3 = nn.Sequential( nn.Conv2d( internal_channels, channels, kernel_size=1, stride=1, bias=bias), nn.BatchNorm2d(channels), activation()) self.ext_regul = nn.Dropout2d(p=dropout_prob) # PReLU layer to apply after adding the branches self.out_activation = activation() def forward(self, x): # Main branch shortcut main = x # Extension branch ext = self.ext_conv1(x) ext = self.ext_conv2(ext) ext = self.ext_conv3(ext) ext = self.ext_regul(ext) # Add main and extension branches out = main + ext return self.out_activation(out) class AnamNet(nn.Module): def __init__(self): super(AnamNet, self).__init__() # Conv block 1 - Down 1 self.conv1_block = nn.Sequential( nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True), ) self.max1 = nn.MaxPool2d(kernel_size=2, stride=2) # BottleNeck 1 self.bottleneck1 = Bottleneck(64) # Conv block 2 - Down 2 self.conv2_block = nn.Sequential( nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True), ) self.max2 = nn.MaxPool2d(kernel_size=2, stride=2) # BottleNeck 2 self.bottleneck2 = Bottleneck(128) # Conv block 3 - Down 3 self.conv3_block = nn.Sequential( nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True), ) self.max3 = nn.MaxPool2d(kernel_size=2, stride=2) # BottleNeck 3 self.bottleneck3 = Bottleneck(256) # Conv block 4 - Down 4 self.conv4_block = nn.Sequential( nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True), ) self.max4 = nn.MaxPool2d(kernel_size=2, stride=2) # Up 1 self.up_1 = 
nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=2, stride=2) self.bottleneck4 = Bottleneck(256) # Up Conv block 1 self.conv_up_1 = nn.Sequential( nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True), ) # Up 2 self.up_2 = nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=2, stride=2) self.bottleneck5 =Bottleneck(256) # Up Conv block 2 self.conv_up_2 = nn.Sequential( nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True), ) # Up 3 self.up_3 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=2, stride=2) self.bottleneck6 = Bottleneck(128) # Up Conv block 3 self.conv_up_3 = nn.Sequential( nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True), ) # Up 4 self.up_4 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=2, stride=2) # Up Conv block 4 self.conv_up_4 = nn.Sequential( nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, padding=1, stride=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True), ) # Final output self.conv_final = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=1, padding=0, stride=1) def forward(self, x): print('inputTensor', x.shape) print('inputTensor Type', x.type()) #--------------------------------------------------------------------- # Down 1 x = self.conv1_block(x) #print('after conv1', x.shape) conv1_out = x # Save out1 conv1_dim = x.shape[2] x = self.max1(x) #print('after pool1', x.shape) #-------------------------------------------------------------------- x = self.bottleneck1(x) #print('after bnck1', x.shape) # Down 2 x = self.conv2_block(x) #print('after conv2', x.shape) conv2_out = x conv2_dim = x.shape[2] x = self.max2(x) #print('after pool2', x.shape) #------------------------------------------------------------------- x = self.bottleneck2(x) #print('after bnck2', x.shape) # Down 3 x = self.conv3_block(x) #print('after conv3', x.shape) conv3_out = x conv3_dim = x.shape[2] x = self.max3(x) #print('after pool3', x.shape) #------------------------------------------------------------------ x = self.bottleneck3(x) #print('after bnck3', x.shape) # Down 4 x = self.conv4_block(x) #print('after conv4', x.shape) conv4_out = x conv4_dim = x.shape[2] x = self.max4(x) #print('after pool4', x.shape) #---------------------------------------------------------------- # Up 1 x = self.up_1(x) #print('after up_1', x.shape) x = self.bottleneck4(x) #print('after bnck4', x.shape) lower = int((conv4_dim - x.shape[2]) / 2) upper = int(conv4_dim - lower) conv4_out_modified = conv4_out[:, :, lower:upper, lower:upper] x = torch.cat([x, conv4_out_modified], dim=1) #print('after cat_1',x.shape) x = self.conv_up_1(x) #print('after conv1', x.shape) #----------------------------------------------------------------- # Up 2 x = self.up_2(x) #print('after up_2', x.shape) x = self.bottleneck5(x) #print('after bnck5', x.shape) lower = int((conv3_dim - x.shape[2]) / 2) upper = int(conv3_dim - lower) conv3_out_modified = conv3_out[:, :, lower:upper, lower:upper] x = torch.cat([x, conv3_out_modified], dim=1) #print('after cat_2', x.shape) x = self.conv_up_2(x) #print('after conv2', x.shape) #---------------------------------------------------------------- # Up 3 x = self.up_3(x) #print('after up_3', x.shape) x = self.bottleneck6(x) #print('after bnck6', x.shape) lower = int((conv2_dim - x.shape[2]) / 2) upper = 
int(conv2_dim - lower) conv2_out_modified = conv2_out[:, :, lower:upper, lower:upper] x = torch.cat([x, conv2_out_modified], dim=1) #print('after cat_3', x.shape) x = self.conv_up_3(x) #print('after conv3', x.shape) #---------------------------------------------------------------- # Up 4 x = self.up_4(x) #print('after up_3', x.shape) lower = int((conv1_dim - x.shape[2]) / 2) upper = int(conv1_dim - lower) conv1_out_modified = conv1_out[:, :, lower:upper, lower:upper] x = torch.cat([x, conv1_out_modified], dim=1) #print('after cat_4', x.shape) x = self.conv_up_4(x) #print('after conv4', x.shape) # Final output x = self.conv_final(x) #print('Finaloutshape',x.shape) #----------------------------------------------------------------- return x #if __name__ == "__main__": # x= torch.rand(1,1,512,512) # print('In Type :', x.type()) # net=AnamNet() # yy=net(x) # print('In Shape :', x.shape) # print('Out Shape :', yy.shape) if __name__=="__main__" : net = AnamNet() data = io.loadmat('sampledata.mat') net.load_state_dict(torch.load('AnamNet_117_model.pth',map_location=torch.device('cpu'))) #model.pth net.eval() fig, ax = plt.subplots() inp1 = data['im1'] print("1", inp1.shape) plt.subplot(2,7,1) plt.imshow(inp1,cmap='gray', vmin=0, vmax=255) #plt.axis('off') plt.xlabel('CT-Scan') inp1 = np.reshape(inp1,(512,512,1)) print("2", inp1.shape) unique, counts = np.unique(inp1, return_counts=True) print("Unique inp_1",dict(zip(unique, counts))) inp1 = transforms.ToTensor()(inp1).unsqueeze(dim=0) unique, counts = np.unique(inp1, return_counts=True) print("Unique inp_1",dict(zip(unique, counts))) out1 = net(inp1) plt.subplot(2,7,8) out1 = out1.detach(); #https://stackoverflow.com/questions/41203137/how-do-you-reduce-the-dimension-of-a-numpy-array out1 = np.squeeze(out1, axis=0) print("out1 after squeeze", out1.shape) pred = torch.argmax(out1, dim=0) print("pred shape", pred.shape) unique, counts = np.unique(pred, return_counts=True) print("pred",dict(zip(unique, counts))) plt.imshow(pred.numpy()) plt.xlabel('Output') #plt.axis('off') plt.show() # https://www.analyticsvidhya.com/blog/2019/01/guide-pytorch-neural-networks-case-studies/ # https://www.analyticsvidhya.com/blog/2019/04/build-first-multi-label-image-classification-model-python/ # https://www.analyticsvidhya.com/blog/2019/10/building-image-classification-models-cnn-pytorch/ # https://towardsdatascience.com/convolution-neural-network-for-image-processing-using-keras-dc3429056306 # How to visualize segmentation output - multiclass feature map to rgb image? 
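# A minimal sketch of one answer to the question above (not taken from the linked
# threads): map each class id in the argmax prediction to an RGB colour via a small
# palette. The colours below are arbitrary placeholders for AnamNet's 3 output classes.
def colorize_mask(pred, palette=((0, 0, 0), (255, 0, 0), (0, 255, 0))):
    """Convert an HxW tensor/array of class ids into an HxWx3 uint8 RGB image."""
    pred = np.asarray(pred)
    rgb = np.zeros((*pred.shape, 3), dtype=np.uint8)
    for class_id, colour in enumerate(palette):
        rgb[pred == class_id] = colour
    return rgb
# Usage with the prediction computed above: plt.imshow(colorize_mask(pred))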
# https://discuss.pytorch.org/t/how-to-visualize-segmentation-output-multiclass-feature-map-to-rgb-image/26986/2 # colorize an output class labels of a segmentation in pytorch # https://github.com/dusty-nv/pytorch-segmentation/blob/master/datasets/deepscene_remap.py # https://towardsdatascience.com/semantic-hand-segmentation-using-pytorch-3e7a0a0386fa data_arr_im = np.loadtxt("test_image.npy",dtype=int) im_inp1 = data_arr_im.reshape(data_arr_im.shape[0], data_arr_im.shape[1] // 5, 5) data_arr_lab = np.loadtxt("test_label.npy",dtype=int) im_lab1 = data_arr_lab.reshape(data_arr_lab.shape[0], data_arr_lab.shape[1] // 5, 5) print("inp1.shape : ", im_inp1.shape) print("lab1.shape : ", im_lab1.shape) inp_1 = im_inp1[:,:,0] lab_1 = im_lab1[:,:,0] inp_2 = im_inp1[:,:,1] lab_2 = im_lab1[:,:,1] show_slices([inp_1, lab_1]) show_slices([inp_2, lab_2]) net = AnamNet() #data = io.loadmat('sampledata.mat') net.load_state_dict(torch.load('AnamNet_117_model.pth',map_location=torch.device('cpu'))) #cpu existing net.eval() #fig, ax = plt.subplots() #inp1 = data['im1'] inp1 = im_inp1[:,:,0] lab1 = im_lab1[:,:,0] print("1", inp1.shape) inp1 = np.reshape(inp1,(512,512,1)) # https://stackoverflow.com/questions/14476415/reshape-an-array-in-numpy/14476457#14476457 https://deeplizard.com/learn/video/fCVuiW9AFzY print("2",inp1.shape) unique, counts = np.unique(inp1, return_counts=True) print("Unique inp_1",dict(zip(unique, counts))) #inp1 = inp1.unsqueeze(dim=0) #print(inp1.shape) inp1 = inp1.astype(float) inp1 = transforms.ToTensor()(inp1).unsqueeze(dim=0) #https://stackoverflow.com/questions/57237352/what-does-unsqueeze-do-in-pytorch [Adds a new diamension at 0] print("3",inp1.type()) #inp1 = torch.long(inp1) unique, counts = np.unique(inp1, return_counts=True) print("Unique inp_1",dict(zip(unique, counts))) print("4",inp1.shape) out1 = net(inp1) #https://stackoverflow.com/questions/63383347/runtimeerror-expected-object-of-scalar-type-long-but-got-scalar-type-float-for #print(out1.shape) #lab_out = out1.numpy() #show_slices([inp1, lab1]) #show_slices([inp1, lab_out]) net = AnamNet() #data = io.loadmat('sampledata.mat') net.load_state_dict(torch.load('AnamNet_117_model.pth',map_location=torch.device('cpu'))) net.eval() fig, ax = plt.subplots() inp1 = data['im1'] plt.subplot(2,7,1) plt.imshow(inp1,cmap='gray', vmin=0, vmax=255) #plt.axis('off') plt.xlabel('CT-Scan') inp1 = np.reshape(inp1,(512,512,1)) inp1 = transforms.ToTensor()(inp1).unsqueeze(dim=0) out1 = net(inp1) plt.subplot(2,7,8) plt.imshow(out1.numpy()) plt.xlabel('Output') #plt.axis('off') #plt.show() inp2 = data['im2'] plt.subplot(2,7,3) plt.imshow(inp2,cmap='gray', vmin=0, vmax=255) plt.xlabel('CT-Scan') #plt.axis('off') plt.xlabel('CT-Scan') inp2 = np.reshape(inp2,(512,512,1)) inp2 = transforms.ToTensor()(inp2).unsqueeze(dim=0) out2 = net(inp2) plt.subplot(2,7,10) plt.imshow(out2.numpy()) plt.xlabel('Output') #plt.axis('off') #plt.show() inp3 = data['im3'] plt.subplot(2,7,5) plt.imshow(inp3,cmap='gray', vmin=0, vmax=255) #plt.axis('off') plt.xlabel('CT-Scan') inp3 = np.reshape(inp3,(512,512,1)) inp3 = transforms.ToTensor()(inp3).unsqueeze(dim=0) out3 = net(inp3) plt.subplot(2,7,12) plt.imshow(out3.numpy()) plt.xlabel('Output') #plt.axis('off') #plt.show() inp4 = data['im4'] plt.subplot(2,7,7) plt.imshow(inp4,cmap='gray', vmin=0, vmax=255) #plt.axis('off') plt.xlabel('CT-Scan') inp4 = np.reshape(inp4,(512,512,1)) inp4 = transforms.ToTensor()(inp4).unsqueeze(dim=0) out4 = net(inp4) plt.subplot(2,7,14) plt.imshow(out4.numpy()) plt.xlabel('Output') 
#plt.axis('off') plt.show()Markdown Autor: [](https://www.linkedin.com/in/daniel-ortiz-lópez/) ¿Qué es?Lenguaje de marcado que nos permite aplicar formato a nuestros textos mediante unos caracteres especiales. Muy útil cuando tenemos que documentar algo, escribir un artículo, o entregar un reporte. Este lenguaje está pensado para web, pero es muy común utilizarlo en cualquier tipo de texto, independientemente de su destino.Lo bueno que tiene es que se edita en **texto plano y está integrado en muchísimas herramientas**, como Jupyter Notebook o RStudio. Markdown vs HTMLTenemos un viejo conocido en cuanto a programación web: HTML. Son lenguajes muy diferentes. Con HTML podemos construir un complejo **árbol de tags**, mientras que markdown se desarrolla en texto plano. Por supuesto, las finalidades también son distintas. HTML se aplica a todo tipo de webs, ya sean sencillas o complejas, mientras que markdown se suele usar para blogs o artículos. Su sencillez a la hora de desarrollar le penaliza en su versatilidad. Pero como el objetivo de este curso no es hacer páginas web, markdown cumple más que de sobra para acompañar y mejorar la comprensión de nuestro código. Además, ya verás a lo largo de este notebook que ambos lenguajes son perfectamente compatibles. ¿Cómo funciona?Contiene una serie de **caracteres especiales** que le dan forma a los textos. Por ejemplo, si queremos un texto en *cursiva*, simplemente lo rodearemos con asteriscos. Lo veremos en detalle en este Notebook. ¿De qué nos va a servir?En Jupyter lo normal será crear celdas con código, pero también tenemos la posibilidad de insertar celdas de markdown, donde podremos poner **imágenes, títulos, enumerar texto, listar, citar y mucho más!** 1. Primera celdaHaz doble clik en esta celda y verás cómo cambia el texto. Significa que estás en el **modo edición** de Markdown.Como puedes observar, markdown se edita como si fuese texto plano, y en el caso concreto de los párrafos, no necesita de ningún caracter para que markdown sepa que es un párrafo. Sin embargo, fíjate que para la cabecera "1.Primer celda", hay dos hashtags delante que indican que es un encabezado. Veremos en el apartado 2 cómo crear cabeceras.Haz ctrl + enter para ejecuta la celda (o botón de play de arriba). Así abandonamos el modo edición y nuestro texto obtiene el formato que deseábamos.**¡Tu turno!** Crea una celda nueva en el menu de arriba y selecciona la opción Markdown ![imagen](img/primer_celda.png) *Cabecera* Cabecera# Esto es código de Python. # Va a ser muy habitual en el curso, acompañar el código de Python mediante celdas de markdown.**TIP**: cuando estemos escribiendo markdown, un buen indicador de que lo estamos haciendo bien es que **la letra cambia de color o de forma**. Significa que markdown ha interpretado los simbolos que has puesto. Si estamos escribiendo en cursiva, verás que la letra cambia a cursiva si lo estas haciendo bien. Por supuesto, también podemos ejecutar y ver el resultado, pero si queremos comprobar que la sentencia que escribimos es correcta, tendrás esa opción en Jupyter. 2. CabecerasYa has visto que en el apartado anterior usábamos dos hashtag para poner una cabecera. ¿Por qué dos? Cuantos más hashtags, menor es el tamaño del título. El tamaño mínimo lo obtenemos con 6 hashtags. Es decir, tenemos hasta 6 niveles de profundidad para aplicar a los apartados de nuestro notebook. Normalmente con 3 o 4 hay más que de sobra, pero también depende del tamaño que queramos darle a las cabeceras. 3. 
HTMLComo te comentaba al principio, una cosa es utilizar markdown y otra HTML. No obstante, markdown nos ofrece la posibilidad de escribir código HTML, dentro de una celda markdown. Si te manejas bien con HTML y quieres insertar una porción de código de este lenguaje, markdown lo va a interpretar. 4. Negrita, cursivaPaara resaltar texto en negrita tenemos que rodearlo con asteriscos. En el caso en que queramos cursiva, será un único asterisco, y si deseamos combinar negrita con cursiva, son 3 asteriscos. *cursiva***negrita*****negrita y cursiva*** Cuidado con dejar espacios entre los asteriscos y el texto. Es decir, si queremos escribir en negrita, inmediatamente despues de los asteriscos tiene que ir el texto: ** No es negrita ** 5. CitarEn ocasiones resulta útil poner una citación, o una nota, destacándola con un margen. Esto lo podemos hacer mediante el símbolo mayor que ">" esto no es una cita>esto si es una cita>>Esto es una cita con mayor tabulacion 6. ListasHay dos opciones. **Listas ordenadas o sin ordenar**. Si queremos listas ordenadas, simplemente usamos números 1. elemento 12. elemento 2 Lista no ordenada * Elemento 1- Elemento 2+ Elemento 3 7. Código de PythonEs otra manera de enseñar código. Se suele usar cuando lo único que quieres es mostrar un fragmento de código, pero sin ejecutarlo ```Pythonstring = "Esto es codigo Python"`palabra```` 8. Líneas de separaciónPara separar secciones utilizamos líneas horizontales. Hay varias opciones en markdown para insertar una lína horizontal. En este ejemplo se usa o asteriscos o guiones. ***--- 9. Links y enlacesPara crear enlaces externos, a páginas web, se usa la sintaxis `[ enlace ] (web)`[enlace en línea](http://www.google.es)Tambien podemos definir [un enlace][mi_web].A una [web][mi_web] a la que podemos referenciar mas adelante[mi_web]: http://www.google.esPor otro lado, podemos definir links que vayan a otras partes del Notebook, como por ejemplo a una cabecera concreta. Si haces clik en [este enlace](Markdown), volverás al inicio del notebook.Con [este otro enlace](1.-Primera-celda) vas al primer apartado.¿Cómo linkarlos? 
Copiamos el nombre de la cabecera, sustituimos espacios por guiones, le añadimos en hashtag al principio, y eso es lo que va dentro de los paréntesis.[link de prueba](https://www.google.com/)Load the Datasetdf=pd.read_table("SMSSpamCollection",header=None,encoding='utf-8') df.head() df.info() df.value_counts() classes=df[0] classes.value_counts()Preprocessing the Datafrom sklearn.preprocessing import LabelEncoder encoder=LabelEncoder() y=encoder.fit_transform(classes) print(classes[:10]) print(y[:10]) text_messages = df[1] text_messages[:10] import re #replace email processed=text_messages.str.replace(r'^.+@[^\.].*\.[a-z]{2,}$','emailaddr') #replace url processed=processed.str.replace(r'^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$','webaddr') #replace moneysym processed=processed.str.replace(r'£|\$','moneysymbol') #replace phonenumber processed=processed.str.replace(r'^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$','phonenumber') #replace normal number processed=processed.str.replace(r'\d+(\.\d+)?','number') #remove punctuation processed=processed.str.replace(r'[^\w\d\s]',' ') #removce whitespace processed=processed.str.replace(r'\s+',' ') #remove leading and trailing whitespace processed=processed.str.replace(r'^\s+|\s+?$',' ') #change word to lower case processed=processed.str.lower() processed #remove stopword fromtext message from nltk.corpus import stopwords stop_words=set(stopwords.words('english')) processed=processed.apply(lambda x : ' '.join(term for term in x.split() if term not in stop_words)) #remove word stems using Porter stemmer ps=nltk.PorterStemmer() processed=processed.apply(lambda x : ' '.join(ps.stem(term) for term in x.split())) processed[:10] from nltk.tokenize import word_tokenize #creating bag of words all_words=[] for message in processed: words=word_tokenize(message) for w in words: all_words.append(w) all_words=nltk.FreqDist(all_words) print(f'Number of words',len(all_words)) print(f'Most Common Words',all_words.most_common(15)) #use the most common words as features word_features=list(all_words.keys())[:1500] #define a find_features functionj def find_features(message): words=word_tokenize(message) features={} for word in word_features: features[word]=(word in words) return features features=find_features(processed[0]) for key,value in features.items(): if value==True: print (key) processed[0] message=list(zip(processed,y)) seed=1 np.random.seed=seed np.random.shuffle(message) featuresets=[(find_features(text),label) for (text,label) in message] from sklearn import model_selection training,testing=model_selection.train_test_split(featuresets,test_size=0.25,random_state=seed) print(f'training data length:',len(training)) print(f'testing data length:',len(testing))training data length: 4179 testing data length: 1393SKLearn classifiers with NLTKfrom sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression,SGDClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC from sklearn.metrics import classification_report,accuracy_score,confusion_matrix names=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear'] classifier=[KNeighborsClassifier(), DecisionTreeClassifier(), RandomForestClassifier(), LogisticRegression(), SGDClassifier(max_iter=100), MultinomialNB(), SVC(kernel='linear')] models=zip(names,classifier) print(models) from 
nltk.classify.scikitlearn import SklearnClassifier for name,model in models: nltk_model=SklearnClassifier(model) nltk_model.train(training) accuracy=nltk.classify.accuracy(nltk_model,testing)*100 print('{}: Accuracy {}'.format(name,accuracy)) #voting classifier from sklearn.ensemble import VotingClassifier names=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear'] classifier=[KNeighborsClassifier(),DecisionTreeClassifier(),RandomForestClassifier(),LogisticRegression(),SGDClassifier(max_iter=100),MultinomialNB(),SVC(kernel='linear')] models=zip(names,classifier) nltk_ensemble=SklearnClassifier(VotingClassifier(estimators=list(models),voting='hard',n_jobs=-1)) nltk_ensemble.train(training) accuracy=nltk.classify.accuracy(nltk_ensemble, testing)*100 print('Ensemble Method Accuracy : {} '.format(accuracy)) txt_features,labels=zip(*testing) prediction=nltk_ensemble.classify_many(txt_features) #print a classfication report and confusion matrix print(classification_report(labels,prediction)) pd.DataFrame(confusion_matrix(labels,prediction), index=[['actual','actual'],['ham','spam']], columns=[['predicted','predicted'],['ham','spam']])precision recall f1-score support 0 0.98 1.00 0.99 1210 1 0.99 0.89 0.94 183 accuracy 0.98 1393 macro avg 0.99 0.94 0.96 1393 weighted avg 0.98 0.98 0.98 1393Three Little CirclesThe "Hello World" (or [Maxwell's Equations](http://www.michaelnielsen.org/ddi/lisp-as-the-maxwells-equations-of-software/)) of d3, [Three Little Circles](http://bost.ocks.org/mike/circles/) introduces all of the main concepts in d3, which gives you a pretty good grounding in data visualization, JavaScript, and SVG. Let's try out some circles in livecoder.First, we need `Livecoder`, and `traitlets`, the Observer/Observable pattern used in building widgets.from livecoder.widgets import Livecoder from IPython.utils import traitlets as T:0: FutureWarning: IPython widgets are experimental and may change in the future.`Livecoder` by itself doesn't do much. Let's add a traitlet for where we want to draw the circles (the `cx` attribute).class ThreeCircles(Livecoder): x = T.Tuple([1, 2, 3], sync=True)Notice the `sync` argument: this tells IPython that it should propagate changes to the front-end. No REST for the wicked?circles = ThreeCircles(description="three-circles") circles.descriptionAlmost there! To view our widget, we need to `display` it, which is the default behavior by just having the widget be the last line of a code cell.circlesLearning Curves para Machine Learning 01 - O trade-off viés-variânciaAntes de cair de cabeça nós estudos sobre **Learning Curves (ou Curvas de Aprendizado)** nós precisamos esclarecer algumas coisas primeiro. Principalmente, **O trade-off viés-variância**.Inicialmente, o que vocês tem que ter em mente é que:> A medida que aumenta a *complexidade* de um modelo o **vies/bias** e **variância** vão se distanciando inversamente.Para ficar mais claro vamos analisar a imagem (gráfico) abaixo:![img](images/overfitting-underfitting-03.png) - **Quanto MAIOR A COMPLEXIDADE do modelo (eixo-x), ou seja, mais parâmetros (features) são adicionados:** - Maior é o Erro da Variância; - Menor é o Erro do Viés/Bias. - **Quanto MENOR A COMPLEXIDADE do modelo (eixo-x), ou seja, menos parâmetros (features) são adicionados:** - Menor é o Erro de Variância; - Maior é o Erro de Viés/Bias. 
- **O erro total (Total Error) independente da Complexidade do modelo vai iniciar e terminar lá em cima, porém, ele não é constante**: - Vejam que ele começa lá em cima > vai caindo > sobe novamente - Por isso, não é constante - **O melhor a se fazer é encontrarmos o equilíbrio entre os dois erros e o erro total:** - A linha tracejada demonstra o equilíbrio (Optimum Model Complexity) entre os dois erros e o erro total e é lá onde queremos chegar.**NOTE:** Olhando para a imagem e as observações acima fica claro que existe um **trade-off** entre o **viés/bias** e a **variância** e nós queremos chegar no valor mínimo possível dos dois erros simultaneamente. --- 02 - Introdução a Learning Curves (ou Curvas de Aprendizado)Para entender como funcionam as **Learning Curves (ou Curvas de Aprendizado)** vamos seguir com a seguinte analogia: - **Digamos que temos alguns dados e os dividimos em um conjunto de treinamento e um conjunto de validação:** - Pegamos uma única instância (isso mesmo, uma!) do conjunto de treinamento e a usamos para estimar um modelo; - Em seguida, medimos o erro do modelo no conjunto de validação em relação essa única instância de treinamento.Algo parecido com isso:![img](images/lc-01.png) - **Erro no conjunto de treinamento:** - O erro na instância de treinamento será 0 (zero), pois é muito fácil ajustar perfeitamente um único ponto de dados. - **Erro no conjunto de validação:** - Porém, o erro no conjunto de validação, no entanto, foi muito grande; - Isso ocorre porque o modelo é construído (treinado) em torno de uma única instância e quase certamente não será capaz de generalizar com precisão em dados que não foram vistos antes.Agora digamos que em vez de uma instância de treinamento, pegamos 10 (dez) e repetimos as medidas de erro, algo parecido com isso:![img](images/lc-02.png) - **Erro no conjunto de treinamento:** - Agora temos uma situação onde os dados não se ajustam perfeitamente como antes. - **Erro no conjunto de validação:** - Porém, o erro nos dados de validação ainda é muito grande, ou seja, o conjunto de treinamento não aprendeu o suficiente para generalizar em dados que nunca viu antes.Ok, agora vamos aumentar o número de instâncias de treinamento para 80, algo parecido com isso:![img](images/lc-03.png) - **Erro no conjunto de treinamento:** - Vejam que quanto mais dados nós passamos para o modelo aprender mais aumenta o erro do conjunto de treinamento. - **Erro no conjunto de validação:** - Porém, o erro nos dados de validação diminuem.**Então, e se pegamos cem, quinhentos, mil, até usarmos todo o nosso conjunto de treinamento?** As pontuações de erro variam mais ou menos à medida que alteramos o conjunto de treinamento. Assim, temos duas pontuações de erro para monitorar: - Uma para o conjunto de treinamento; - E outra para os conjuntos de validação.**NOTE:** Se traçarmos a evolução das duas pontuações de erro à medida que os conjuntos de treinamento mudam, acabamos com duas curvas. Estas curvas são chamadas de **Learning Curves (ou Curvas de Aprendizado)**.> Em poucas palavras, uma **Learning Curves (ou Curvas de Aprendizado)** mostra como o erro muda à medida que o tamanho do conjunto de treinamento aumenta.Se tivéssemos plotado **Learning Curves (ou Curvas de Aprendizado)** para os conjuntos de treinamento e validação dos exemplos acima nós teríamos seguintes gráficos:![img](images/learning_curves.webp) - **Erro no conjunto de treinamento:** - A medida que aumenta as instâncias do conjunto de treinamento, aumenta o erro do mesmo. 
- **Erro no conjunto de validação:** - Porém, a medida que aumenta as instâncias do conjunto de treinamento diminui o erro do conjunto de validação.**NOTE:** As **Learning Curves (ou Curvas de Aprendizado)** nos dão a oportunidade de diagnosticar **variância** e **viés/bias** em modelos de *aprendizado supervisionado*. --- 03 - Learning Curves na Prática com Scikit-Learn 03.1 - Escolhendo e entendendo o ProblemaAs **Learning Curves (ou Curvas de Aprendizado)** traçadas acima são idealizadas para fins de ensino. Na prática, no entanto, elas geralmente parecem significativamente diferentes. Então, vamos levar a discussão para um cenário prático usando alguns dados do mundo real.Para entender na prática como funcionam às **Learning Curves (ou Curvas de Aprendizado)** tentaremos construir:> **Modelo de regressão que prevejam a produção horária de energia elétrica de uma usina.****NOTE:** Vamos utilizar o conjunto de dados [Combined Cycle Power Plant Data Set](https://archive.ics.uci.edu/ml/datasets/Combined+Cycle+Power+Plant) vêm dos pesquisadores turcos **** e ****.**Resumo do problema:** O conjunto de dados contém *9.568 pontos de dados (amostras)* coletados de uma Usina de Ciclo Combinado ao longo de 6 anos (2006-2011), quando a usina foi configurada para trabalhar com carga total.**Informações do atributo:** - Os recursos consistem em variáveis ​​ambientais médias por hora - Temperatura (T) na faixa de 1,81°C e 37,11°C, - Pressão ambiente (AP) na faixa de 992,89-1033,30 milibar, - Umidade Relativa (RH) na faixa de 25,56% a 100,16 % - Vácuo de Exaustão (V) na faixa 25,36-81,56 cm Hg - Produção de energia elétrica horária líquida (EP) 420,26-495,76 MWAgora que já entendemos o básico sobre o problema, vamos pegar o conjunto de dados com a função **read_excel()** da biblioteca Pandas:import pandas as pd electricity = pd.read_excel('data/Folds5x2_pp.xlsx') electricity.info() electricity.head() RangeIndex: 9568 entries, 0 to 9567 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 AT 9568 non-null float64 1 V 9568 non-null float64 2 AP 9568 non-null float64 3 RH 9568 non-null float64 4 PE 9568 non-null float64 dtypes: float64(5) memory usage: 373.9 KB**Observações do problema:** - A coluna (feature) **PE** acima é a variável de destino (target) e descreve a saída horária líquida de energia elétrica. - Todas as outras variáveis são características potenciais, e os valores para cada uma são, na verdade, médias horárias (não valores líquidos, como para **PE**). - A eletricidade é gerada por: - turbinas a gás; - turbinas a vapor; - E geradores de vapor de recuperação de calor. - De acordo com a documentação do conjunto de dados: - O nível de vácuo tem efeito nas turbinas a vapor; - Enquanto as outras três variáveis afetam as turbinas a gás.Consequentemente, usaremos todas as colunas (features) de recursos em nossos modelos de regressão. Nesta etapa, normalmente deixamos de lado um conjunto de teste, exploramos os dados de treinamento minuciosamente, removemos quaisquer discrepâncias, medimos correlações etc.**NOTE:** Para fins de ensino, no entanto, vamos supor que isso já foi feito e pular direto para gerar algumas curvas de aprendizado. Antes de começarmos, vale a pena notar que não há valores ausentes. Além disso, os números não são dimensionados, mas evitaremos o uso de modelos que tenham problemas com dados não dimensionados. 
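Só como um esboço mínimo e ilustrativo (não faz parte do fluxo original do tutorial), é assim que poderíamos confirmar essas duas observações, supondo que o DataFrame `electricity` já foi carregado com `read_excel()` como mostrado acima:

```python
# Esboço ilustrativo: conferindo valores ausentes e as escalas das colunas.
# Supõe que `electricity` já foi carregado com pd.read_excel() como acima.
print(electricity.isnull().sum())   # contagem de valores ausentes por coluna (esperado: 0 em todas)
print(electricity.describe())       # min/max/média mostram que as features estão em escalas bem diferentes
```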
--- 03.2 - Decidindo os tamanhos dos conjuntos de treinamentoVamos primeiro decidir quais tamanhos dos conjuntos de treinamento queremos usar para gerar as curvas de aprendizado: - O valor mínimo é 1; - O máximo é dado pelo número de instâncias no conjunto de treinamento. - Nosso conjunto de treinamento possui 9.568 instâncias, portanto, o valor máximo é 9.568. - No entanto, ainda não deixamos de lado um conjunto de validação. Faremos isso usando uma proporção de **80:20**: - Terminando com um conjunto de treinamento de 7.654 instâncias (80%); - E um conjunto de validação de 1.914 instâncias (20%). - Dado que nosso conjunto de treinamento terá 7654 instâncias, o valor máximo que podemos usar para gerar nossas curvas de aprendizado é 7654.Para o nosso exemplo usaremos os seguintes 6 tamanhos:train_sizes = [1, 100, 500, 2000, 5000, 7654]**Algumas observações aqui são:** - Uma coisa importante a se ter em mente é que para cada tamanho especificado um novo modelo é treinado. - Para economizar tempo de execução do código, é uma boa prática limitar-se a **5** a **10 tamanhos de treinamento**. --- 03.2 - Utilizando a função learning_curve() da biblioteca scikit-learnÓtimo, agora que já definimos os tamanhos para os conjuntos de treinamento vamos aprender sobre a função **learning_curve()** da biblioteca *scikit-learn*, responsável por gerar **Learning Curves (ou Curvas de Aprendizado)**.**NOTE:** Um ponto importante aqui é que não há necessidade da nossa parte deixar de lado um *conjunto de validação*, porque a função **learning_curve()** cuidará disso.Primeiro, vamos deixar o código abaixo que faz toda essa bruxaria e depois vamos para as explicações:from sklearn.linear_model import LinearRegression from sklearn.model_selection import learning_curve features = ['AT', 'V', 'AP', 'RH'] # Features. target = 'PE' # Target variable. train_sizes, train_scores, validation_scores = learning_curve( estimator = LinearRegression(), X = electricity[features], y = electricity[target], train_sizes = train_sizes, cv = 5, scoring = 'neg_mean_squared_error' )**O que aconteceu no código acima foi o seguinte:** - **Importamos as classes necessárias:** - LinearRegression() - learning_curve() - **Criamos variáveis que vão armazenar as variáveis independentes e dependente:** - Independentes = features = ['AT', 'V', 'AP', 'RH'] - Dependente = target = 'PE' - **Criamos uma instância da classe *learning_curve* que:** - **Recebeu como argumento:** - Estimator = Objeto (modelo) que ele vai utilizar para fazer previsões: - Esse objeto tem que ser capaz de executar os métodos “fit” e “predict”. - Variáveis independentes (X); - Variável dependente (y ou target); - train_sizes = O tamanho dos conjuntos de treinamento; - cv = Cross-Validation - Scoring = Métrica de Avaliação - **Retornou:** - train_sizes: - Números de exemplos de treinamento que foram usados para gerar a *curva de aprendizado*. - Observe que o número de ticks pode ser menor que n_ticks porque as entradas duplicadas serão removidas. - *Na verdade esse retorno vai ser a mesma lista que nós passamos como entrada: train_sizes = [1, 100, 500, 2000, 5000, 7654]* - train_scores: - Scores DOS conjuntoS de treinamento. - validation_scores: - Scores DO conjunto de teste (ou validação). --- 03.3 - Analisando as saídas da função learning_curve()Como nós já sabemos o que está dentro da variável **train_sizes (1, 100, 500, 2000, 5000, 7654)** vamos apenas inspecionar as outras duas variáveis para ver o que **learning_curve()** nos retornou. 
Que na verdade, que são: - **train_scores::** - Os resultados para os erros de treinamento. - **validation_scores:** - Os resultados para os erros de validação. Vamos começar com os erros de treinamento:type(train_scores)Opa, vejam que nós temos Arrays Numpy como saída... vamos ver as dimenões?train_scores.shape**O que significa essa matriz 6x5?** - **Linhas (6):** - Representam os erros para os 6 conjuntos de treinamentos que nós separamos. - **Colunas (5):** - Como nós estamos utilizando Cross-Validation=5, significa que para cada conjunto de treinamento vamos ter 5 splits de 5 folds cada. **NOTE:** Essa mesma abordagem de análise nós vamos fazer agora, porém, para os dados de validação:type(train_scores) train_scores.shape**Por que nós temos as mesmas dimensões para os conjuntos de validação?** Para cada conjunto de treinamento (e suas divisões) nós temos seus respectivos erros de conjunto de validação. Por isso, as mesmas dimensões.**NOTE:** Por fim, vamos exibir esses erros para os **conjuntos de treinamento** e **validação**:print('Training scores:\n\n', train_scores) print('\n', '-' * 70) # Estamos multiplicando o caractere '-' por 70, ou seja, uma linha tracejada. print('\nValidation scores:\n\n', validation_scores)Training scores: [[ -0. -0. -0. -0. -0. ] [-19.71230701 -18.31492642 -18.31492642 -18.31492642 -18.31492642] [-18.14420459 -19.63885072 -19.63885072 -19.63885072 -19.63885072] [-21.53603444 -20.18568787 -19.98317419 -19.98317419 -19.98317419] [-20.47708899 -19.93364211 -20.56091569 -20.4150839 -20.4150839 ] [-20.98565335 -20.63006094 -21.04384703 -20.63526811 -20.52955609]] ---------------------------------------------------------------------- Validation scores: [[-619.30514723 -379.81090366 -374.4107861 -370.03037109 -373.30597982] [ -21.80224219 -23.01103419 -20.81350389 -22.88459236 -23.44955492] [ -19.96005238 -21.2771561 -19.75136596 -21.4325615 -21.89067652] [ -19.92863783 -21.35440062 -19.62974239 -21.38631648 -21.811031 ] [ -19.88806264 -21.3183303 -19.68228562 -21.35019525 -21.75949097] [ -19.9046791 -21.33448781 -19.67831137 -21.31935146 -21.73778949]]**NOTE:** Como nós especificamos 6 tamanhos de conjuntos de treinamento (1, 100, 500, 2000, 5000, 7654), você talvez estaja esperando 6 valores para cada tipo de pontuação (score). Entretando, como já foi analisado acima, na hora da instanciação da classe **learning_curve()** nós passamos o parâmetro **cv=5**, ou seja, estamos utilizando uma validação-cruzada (K-Fold) igual a 5. Por isso, temos matrizes **6x5**, onde:Vamos pegar apenas **train_scores** e abstrair para a seguinte tabela:| Tamanho do conjunto de treinamento (índice) | Divisão1 | Divisão2 | Divisão3 | Divisão 4 | Divisão 5 ||---------------------------------------------|--------------|--------------|--------------|--------------|--------------|| 1 | 0 | 0 | 0 | 0 | 0 || 100 | -19.71230701 | -18.31492642 | -18.31492642 | -18.31492642 | -18.31492642 || 500 | -18.14420459 | -19.63885072 | -19.63885072 | -19.63885072 | -19.63885072 || 2000 | -21.53603444 | -20.18568787 | -19.98317419 | -19.98317419 | -19.98317419 || 5000 | -20.47708899 | -19.93364211 | -20.56091569 | -20.4150839 | -20.4150839 || 7654 | -20.98565335 | -20.63006094 | -21.04384703 | -20.63526811 | -20.52955609 | **NOTE:** Uma observação aqui é que para plotar as **curvas de aprendizado**, precisamos apenas de uma única pontuação (score) de erro por tamanho de conjunto de treinamento e não 5. 
Por esse motivo, vamos pegar o valor médio de cada linha e também inverter os sinais das pontuações de erro, ou seja, vamos deixar os scores positivos (menos o primeiro que foi 0 erro).train_scores_mean = -train_scores.mean(axis = 1) validation_scores_mean = -validation_scores.mean(axis = 1) print('Mean training scores:\n', pd.Series(train_scores_mean, index = train_sizes)) print('\n', '-' * 20) # Separator print('\nMean validation scores:\n',pd.Series(validation_scores_mean, index = train_sizes))Mean training scores: 1 -0.000000 100 18.594403 500 19.339921 2000 20.334249 5000 20.360363 7654 20.764877 dtype: float64 -------------------- Mean validation scores: 1 423.372638 100 22.392186 500 20.862362 2000 20.822026 5000 20.799673 7654 20.794924 dtype: float64**NOTE:** Ótimo, agora nós temos as médias dos erros para os 6 conjuntos de treinamento e os respectivos erros de validação. Finalmente, temos todos os dados que precisamos para traçar as **Learning Curves (ou Curvas de Aprendizado)**. Porém, antes de fazer a plotagem, precisamos parar e fazer algumas observações importantes...Primeiro, você deve ter notado que algumas pontuações (scores) de erro nos conjuntos de treinamento são as mesmas:| Tamanho do conjunto de treinamento (índice) | Divisão1 | Divisão2 | Divisão3 | Divisão 4 | Divisão 5 ||---------------------------------------------|--------------|--------------|--------------|--------------|--------------|| 1 | **0** | **0** | **0** | **0** | **0** || 100 | -19.71230701 | **-18.31492642** | **-18.31492642** | **-18.31492642** | **-18.31492642** || 500 | -18.14420459 | **-19.63885072** | **-19.63885072** | **-19.63885072** | **-19.63885072** || 2000 | -21.53603444 | -20.18568787 | **-19.98317419** | **-19.98317419** | **-19.98317419** || 5000 | -20.47708899 | -19.93364211 | -20.56091569 | **-20.4150839** | **-20.4150839** || 7654 | -20.98565335 | -20.63006094 | -21.04384703 | -20.63526811 | -20.52955609 |Vejam que elas ou são as mesmas ou se aproximam muito, para a linha correspondente ao tamanho do conjunto de treinamento de 1, isso é esperado, mas e as outras linhas?Com exceção da última linha, temos muitos valores idênticos. Por exemplo, pegue a segunda linha onde temos valores idênticos da segunda divisão em diante. 
Por que, isso?> **Isso é causado por não randomizar os dados de treinamento para cada divisão.**Para entender melhor veja o diagrama (imagem) abaixo:![img](images/splits.png) Quando o tamanho de treinamento é 500, **as primeiras 500 instâncias no conjunto de treinamento são selecionadas**, porém, como a primeira parte foi reservada para os dados de validação as 500 instâncias serão retiradas da segunda parte:![img](images/splits-01.png) **NOTE:** Uma observação aqui é que a partir da segunda divisão (splits), essas 500 instâncias serão retiradas da primeira parte:![img](images/splits-02.png) Como não randomizamos o conjunto de treinamento, as 500 instâncias usadas para treinamento são as mesmas para a segunda divisão (splits) em diante.**NOTE:** Isso explica os valores idênticos da segunda divisão em diante para o caso de 500 instâncias de treinamento.| Tamanho do conjunto de treinamento (índice) | Divisão1 | Divisão2 | Divisão3 | Divisão 4 | Divisão 5 ||---------------------------------------------|--------------|--------------|--------------|--------------|--------------|| 500 | -18.14420459 | **-19.63885072** | **-19.63885072** | **-19.63885072** | **-19.63885072** |**NOTE:** Um raciocínio idêntico se aplica ao caso de 100 instâncias, e um raciocínio semelhante se aplica aos outros casos.![img](images/splits-02.png) | Tamanho do conjunto de treinamento (índice) | Divisão1 | Divisão2 | Divisão3 | Divisão 4 | Divisão 5 ||---------------------------------------------|--------------|--------------|--------------|--------------|--------------|| 100 | -19.71230701 | **-18.31492642** | **-18.31492642** | **-18.31492642** | **-18.31492642** |**NOTE:** Para interromper esse comportamento, precisamos definir o parâmetro **shuffle=True** na função **learning_curve()**. Isso irá randomizar os índices para os dados de treinamento para cada divisão.**Nós não aplicamos essa randomização acima antes por dois motivos:** - Os dados que nós estamos trabalhando já vêm pré-embaralhados cinco vezes (como mencionado na [documentação](https://archive.ics.uci.edu/ml/datasets/Combined+Cycle+Power+Plant)), então não há necessidade de randomizar mais; - Eu queria deixá-lo ciente sobre essa peculiaridade caso você se depare com esse problema na prática. --- 03.4 - Plotando e analisando as Learning Curves (ou Curvas de Aprendizado)Nessa parte a primeira coisa que nós vamos fazer é criar um gráfico (plot) com as **Learning Curves (ou Curvas de Aprendizado)**, que vão representar: - **Os erros dos conjuntos de treinamentos;** - **Os referentes erros para os conjuntos de validação:** - Lembrando, que mesmo que o conjundo de validação tenha sempre o mesmo tamanho, ele vai ser comparado com conjuntos de treinamentos diferentes, por isso, não vai ser sempre o mesmo.Ok, agora vamos ver isso na prática:import matplotlib.pyplot as plt plt.figure(figsize=(10, 7)) plt.style.use('seaborn') plt.plot(train_sizes, train_scores_mean, marker='o', label = 'Training error') plt.plot(train_sizes, validation_scores_mean, marker='o', label = 'Validation error') plt.ylabel('MSE', fontsize = 14) plt.xlabel('Training set size', fontsize = 14) plt.title('Learning curves for a linear regression model', fontsize = 18, y = 1.03) plt.legend() plt.ylim(0, 40) plt.savefig('images/learning-curve-01.png', format='png') plt.show()**NOTE:** Há muitas informações que podemos extrair do gráfico acima. 
Por exemplo: - **Quando o tamanho do conjunto de treinamento é 1, podemos ver que o *MSE* para o conjunto de treinamento é 0 (zero):** - Este é um comportamento normal, pois o modelo não tem problemas para ajustar perfeitamente um único ponto de dados. Portanto, quando testado no mesmo ponto de dados, a previsão é perfeita. - **Mas quando testado no conjunto de validação (que tem 1914 instâncias), o *MSE* dispara até aproximadamente 423,4:** - Esse valor relativamente alto é a razão pela qual restringimos o intervalo do eixo y entre 0 e 40. Isso nos permite ler a maioria dos valores de MSE com precisão. - Um valor tão alto é esperado, pois é extremamente improvável que um modelo treinado em um único ponto de dados possa generalizar com precisão para 1.914 novas instâncias que não foram vistas no treinamento. - **Quando o tamanho do conjunto de treinamento aumenta para 100, o *MSE* de treinamento aumenta acentuadamente:** - O modelo de regressão linear não prevê todos os 100 pontos de treinamento perfeitamente, então o MSE de treinamento é maior que 0. - No entanto, o modelo tem um desempenho muito melhor agora no conjunto de validação porque é estimado com mais dados.**Agora vem a observação mais importante de todas:** A partir de 500 pontos (instâncias) de dados de treinamento, o *MSE* de validação permanece praticamente o mesmo. Isso nos diz algo extremamente importante: - **Adicionar mais pontos (instâncias) de dados de treinamento não levará a modelos significativamente melhores. Então, em vez de perder tempo (e possivelmente dinheiro) coletando mais dados, precisamos tentar outra abordagem, como:** - Mudar para um algoritmo que possa construir modelos mais complexos; - Preprocessar os recursos (features) existentes; - Ou adicionar novos recursos (features).Vamos comparar a nosssa análise com a imagem (gráfico) abaixo:![img](images/add_data.webp) - **No primeiro gráfico (imagem) nós temos a seguinte situação:** - As duas curvas já convergiram; - Adicionar mais instâncias de treinamento provavelmente não ajudará diminuir o erro do conjunto de validação; - Provavelmente será melhor preprocessar ou adicionar novos recursos (features) do que adicionar novas instâncias. - **No segundo gráfico (imagem) nós temos a seguinte situação:** - A curva de validação pode convergir para a curva de treinamento se mais instâncias de treinamento forem adicionadas**NOTE:** Viram que tanto no nosso gráfico como nesse último exemplo (imagem) nós estamos trabalhando no **trade-off viés-variância** até chegar nos valores mínimos possíveis simultaneamente (provavelmente quando as curvas convergem). --- 03.5 - Analisando o erro do conjunto de validação (viés/bias)Para evitar um equívoco aqui, é importante notar que o que realmente não vai ajudar a diminuir o erro no conjunto de validação é adicionar mais instâncias (linhas) aos dados de treinamento. Então, qual seria uma solução para diminuir os erros no conjunto de validação?> **Adicionar mais recursos (features), no entanto, é uma abordagem diferente e provavelmente ajudará porque aumentará a complexidade do nosso modelo atual. E como nós sabemos quando a complexidade de um modelo aumenta a variância também aumenta e o viés/bias diminui. Ou seja, estamos diminuindo os erros do conjunto de validação.****NOTE:** Como nós sabemos o principal indicador de um problema de **viés/bias** é um *alto erro de validação*. 
No nosso caso, o **MSE** de validação estagna em um valor de aproximadamente **20**.> Mas isso é bom ou ruim?**NOTE:** Nós nos beneficiaríamos de algum conhecimento do domínio (talvez física ou engenharia neste caso) do problema para responder essa pergunta. Mas imagine que nós pagamos uma consultoria técnica que nós deu a seguinte conclusão:> Que um *MSE* de *20 MW* é bem grande para esse tipo de problema. Portanto, nosso modelo tem um problema de viés.Mas é um **problema de baixo viés** ou um **problema de alto viés**? Para encontrar a resposta, precisamos olhar para o erro de treinamento:** - **Se o erro de treinamento for muito baixo, significa que os dados de treinamento estão muito bem ajustados pelo modelo estimado:** - Se o modelo se ajustar muito bem aos dados de treinamento, significa que ele tem baixo viés em relação a esse conjunto de dados. - **Se o erro de treinamento for alto, significa que os dados de treinamento não estão bem ajustados pelo modelo estimado:** - Se o modelo não se ajustar bem aos dados de treinamento, isso significa que ele possui um alto viés em relação a esse conjunto de dados.Para ficar mais claro, vamos analisar a imagem abaixo:![img](images/low_high_bias.png) - **No caso de alto viés/bias (High Bias):** - Quando nós temos um alto erro no conjunto de treinamento indica que teremos um alto erro viés/bias; - Esse erro de viés/bias está relacionado com o erro de validação (teste). - **No caso de baixo viés/bias (Low Bias):** - Quando nós temos um baixo erro no conjunto de treinamento indica que teremos um baixo erro de viés/bias; - Esse baixo erro de viés/bias também está relacionado com o erro de validação.**NOTE:** Resumindo, agora nós sabemos que o nosso modelo tem um problema **alto viés/bias**.> Mas, como resolver esse problema de **Alto viés/bias**? --- 03.6 - Analisando o erro do conjunto de treinamento (variação/variância)Bem, como nós diagnosticamos no tópico anterior temos um **problema de alto viés/bias** no nosso modelo. Agora vamos tentar um **trade-off** de **viés-variância** para ver se conseguimos resolver ou minimizar esse problema.> Similar ao conjunto de validação, nosso conjunto de treinamento também tem um **MSE** de aproximadamente **20 MW**. Como já estabelecemos, essa é uma pontuação de (score) **erro alta**.Agora vamos avançar com o diagnóstico de eventuais problemas de **variação**. A estimativa da **variância** pode ser feita de pelo menos duas maneiras: - **Examinando a lacuna (gap) entre a curva de aprendizado de treinamento e a curva de aprendizado de validação.** - **Examinando o erro de treinamento:** - Seu valor e sua evolução à medida que o tamanho do conjunto de treinamento aumenta.Vamos começar analisando a lacuna (gap) entre a curva de aprendizado de treinamento e a curva de aprendizado de validação para o nosso problema:![img](images/lc_regression.webp) **Vejam que nós temos uma lacuna (gap) estreita:** - Geralmente, quanto mais estreita a lacuna (gap), menor a variância; - O oposto também é verdadeiro: quanto maior a diferença da lacuna (gap), maior a variância.**Vamos agora explicar por que esse é o caso:** - **Como discutimos anteriormente:** - **Se a variação for alta, o modelo se ajusta muito bem aos dados de treinamento:** - Quando os dados de treinamento são ajustados muito bem, o modelo terá problemas para generalizar em dados que não foram vistos no treinamento. 
- Quando tal modelo é testado em seu conjunto de treinamento e, em seguida, em um conjunto de validação, o erro de treinamento será baixo e o erro de validação geralmente será alto.A relação entre o erro de treinamento e validação e a lacuna pode ser resumida desta forma:![img](images/gap-01.png) **NOTE:** Quanto maior a diferença entre os dois erros, maior a variação. No nosso caso, a diferença é muito estreita, então podemos concluir com segurança que a **variância é baixa**.Vamos analisar os gráficos (imagens) abaixo agora:![img](images/low_high_var.webp) **Analisando os gráficos acima nós temos que:** - **Quanto MAIOR o erro nos dados de treinamento:** - Menor a lacuna (gap): - Menor a variância (ou baixa variância). - **Quanto MENOR o erro nos dados de treinamento:** - Maior a lacuna (gap): - Maior a variância (ou alta variância).**NOTE:** No nosso caso, o **MSE** de treinamento se estabiliza em torno de **20 MW**, e já concluímos que é um valor alto. Então, além de uma lacuna (gap) estreita, agora temos outra confirmação de que temos um problema de **baixa variância**. --- 03.7 - Resumo das Análises das Learning Curves (ou Curvas de Aprendizado)Até agora nós já fizemos várias análises e podemos concluir que: - **Nosso algoritmo (modelo):** - Tem problema alto viés/bias; - Tem problema baixa variância; - Adicionar mais instâncias (linhas) aos dados de treinamento é altamente improvável que leve a melhores modelos no algoritmo (modelo) de aprendizado atual.**NOTE:** Uma solução (não definitiva, podemos ter outras) neste momento é mudar para um algoritmo de aprendizado mais complexo. Isso deve: - **Aumenta a variância:** - Visto que quando a complexidade de um modelo aumenta a variância também aumenta e como temos um problema de baixa variãncia, essa abordagem ajudaria resolver ou diminuir esse problema. - **Diminuir o viés/bias:** - Nós também sabemos que quando a complexidade de um modelo aumenta o viés/bias diminui e isso também nós ajudaria no nosso problema de alta viés/bias. --- 03.8 - Aumentando a Complexidade do modeloComo já ficou bem claro no resumo das análises acima mudar para um algoritmo de aprendizado mais complexo pode ser uma das soluções. Vamos ver como o Algoritmo **RandomForestRegressor** não regularizado se sai aqui.**NOTE:** Geraremos as curvas de aprendizado usando o mesmo fluxo de trabalho de antes. Porém, desta vez, agruparemos tudo em uma função para que possamos usá-la mais tarde. 
Para comparação, também exibiremos as curvas de aprendizado para o modelo de regressão linear feitas anteriormente.def create_learning_curves(estimator, data, features, target, train_sizes, cv): train_sizes, train_scores, validation_scores = learning_curve( estimator, data[features], data[target], train_sizes = train_sizes, cv = cv, scoring = 'neg_mean_squared_error') train_scores_mean = -train_scores.mean(axis = 1) validation_scores_mean = -validation_scores.mean(axis = 1) plt.plot(train_sizes, train_scores_mean, label = 'Training error') plt.plot(train_sizes, validation_scores_mean, label = 'Validation error') plt.ylabel('MSE', fontsize = 14) plt.xlabel('Training set size', fontsize = 14) title = 'Learning curves for a ' + str(estimator).split('(')[0] + ' model' plt.title(title, fontsize = 18, y = 1.03) plt.legend() plt.ylim(0,40) ### Plotting the two learning curves ### from sklearn.ensemble import RandomForestRegressor plt.figure(figsize = (16,5)) for model, i in [(RandomForestRegressor(), 1), (LinearRegression(),2)]: plt.subplot(1, 2, i) create_learning_curves(model, electricity, features, target, train_sizes, 5)**Vamos agora comparar, analisar e interpretar os 2 gráficos acima:** - Observando a curva de validação do novo modelo (algoritmo), podemos ver que conseguimos diminuir o viés/bias: - Ainda há algum viés/bias significativo, mas não tanto quanto antes. - Observando a curva de treinamento, podemos *deduzir* que desta vez há um problema de baixo viés; - A nova lacuna (gap) entre as duas curvas de aprendizado sugere um aumento substancial na variância; - A grande lacuna (gap) e o baixo erro de treinamento também indicam um problema de *overfitting:* - O *Overfitting* acontece quando o modelo tem um bom desempenho no conjunto de treinamento, mas muito pior no conjunto de teste (ou validação). - Mais uma observação importante é que agora podemos adição de novas instâncias de treinamento e provavelmente levará a modelos melhores; - A curva de validação não se estabiliza no tamanho máximo do conjunto de treinamento usado. Ele ainda tem potencial para diminuir e convergir para a curva de treinamento, semelhante à convergência que vemos no caso da regressão linear.**Diante de todas essas observações, podemos concluir que:** - Nosso novo algoritmo de aprendizado (RandomForestRegressor) sofre de: - Alta variância; - E um viés bastante baixo, superajustando os dados de treinamento. - A adição de mais instâncias de treinamento provavelmente levará a modelos melhores no algoritmo de aprendizado atual (RandomForestRegressor).**Neste ponto, aqui estão algumas coisas que podemos fazer para melhorar nosso modelo:** - Adicionando mais instâncias de treinamento; - Aumentar a regularização do nosso algoritmo de aprendizado atual. Isso deve diminuir a variância e aumentar o viés; - Reduzindo o número de recursos nos dados de treinamento que usamos atualmente. O algoritmo ainda se ajustará muito bem aos dados de treinamento, mas devido ao número reduzido de recursos, ele construirá modelos menos complexos. Isso deve aumentar o viés e diminuir a variância.**NOTE:** No nosso caso, não temos outros dados prontamente disponíveis. Poderíamos entrar na usina e fazer algumas medições, mas vamos guardar ignorar essa possibilidade.**Regularizando o nosso modelo (Algoritmo):** Vamos tentar regularizar nosso algoritmo **RandomForestRegressor**. Uma maneira de fazer isso é ajustar o número máximo de nós folha em cada árvore de decisão. Isso pode ser feito usando o parâmetro **max_leaf_nodes**. 
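Como um esboço ilustrativo adicional (não faz parte do tutorial original), a função `create_learning_curves()` definida acima poderia ser reutilizada para comparar alguns valores de `max_leaf_nodes` lado a lado; os valores 50, 350 e 1000 abaixo são escolhas arbitrárias, apenas para visualizar o efeito da regularização:

```python
# Esboço ilustrativo: comparando alguns níveis de regularização via max_leaf_nodes.
# Reutiliza create_learning_curves() definida acima; os valores testados são arbitrários.
plt.figure(figsize=(18, 5))
for i, leaf_nodes in enumerate([50, 350, 1000], start=1):
    plt.subplot(1, 3, i)
    create_learning_curves(RandomForestRegressor(max_leaf_nodes=leaf_nodes),
                           electricity, features, target, train_sizes, 5)
```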
Não é necessariamente para você entender essa técnica de regularização. Para nosso propósito aqui, o que você precisa focar é o efeito dessa regularização nas curvas de aprendizado.create_learning_curves(RandomForestRegressor(max_leaf_nodes = 350), electricity, features, target, train_sizes, 5)download test data# folder for all downloaded files import os if not os.path.exists("data"): os.makedirs("data") # grid file for ICON-D2 from enstools.misc import download grid_file = download("http://icon-downloads.mpimet.mpg.de/grids/public/edzw/icon_grid_0047_R19B07_L.nc", "data/icon_grid_0047_R19B07_L.nc") # use opendata from DWD from enstools.opendata import retrieve_nwp # Example data on D2 grid: temperature in 500 hPa file_t500 = retrieve_nwp(variable=["t", "fi", "u", "v"], model="icon-d2", grid_type="icosahedral", level_type="pressure", levels=[500], init_time=0, forecast_hour=[0], dest="data", merge_files=False)read test datafrom enstools.io import read icon_d2 = read(file_t500, constant=grid_file) icon_d2create an interactive plot using Bokehfrom enstools.plot import interactive_contours, Backend from bokeh.io import show, output_notebook output_notebook() fig1 = interactive_contours(icon_d2['t'][0,0,...], aspect=1.25) show(fig1) fig1 = interactive_contours(icon_d2['z'][0,0,...], filled=False, figure=fig1, line_width=4) show(fig1)Create an interactive plot using Plotlyfig2 = interactive_contours(icon_d2['t'][0,0,...], aspect=1.25, filled=False, backend=Backend.PLOTLY) fig2Create a streamline plot with Bokehfrom enstools.plot import interactive_streamlines, Backend, Stream import xarray as xr icon_d2['ff'] = xr.ufuncs.sqrt(icon_d2['u']**2 + icon_d2['v']**2) fig3 = interactive_contours(icon_d2['ff'][0,0,...], aspect=1.25) fig3 = interactive_streamlines(icon_d2['u'][0,0,...], icon_d2['v'][0,0,...], figure=fig3) show(fig3)Create a streamline plot with Plotlyfig4 = interactive_streamlines(icon_d2['u'][0,0,...], icon_d2['v'][0,0,...], aspect=1.25, backend=Backend.PLOTLY) fig4Create a vector plot with Bokehfig5 = interactive_streamlines(icon_d2['u'][0,0,...], icon_d2['v'][0,0,...], aspect=1.25, line_type=Stream.VECTOR, density=1, map_resolution="50m") show(fig5)Memory locality, Rows vs. Columns The effect of row vs column major layoutThe way you traverse a 2D array effects speed. * [numpy arrays](https://docs.scipy.org/doc/numpy/reference/generated/numpy.array.htmlnumpy-array) are, by default, organized in a row-major order. ```pythona=array([range(1,31)]).reshape([3,10])``` * `a[i,j]` and `a[i,j+1]` are placed in consecutive places in memory. * `a[i,j]` and `a[i+1,j]` are 10 memory locations apart. * This implies that scanning the array row by row is more local than scanning column by column. 
* locality implies speed.%pylab inline from time import time # create an n by n array n=1000 a=ones([n,n]) %%time # Scan column by column s=0; for i in range(n): s+=sum(a[:,i]) %%time ## Scan row by row s=0; for i in range(n): s+=sum(a[i,:])CPU times: user 7.53 ms, sys: 4 ms, total: 11.5 ms Wall time: 7.89 msSome experiments with row vs column scanningWe want to see how the run time of these two code snippets varies as `n`, the size of the array, is changed.def sample_run_times(T,k=10): """ compare the time to sum an array row by row vs column by column T: the sizes of the matrix, [10**e for e in T] k: the number of repetitions of each experiment """ all_times=[] for e in T: n=int(10**e) #print('\r',n) a=np.ones([n,n]) times=[] for i in range(k): t0=time() s=0; for i in range(n): s+=sum(a[:,i]) t1=time() s=0; for i in range(n): s+=sum(a[i,:]) t2=time() times.append({'row minor':t1-t0,'row major':t2-t1}) all_times.append({'n':n,'times':times}) return all_times #example run sample_run_times([1,2],k=1)Plot the ratio between run times as a function of nHere we have small steps between consecutive values of `n` and only one measurement for each (`k=1`).all_times=sample_run_times(np.arange(1.5,3.01,0.001),k=1) n_list=[a['n'] for a in all_times] ratios=[a['times'][0]['row minor']/a['times'][0]['row major'] for a in all_times] figure(figsize=(15,10)) plot(n_list,ratios) grid() xlabel('size of matrix') ylabel('ratio of running times') title('time ratio as a function of size of array');Conclusions* Traversing a numpy array column by column takes more time than traversing it row by row.* The effect increases in proportion to the number of elements in the array (the square of the number of rows or columns).* Run time has large fluctuations. Next, we want to quantify the random fluctuations and see what their source is.k=100 all_times=sample_run_times(np.arange(1,3.001,0.01),k=k) _n=[] _row_major_mean=[] _row_major_std=[] _row_minor_mean=[] _row_minor_std=[] _row_minor_min=[] _row_minor_max=[] _row_major_min=[] _row_major_max=[] for times in all_times: _n.append(times['n']) row_major=[a['row major'] for a in times['times']] row_minor=[a['row minor'] for a in times['times']] _row_major_mean.append(np.mean(row_major)) _row_major_std.append(np.std(row_major)) _row_major_min.append(np.min(row_major)) _row_major_max.append(np.max(row_major)) _row_minor_mean.append(np.mean(row_minor)) _row_minor_std.append(np.std(row_minor)) _row_minor_min.append(np.min(row_minor)) _row_minor_max.append(np.max(row_minor)) _row_major_mean=np.array(_row_major_mean) _row_major_std=np.array(_row_major_std) _row_minor_mean=np.array(_row_minor_mean) _row_minor_std=np.array(_row_minor_std) figure(figsize=(20,13)) plot(_n,_row_major_mean,'o',label='row major mean') plot(_n,_row_major_mean-_row_major_std,'x',label='row major mean-std') plot(_n,_row_major_mean+_row_major_std,'x',label='row major mean+std') plot(_n,_row_major_min,label='row major min among %d'%k) plot(_n,_row_major_max,label='row major max among %d'%k) plot(_n,_row_minor_mean,'o',label='row minor mean') plot(_n,_row_minor_mean-_row_minor_std,'x',label='row minor mean-std') plot(_n,_row_minor_mean+_row_minor_std,'x',label='row minor mean+std') plot(_n,_row_minor_min,label='row minor min among %d'%k) plot(_n,_row_minor_max,label='row minor max among %d'%k) xlabel('size of matrix') ylabel('running time') legend() grid()- for a given model, for each ligand that is Zn, check for atoms within 5 and 10 angstrom.
turn this into a function, possibly a python script.- make a 2d and 3d vidsual from the atoms center of mass, using then go into 3d the center of origin will be on the zinc ligand, look for offset from zinc atomhttps://proteopedia.org/wiki/index.php/Hetero_atomsHETNon-standard residuesimport atomium import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # %matplotlib notebook # must restart kernel to switch from widget to notebook, commnt out one %matplotlib widget from mpl_toolkits import mplot3dGet Dataligands = zinc_ligands(MODEL) ligand_center = ligands[1].center_of_mass #ndarray atoms = nearby_ligand_atoms(ligands[0], MODEL, radius=5.0) # [{'atom': , # 'location': (-11.953, 22.816, 27.173), # 'distance': 0.0, # 'is_metal': True, # 'atom_id': 3394, # 'near_hets': [, # , # , # ]}, # ...]Plot datafig = plt.figure() ax = plt.axes(projection='3d') # Data for ligand center of mass xdata = ligand_center[0] ydata = ligand_center[1] zdata = ligand_center[2] ax.scatter3D(xdata, ydata, zdata, c="green", s=100); # Data for atoms for atom in atoms: xdata = atom["location"][0] ydata = atom["location"][1] zdata = atom["location"][2] ax.scatter3D(xdata, ydata, zdata, c='blue', s=20); ax.plot([xdata, ligand_center[0]],[ydata, ligand_center[1]],[zdata, ligand_center[2]], 'gray');Globals# The basic structures within a model are chains, residues, ligands, and atoms # Biological molecules are hierarchical, building from atoms to residues to chains to assemblies MODEL_NAME = "1ADG" FILE = atomium.fetch(MODEL_NAME) MODEL = atomium.fetch(MODEL_NAME).model MODEL_CENTER_MASS = MODEL.center_of_mass print(FILE.title)CRYSTALLOGRAPHIC STUDIES OF TWO ALCOHOL DEHYDROGENASE-BOUND ANALOGS OF THIAZOLE-4-CARBOXAMIDE ADENINE DINUCLEOTIDE (TAD), THE ACTIVE ANABOLITE OF THE ANTITUMOR AGENT TIAZOFURINHelpersdef nearby_ligand_atoms(ligand, model, radius=10.0, sort_atoms=True): """ Takes a ligand obj and a model's center of mass and returns a dictionary of atom data where the atoms fall within some set distance of the ligand's center - radius in angstroms - sort_atoms sorts the dict data structure by distance from ligand center """ ligand_center = tuple(ligand.center_of_mass) atoms_in_radius = list(model.atoms_in_sphere(ligand_center, radius)) atoms = [] for idx, atom in enumerate(atoms_in_radius): atoms.append( { "atom": atom, "location": atom.location, #see Atom class: self._location = np.array([x, y, z]) "distance": atom.distance_to(tuple(ligand.center_of_mass)), "is_metal": atom.is_metal, "atom_id": atom.id, "near_hets": list(atom.nearby_hets(cutoff=3)) } ) if sort_atoms: return sorted(atoms, key=lambda d: d["distance"]) else: return atoms def zinc_ligands(model, optimize=True): """ For a given model name, query model via atomium API and return an array of any ZN ligand objects found in the model. model_name is a string, e.g. "1ADG" """ # pdb = atomium.fetch(model_name) #should check for nil here i.e. does the model exist ligands = model.ligands() if optimize: model = model.optimise_distances() zn_ligands = [] for ligand in ligands: if ligand.name == "ZN": zn_ligands.append(ligand) return zn_ligandsTesting# test ligands = zinc_ligands(MODEL) print(len(nearby_ligand_atoms(ligands[1], MODEL, radius=5.0))) print(nearby_ligand_atoms(ligands[1], MODEL, radius=5.0)) # [{'atom': , # 'distance': 0.0, # 'is_metal': True, # 'atom_id': 3393, # 'near_hets': [,,,] # }, # {'atom': , # 'distance': 1.6169613477136673, # 'is_metal': False, # 'atom_id': 3473, # 'near_hets': [,,,,] # }, # ... 
# hets: # sorted_dist_to_lig_a376[1]["atom"].nearby_hets(cutoff=3) # ... # {, # , # , # , # }Table of Contents1  Load saved models for both people2  Load Spark Dataframe3  Plot Total Screen Time for Multiple People by Show3.1  Plot Difference in Screen Time Between Pairs of People By Show4  Compare Screen Time Over Time For Multiple People on a Single Show5  Co-occurence on Screenfrom esper.stdlib import * from esper.prelude import * from esper.identity import * from esper.spark_identity import * from esper.spark_util import * from esper.validation import * people = ['', '', '']Load saved models for both peopledef load_model(name): print('Loading model for {}.'.format(name)) model = FaceIdentityModel.load_from_gcs(name=name) imshow(tile_imgs([ cv2.resize(x[1][0], (200, 200)) for x in model.model_params['images']], cols=10 )) plt.show() plot_precision_and_cdf(model) return model face_models = [load_model(x) for x in people]Load Spark Dataframeface_identities = get_face_identities() print('Schema:', face_identities)Plot Total Screen Time for Multiple People by Showdate_range = ['2016-01-01', '2016-11-09'] screen_time_by_canonical_show = [ get_screen_time_by_canonical_show_spark( name.lower(), face_identities.where(face_identities.in_commercial == False), date_range=date_range ) for name in people ] plot_screen_time_by_show(people, screen_time_by_canonical_show)Plot Difference in Screen Time Between Pairs of People By Showfrom itertools import combinations for i, j in combinations(range(len(people)), 2): plot_difference_in_screen_time_by_show( [x.lower() for x in [people[i], people[j]]], [screen_time_by_canonical_show[i], screen_time_by_canonical_show[j]], plot_proportion=False )Compare Screen Time Over Time For Multiple People on a Single Showcanonical_show_name = 'MSNBC Live' face_identities_filtered = face_identities.where( face_identities.canonical_show_id == CanonicalShow.objects.get(name=canonical_show_name).id ) screen_times_by_video = [ { vid : st for vid, (st, var) in get_screen_time_by_video_spark( name.lower(), face_identities_filtered, date_range=date_range ).items() } for name in people ] plot_screentime_over_time(people, canonical_show_name, screen_times_by_video)Co-occurence on Screenget_person_in_shot_similarity_spark( [x.lower() for x in people], face_identities, date_range=date_range )Welcome to fastcore> Python goodies to make your coding faster, easier, and more maintainable Python is a powerful, dynamic language. Rather than bake everything into the language, it lets the programmer customize it to make it work for them. `fastcore` uses this flexibility to add to Python features inspired by other languages we've loved, like multiple dispatch from Julia, mixins from Ruby, and currying, binding, and more from Haskell. It also adds some "missing features" and clean up some rough edges in the Python standard library, such as simplifying parallel processing, and bringing ideas from NumPy over to Python's `list` type. Installing To install fastcore run: `conda install fastcore` (if you use Anaconda, which we strongly recommend) or `pip install fastcore`. 
For an [editable install](https://stackoverflow.com/questions/35064426/when-would-the-e-editable-option-be-useful-with-pip-install), clone this repo and run: `pip install -e ".[dev]"`.fastcore is tested to work on Ubuntu, Macos and Windows, for the versions tagged with the `-latest` suffix in [these docs](https://docs.github.com/en/actions/reference/specifications-for-github-hosted-runnerssupported-runners-and-hardware-resources). A tour `fastcore` contains many features. See the [docs](https://fastcore.fast.ai) for all the details, which cover the modules provided:- `test`: Simple testing functions- `foundation`: Mixins, delegation, composition, and more- `xtras`: Utility functions to help with functional-style programming, parallel processing, and more- `dispatch`: Multiple dispatch methods- `transform`: Pipelines of composed partially reversible transformationsHere's a (somewhat) quick tour of a few higlights, showing examples from each of these modules. Documentation All fast.ai projects, including this one, are built with [nbdev](https://nbdev.fast.ai), which is a full literate programming environment built on Jupyter Notebooks. That means that every piece of documentation, including the page you're reading now, can be accessed as interactive Jupyter notebooks. In fact, you can even grab a link directly to a notebook running interactively on Google Colab - if you want to follow along with this tour, click the link below, or click the badge at the top of the page:colab_link('index')The full docs are available at [fastcore.fast.ai](https://fastcore.fast.ai). The code in the examples and in all fast.ai libraries follow the [fast.ai style guide](https://docs.fast.ai/dev/style.html). In order to support interactive programming, all fast.ai libraries are designed to allow for `import *` to be used safely, particular by ensuring that [`__all__`](https://riptutorial.com/python/example/2894/the---all---special-variable) is defined in all packages. In order to see where a function is from, just type it:coll_reprFor more details, including a link to the full documentation and source code, use `doc`, which pops up a window with this information:```pythondoc(coll_repr)``` The documentation also contains links to any related functions or classes, which appear like this: `coll_repr` (in the notebook itself you will just see a word with back-ticks around it; the links are auto-generated in the documentation site). The documentation will generally show one or more examples of use, along with any background context necessary to understand them. As you'll see, the examples for each function and method are shown as tests, rather than example outputs, so let's start by explaining that. Testing fastcore's testing module is designed to work well with [nbdev](https://nbdev.fast.ai), which is a full literate programming environment built on Jupyter Notebooks. That means that your tests, docs, and code all live together in the same notebook. fastcore and nbdev's approach to testing starts with the premise that all your tests should pass. If one fails, no more tests in a notebook are run.Tests look like this:test_eq(coll_repr(range(1000), 5), '(#1000) [0,1,2,3,4...]')That's an example from the docs for `coll_repr`. As you see, it's not showing you the output directly. 
Here's what that would look like:coll_repr(range(1000), 5)So, the test is actually showing you what the output looks like, because if the function call didn't return `'(#1000) [0,1,2,3,4...]'`, then the test would have failed.So every test shown in the docs is also showing you the behavior of the library --- and vice versa!Test functions always start with `test_`, and then follow with the operation being tested. So `test_eq` tests for equality (as you saw in the example above). This includes tests for equality of arrays and tensors, lists and generators, and many more:test_eq([0,1,2,3], np.arange(4))When a test fails, it prints out information about what was expected:```python test_eq([0,1,2,3], np.arange(3))``` ```---- AssertionError: ==: [0, 1, 2, 3] [0 1 2]```If you want to check that objects are the same type, rather than just containing the same collection, use `test_eq_type`.You can test with any comparison function using `test`, e.g. test whether one object is less than another:test(2, 3, operator.lt)You can even test that exceptions are raised:def divide_zero(): return 1/0 test_fail(divide_zero)...and test that things are printed to stdout:test_stdout(lambda: print('hi'), 'hi')Foundations fast.ai is unusual in that we often use [mixins](https://en.wikipedia.org/wiki/Mixin) in our code. Mixins are widely used in many programming languages, such as Ruby, but not so much in Python. We use mixins to attach new behavior to existing libraries, or to allow modules to add new behavior to our own classes, such as in extension modules. One useful example of a mixin we define is `Path.ls`, which lists a directory and returns an `L` (an extended list class which we'll discuss shortly):p = Path('images') p.ls()You can easily add your own mixins with the `patch` [decorator](https://realpython.com/primer-on-python-decorators/), which takes advantage of Python 3 [function annotations](https://www.python.org/dev/peps/pep-3107/parameters) to say what class to patch:@patch def num_items(self:Path): return len(self.ls()) p.num_items()We also use `**kwargs` frequently. In Python, `**kwargs` in a parameter list means "*put any additional keyword arguments into a dict called `kwargs`*". Normally, using `kwargs` makes an API quite difficult to work with, because it breaks things like tab-completion and popup lists of signatures. `utils` provides `use_kwargs` and `delegates` to avoid this problem. See our [detailed article on delegation](https://www.fast.ai/2019/08/06/delegation/) on this topic.`GetAttr` solves a similar problem (and is also discussed in the article linked above): it allows you to use Python's exceptionally useful `__getattr__` magic method, but avoids the problem that normally in Python tab-completion and docs break when using this. For instance, you can see here that Python's `dir` function, which is used to find the attributes of a python object, finds everything inside the attribute named by `_default` here:class Author: def __init__(self, name): self.name = name class ProductPage(GetAttr): _default = 'author' def __init__(self,author,price,cost): self.author,self.price,self.cost = author,price,cost p = ProductPage(Author("Jeremy"), 1.50, 0.50) [o for o in dir(p) if not o.startswith('_')]Looking at that `ProductPage` example, it's rather verbose and duplicates a lot of attribute names, which can lead to bugs later if you change them only in one place. `fastcore` provides `store_attr` to simplify this common pattern.
It also provides `basic_repr` to give simple objects a useful `repr`:class ProductPage: def __init__(self,author,price,cost): store_attr() __repr__ = basic_repr('author,price,cost') ProductPage("Jeremy", 1.50, 0.50)One of the most interesting `fastcore` functions is the `funcs_kwargs` decorator. This allows class behavior to be modified without sub-classing. This can allow folks that aren't familiar with object-oriented progressing to customize your class more easily. Here's an example of a class that uses `funcs_kwargs`:@funcs_kwargs class T: _methods=['some_method'] def __init__(self, **kwargs): assert not kwargs, f'Passed unknown args: {kwargs}' p = T(some_method = print) p.some_method("hello")helloThe `assert not kwargs` above is used to ensure that the user doesn't pass an unknown parameter (i.e one that's not in `_methods`). `fastai` uses `funcs_kwargs` in many places, for instance, you can customize any part of a `DataLoader` by passing your own methods.`fastcore` also provides many utility functions that make a Python programmer's life easier, in `fastcore.utils`. We won't look at many here, since you can easily look at the docs yourself. To get you started, have a look at the docs for `chunked` (remember, if you're in a notebook, type `doc(chunked)`), which is a handy function for creating lazily generated batches from a collection.Python's `ProcessPoolExecutor` is extended to allow `max_workers` to be set to `0`, to easily turn off parallel processing. This makes it easy to debug your code in serial, then run it in parallel. It also allows you to pass arguments to your parallel function, and to ensure there's a pause between calls, in case the process you are running has race conditions. `parallel` makes parallel processing even easier to use, and even adds an optional progress bar. L Like most languages, Python allows for very concise syntax for some very common types, such as `list`, which can be constructed with `[1,2,3]`. Perl's designer explained the reasoning for this kind of syntax:> In metaphorical honor of Huffman’s compression code that assigns smaller numbers of bits to more common bytes. In terms of syntax, it simply means that commonly used things should be shorter, but you shouldn’t waste short sequences on less common constructs.On this basis, `fastcore` has just one type that has a single letter name: `L`. The reason for this is that it is designed to be a replacement for `list`, so we want it to be just as easy to use as `[1,2,3]`. Here's how to create that as an `L`:L(1,2,3)The first thing to notice is that an `L` object includes in its representation its number of elements; that's the `(3)` in the output above. If there's more than 10 elements, it will automatically truncate the list:p = L.range(20).shuffle() p`L` contains many of the same indexing ideas that NumPy's `array` does, including indexing with a list of indexes, or a boolean mask list:p[2,4,6]It also contains other methods used in `array`, such as `L.argwhere`:p.argwhere(ge(15))As you can see from this example, `fastcore` also includes a number of features that make a functional style of programming easier, such as a full range of boolean functions (e.g `ge`, `gt`, etc) which give the same answer as the functions from Python's `operator` module if given two parameters, but return a [curried function](https://en.wikipedia.org/wiki/Currying) if given one parameter.There's too much functionality to show it all here, so be sure to check the docs. 
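As a quick, unofficial illustration of that functional style (a sketch, not taken from the official docs), the curried comparison functions combine naturally with `L`'s `filter` and `map` methods, assuming those behave like their `list`/iterator counterparts:

```python
# A small sketch of the functional style described above (assumes L.filter and L.map exist
# with the usual semantics; ge(15) is curried, so it returns a one-argument predicate).
from fastcore.all import L, ge

nums = L.range(20)
big = nums.filter(ge(15))         # keep only elements >= 15
doubled = big.map(lambda x: x*2)  # apply a function to every element
print(big, doubled)
```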
Many little things are added that we thought should have been in `list` in the first place, such as making this do what you'd expect (which is an error with `list`, but works fine with `L`):1 + L(2,3,4)Function dispatch and Transforms Most Python programmers use object oriented methods and inheritance to allow different objects to behave in different ways even when called with the same method name. Some languages use a very different approach, such as Julia, which uses [multiple dispatch generic functions](https://docs.julialang.org/en/v1/manual/methods/). Python provides [single dispatch generic functions](https://www.python.org/dev/peps/pep-0443/) as part of the standard library. `fastcore` provides multiple dispatch, with the `typedispatch` decorator (which is actually an instance of `DispatchReg`):@typedispatch def _f(x:numbers.Integral, y): return x+1 @typedispatch def _f(x:int, y:float): return x+y _f(3,2.0), _f(3,2)This approach to dispatch is particularly useful for adding implementations of functionality in extension modules or user code. It is heavily used in the `Transform` class. A `Transform` is the main building block of the fastai data pipelines. In the most general terms a transform can be any function you want to apply to your data, however the `Transform` class provides several mechanisms that make the process of building them easy and flexible (see the docs for information about each of these):- Type dispatch- Dispatch over tuples- Reversability- Type propagation- Preprocessing- Filtering based on the dataset type- Ordering- Appending new behavior with decorators`Transform` looks for three special methods, encodes, decodes, and setups, which provide the implementation for [`__call__`](https://www.python-course.eu/python3_magic_methods.php), `decode`, and `setup` respectively. For instance:class A(Transform): def encodes(self, x): return x+1 A()(1)For simple transforms like this, you can also use `Transform` as a decorator:@Transform def f(x): return x+1 f(1)Transforms can be composed into a `Pipeline`:@Transform def g(x): return x/2 pipe = Pipeline([f,g]) pipe(3)Lecture 10:- Learn about "object oriented programming" (OOP)- Learn how to create a "class"- Learn more about namespaces - Learn more about copies Object oriented programmingUntil now we haven't mentioned object oriented programming (OOP), yet we have been using **objects**from the beginning. Knowing how to create and use **objects** in Python is very powerful. Examples of **objects** that we have already encountered are the various data containers we have been using and things like plots. **Objects** have **methods** that can be used to change an object and **attributes** that describe features of an object. Now we will learn how to make our own objects with our own special blend of **attributes** and **methods**. The trick is to make a **class** and define it to have the desired **attributes** and **methods**. ClassesTo create an object with methods, we use a **class** definition, which is a blueprint or recipe defining the **attributes** and **methods** associated with the **class**. When we call the class, we create an **instance** of the **class**, also known as an **object**. Here is an example of a **class** definition:class Circle: """ This is an example of a class called Circle """ import numpy as np # get some math power # define some attributes of the Circle class pi=np.pi # pi is now an attribute of this class too. 
# initialize the class with the attribute r (no parentheses when called) def __init__(self,r): self.r=r # define a variable, r # define some methods (these have parentheses when called) def area(self): return self.pi*self.r**2 def circumference(self): return 2.*self.pi*self.rNow we can create an **instance** of the Circle **class** called C with a radius of $r$.r=3.0 # assign 3 to a variable r C=Circle(r) # create a class instance with radius of 3.0We can use any of the attributes or methods of this class like this:print ("The value of pi is: ",C.pi) # no parentheses! print ("The radius of this circle is: ",C.r)# no parentheses! print ("The area of a circle with radius = ",r,'is: ',C.area()) # with parentheses! print ("The circumference of that circle is: ",C.circumference()) # with parentheses!The value of pi is: 3.141592653589793 The radius of this circle is: 3.0 The area of a circle with radius = 3.0 is: 28.274333882308138 The circumference of that circle is: 18.84955592153876We can also save the Circle class in a module, just as we did in earlier Lectures for functions. Then we can import it into other notebooks or scripts as desired.%%writefile Shapes.py class Circle: """ This is an example of a class called Circle """ import numpy as np # get some math power # define some attributes of the Circle class pi=np.pi # pi is now an attribute of this class too. # initialize the class with the attribute r (no parentheses when called) def __init__(self,r): self.r=r # define a variable, r # define some methods (these have parentheses when called) def area(self): return self.pi*self.r**2 def circumference(self): return 2.*self.pi*self.rWriting Shapes.pyNow we can use it! Here is an example of how:import Shapes as S newCirc=S.Circle(6.0) print (newCirc.pi)3.141592653589793Attributes and methodsYou might be wondering about some things by now. For example, you should have noticed that when we asked for **C.pi** there were no parentheses, but both **C.area( )** and **C.circumference( )** did have parentheses. Why? The answer is that __r__ and **pi** are **attributes**, and **area** and **circumference** are **methods**. Did you notice that the method definitions look a lot like functions, but are inside the class definition? A **method** really is a function, but it is special in that it belongs to a **class** and works on the **instance** of the **class**. They can only be called by using the name of the **instance**, followed by a dot, followed by the **method** (with parentheses). More about classesClasses are not the same as functions. Although our **Shapes** module can be imported just the same as any other module, to use it, we first have to create a class **instance** (**C=Shapes.Circle(r)**). All **methods** (parts that start with **def**) have an **argument** list. The first **argument** has to be a reference to the class instance itself, which is always **self**, followed by any variables you want to pass into the **method**. The "**\_\_init\_\_**" method initializes the **instance** attributes. In the Circle class, the **\_\_init\_\_** method defined the **attribute** **r**, which gets passed in when the class is first called. Asking for any **attribute** retrieves the current value of that **attribute**. But attributes can be changed:print (C.r) C.r=7. print (C.r)3.0 7.0To summarize: The **methods** (**area** and **circumference**) are defined just like any function except note the use of **self** as the first argument.
This is required in all class method definitions. In our case, no other variables are passed in because the only one used is $r$, so the argument list consists of only **self**. Calling these **methods** requires no further arguments (the parentheses are empty) and the class returns the current values.C.area()You can make a subclass (child) of the parent class which has all the attributes and methods of the parent, but may have a few attributes and methods of its own. You do this by writing another class definition that names the parent class as the one it inherits from. So, the bottom line about classes is that they are in the same category of things as variables, lists, dictionaries, etc. That is, they are "data containers" but with benefits. They hold data, and they also hold the methods to process those data.If you are curious about classes, there's lots more to know about them that we don't have time to get into. You can find useful tutorials online: https://www.python-course.eu/python3_object_oriented_programming.php or http://www.sthurlow.com/python/lesson08/ [but be careful with this one as it is for Python 2.7, so the **print** statements won't work without parentheses, e.g., **print ('this way')**, not **print 'not this way'**. ] NamespacesAnother thing you might be wondering about is why we imported **NumPy** inside the class definition when it was already imported into the notebook at the top. The answer is we didn't have to. The class definition works perfectly well without it in this case. But if we don't import **NumPy** within the Shapes module, the module won't work at all because it doesn't "know" about **NumPy**. So in the module, you have to import whatever you need to run the module. CopiesAnother issue we have been tiptoeing around is the concept of a copy of an object and what that means. In Python, this can be a bit confusing. When we define some simple variables, the behavior is pretty much what you might expect:x=3 # define x y=x # set y equal to x print (y) # print out y x=4 # change the value of x print (y) # and y is still equal to its first definition.3 3But if we define a list object (a _compound_ object with more than one variable), things get weird:L1=['spam','ocelot',42] # define the list L2=L1 # make a copy of the list print (L2) # print the copy L1[2]='not an ocelot' # change the original print (L2) # and oops - the copy got changed too!['spam', 'ocelot', 42] ['spam', 'ocelot', 'not an ocelot']This means that **L1** and **L2** refer to the SAME OBJECT. So how do I make a copy that is its own object (doesn't change)? For simple lists (that do not contain sublists), we already learned how to do this:L3=L1[:] print (L3) L1[2]=42 print (L3)['spam', 'ocelot', 'not an ocelot'] ['spam', 'ocelot', 'not an ocelot']This approach breaks down if the object is more complicated: a sliced copy is only one level deep, so anything nested inside it can still be changed through the original. (Try this yourself!).
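One way to try it yourself: a short sketch showing that the slice copy only goes one level deep, so a sublist inside it is still the same object as in the original:

L1 = ['spam', ['ocelot', 42]]   # a list that contains a sublist
L2 = L1[:]                      # slice copy: the top level is new...
L1[1][1] = 'not an ocelot'      # ...but the sublist is shared with L1
print (L2)                      # ['spam', ['ocelot', 'not an ocelot']]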
To avoid this problem, there is a module called **copy** with a function called **deepcopy**, which will make an independent copy of the object in question:from copy import deepcopy L1=['spam','ocelot',42] # define the list L2=deepcopy(L1) # make a copy of the list print ("L2: ",L2) # print the copy L1[2]='not an ocelot' # change the original print ("L1: ",L1) print ("L2: ",L2) # and bingo, L2 didn't # clean up os.remove('Shapes.py')Apply CNN Classifier to DESI Spectra and visualize results with gradCAMMini-SV2 tiles from February-March 2020:- https://desi.lbl.gov/trac/wiki/TargetSelectionWG/miniSV2See also the DESI tile picker with (limited) SV0 tiles from March 2020:- https://desi.lbl.gov/svn/data/tiles/trunk/- https://desi.lbl.gov/svn/data/tiles/trunk/SV0.htmlimport sys sys.path.append('/global/homes/p/palmese/desi/timedomain/desitrip/py/') from desispec.io import read_spectra, write_spectra from desispec.spectra import Spectra from desitarget.cmx.cmx_targetmask import cmx_mask from desitrip.preproc import rebin_flux, rescale_flux from astropy.io import fits from astropy.table import Table, vstack, hstack from glob import glob from datetime import date import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from tensorflow import keras mpl.rc('font', size=14) # Set up BGS target bit selection. cmx_bgs_bits = '|'.join([_ for _ in cmx_mask.names() if 'BGS' in _])Select a Date & Tile from the Tile Picker# Choose tile 66000 from Mar 14, 2020 tile_id = 66003 obsdate = 20200315 # Access redux folder. redux='/global/project/projectdirs/desi/spectro/redux/daily/tiles' prefix_in='/'.join([redux, '{:05d}/{}'.format(tile_id, obsdate)]) if not os.path.isdir(prefix_in): print('{} does not exist.'.format(prefix_in)) # List zbest and coadd files. # Data are stored by petal ID. zbfiles = sorted(glob('{}/zbest*.fits'.format(prefix_in))) cafiles = sorted(glob('{}/coadd*.fits'.format(prefix_in))) # zbest files from redrock. zbfiles # Co-added spectra from multiple exposures + the three spectrograph arms. cafilesLoad the Keras ModelLoad a model trained on real or simulated data using the native Keras output format. 
In the future this could be updated to just load the Keras weights.tfmodel = '/global/homes/l/lehsani/timedomain/desitrip/docs/nb/models_9label_first/6_b65_e200_9label/b65_e200_9label_model' #tfmodel = '/global/homes/s/sybenzvi/desi/timedomain/desitrip/docs/nb/6label_cnn_restframe' if os.path.exists(tfmodel): classifier = keras.models.load_model(tfmodel) else: classifier = None print('Sorry, could not find {}'.format(tfmodel)) if classifier is not None: classifier.summary()Model: "SNnet" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= Input_Spec (InputLayer) [(None, 150, 1)] 0 _________________________________________________________________ conv1d_20 (Conv1D) (None, 150, 8) 48 _________________________________________________________________ batch_normalization_20 (Batc (None, 150, 8) 32 _________________________________________________________________ activation_20 (Activation) (None, 150, 8) 0 _________________________________________________________________ max_pooling1d_20 (MaxPooling (None, 75, 8) 0 _________________________________________________________________ conv1d_21 (Conv1D) (None, 75, 16) 656 _____________________________________________________________[...]Loop Through Spectra and Classify# Loop through zbest and coadd files for each petal. # Extract the fibermaps, ZBEST tables, and spectra. # Keep only BGS targets passing basic event selection. allzbest = None allfmap = None allwave = None allflux = None allivar = None allmask = None allres = None for cafile, zbfile in zip(cafiles, zbfiles): # Access data per petal. zbest = Table.read(zbfile, 'ZBEST') fibermap = Table.read(zbfile, 'FIBERMAP') pspectra = read_spectra(cafile) # Apply standard event selection. isTGT = fibermap['OBJTYPE'] == 'TGT' isGAL = zbest['SPECTYPE'] == 'GALAXY' isBGS = fibermap['CMX_TARGET'] & cmx_mask.mask(cmx_bgs_bits) != 0 select = isTGT & isGAL & isBGS # Accumulate spectrum data. if allzbest is None: allzbest = zbest[select] allfmap = fibermap[select] allwave = pspectra.wave['brz'] allflux = pspectra.flux['brz'][select] allivar = pspectra.ivar['brz'][select] allmask = pspectra.mask['brz'][select] allres = pspectra.resolution_data['brz'][select] else: allzbest = vstack([allzbest, zbest[select]]) allfmap = vstack([allfmap, fibermap[select]]) allflux = np.vstack([allflux, pspectra.flux['brz'][select]]) allivar = np.vstack([allivar, pspectra.ivar['brz'][select]]) allmask = np.vstack([allmask, pspectra.mask['brz'][select]]) allres = np.vstack([allres, pspectra.resolution_data['brz'][select]]) # Apply the DESITRIP preprocessing to selected spectra. rewave, reflux, reivar = rebin_flux(allwave, allflux, allivar, allzbest['Z'], minwave=2500., maxwave=9500., nbins=150, log=True, clip=True) rsflux = rescale_flux(reflux) # Run the classifier on the spectra. # The output layer uses softmax activation to produce an array of label probabilities. # The classification is based on argmax(pred). pred = classifier.predict(rsflux) pred.shape ymax = np.max(pred, axis=1) fig, ax = plt.subplots(1,1, figsize=(6,4), tight_layout=True) ax.hist(ymax, bins=np.linspace(0,1,51)) ax.set(xlabel='$\max{(y_\mathrm{pred})}$', ylabel='count', title='Tile {}, {}'.format(tile_id, obsdate));Selection on Classifier OutputTo be conservative we can select only spectra where the classifier is very confident in its output, e.g., ymax > 0.99. 
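As a rough illustration before committing to a threshold (this just reuses the `ymax` array computed above), one can count how many spectra would survive a few different cuts:

for cut in (0.9, 0.95, 0.99):
    n_pass = int(np.sum(ymax > cut))
    print('ymax > {}: {} of {} spectra'.format(cut, n_pass, len(ymax)))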
See the [CNN training notebook](https://github.com/desihub/timedomain/blob/master/desitrip/docs/nb/cnn_multilabel-restframe.ipynb) for the motivation behind this cut.idx = np.argwhere(ymax > 0.99) labels = np.argmax(pred, axis=1) idx.shape label_names = ['Galaxy', 'SN Ia', 'SN Ib', 'SN Ib/c', 'SN Ic', 'SN IIn', 'SN IIL/P', 'SN IIP', 'KN'] # Randomly select 16 of the spectra and plot them. #selection = sorted(np.random.choice(idx.flatten(), size=idx.shape[0], replace=False)) selection=idxGradCAM action happens hereAdapting from https://keras.io/examples/vision/grad_cam/import tensorflow as tf last_conv_layer_name = "conv1d_23" classifier_layer_names = [ "batch_normalization_23", "activation_23", "max_pooling1d_23", "flatten_5", "dense_5", "dropout_5", "Output_Classes" ] def make_gradcam_heatmap( img_array, model, last_conv_layer_name, classifier_layer_names ): # First, we create a model that maps the input image to the activations # of the last conv layer last_conv_layer = model.get_layer(last_conv_layer_name) last_conv_layer_model = keras.Model(model.inputs, last_conv_layer.output) # Second, we create a model that maps the activations of the last conv # layer to the final class predictions classifier_input = keras.Input(shape=last_conv_layer.output.shape[1:]) x = classifier_input for layer_name in classifier_layer_names: #print(layer_name,x.shape) x = model.get_layer(layer_name)(x) classifier_model = keras.Model(classifier_input, x) # Then, we compute the gradient of the top predicted class for our input image # with respect to the activations of the last conv layer with tf.GradientTape() as tape: # Compute activations of the last conv layer and make the tape watch it last_conv_layer_output = last_conv_layer_model(img_array) tape.watch(last_conv_layer_output) # Compute class predictions preds = classifier_model(last_conv_layer_output) top_pred_index = tf.argmax(preds[0]) top_class_channel = preds[:, top_pred_index] # This is the gradient of the top predicted class with regard to # the output feature map of the last conv layer grads = tape.gradient(top_class_channel, last_conv_layer_output) # This is a vector where each entry is the mean intensity of the gradient # over a specific feature map channel pooled_grads = tf.reduce_mean(grads, axis=(0, 1)) #print(grads.shape,pooled_grads.shape) # We multiply each channel in the feature map array # by "how important this channel is" with regard to the top predicted class last_conv_layer_output = last_conv_layer_output.numpy()[0] pooled_grads = pooled_grads.numpy() for i in range(pooled_grads.shape[-1]): last_conv_layer_output[:, i] *= pooled_grads[i] # The channel-wise mean of the resulting feature map # is our heatmap of class activation heatmap = np.mean(last_conv_layer_output, axis=-1) #We apply ReLU here and select only elements>0 # For visualization purpose, we will also normalize the heatmap between 0 & 1 heatmap = np.maximum(heatmap, 0) / np.max(heatmap) return heatmapApply GradCAM to one spectrum only# Prepare image preprocess_input = keras.applications.xception.preprocess_input decode_predictions = keras.applications.xception.decode_predictions #myarr=rsflux[:1,:] myarr=rsflux[selection[1],:] # Print what the top predicted class is preds = classifier.predict(myarr) #print("Predicted:", preds) # Generate class activation heatmap heatmap = make_gradcam_heatmap( myarr, classifier, last_conv_layer_name, classifier_layer_names ) color='blue' rewave_nbin_inblock=rewave.shape[0]/float(heatmap.shape[0]) first_bin=0 for i in 
range(1,heatmap.shape[0]+1): alpha=np.min([1,heatmap[i-1]+0.2]) last_bin=int(i*rewave_nbin_inblock) plt.plot(rewave[first_bin:last_bin+1], myarr[0,first_bin:last_bin+1],c=color,alpha=alpha) first_bin=last_binApply GradCAM to all spectra classified as transientspreprocess_input = keras.applications.xception.preprocess_input decode_predictions = keras.applications.xception.decode_predictions fig, axes = plt.subplots(4,4, figsize=(15,10), sharex=True, sharey=True, gridspec_kw={'wspace':0, 'hspace':0}) for j, ax in zip(selection, axes.flatten()): myarr=rsflux[j,:] # Print what the top predicted class is preds = classifier.predict(myarr) #print("Predicted:", preds) # Generate class activation heatmap heatmap = make_gradcam_heatmap( myarr, classifier, last_conv_layer_name, classifier_layer_names ) color='blue' rewave_nbin_inblock=rewave.shape[0]/float(heatmap.shape[0]) first_bin=0 for i in range(1,heatmap.shape[0]+1): alpha=np.min([1,heatmap[i-1]+0.2]) last_bin=int(i*rewave_nbin_inblock) if (i==1): ax.plot(rewave[first_bin:last_bin+1], myarr[0,first_bin:last_bin+1],c=color,alpha=alpha,\ label=label_names[labels[j[0]]]+'\nz={:.2f}'.format(allzbest[j[0]]['Z'])) else: ax.plot(rewave[first_bin:last_bin+1], myarr[0,first_bin:last_bin+1],c=color,alpha=alpha) first_bin=last_bin ax.legend(fontsize=10)Plot spectra of objects classified as transientsPlot observed spectratestwave, testflux, testivar = rebin_flux(allwave, allflux, allivar, minwave=2500., maxwave=9500., nbins=150, log=True, clip=True) fig, axes = plt.subplots(4,4, figsize=(15,10), sharex=True, sharey=True, gridspec_kw={'wspace':0, 'hspace':0}) for j, ax in zip(selection, axes.flatten()): ax.plot(testwave, testflux[j[0]], alpha=0.7, label='label: '+label_names[labels[j[0]]]+'\nz={:.2f}'.format(allzbest[j[0]]['Z'])) ax.set(xlim=(3500,9900),ylim=(-0.1,4)) ax.fill_between([5600,6000],[-0.1,-0.1],[4,4],alpha=0.1,color='blue') ax.fill_between([7400,7800],[-0.1,-0.1],[4,4],alpha=0.1,color='blue') ax.legend(fontsize=10) # for k in [0,1,2]: # axes[k,0].set(ylabel=r'flux [erg s$^{-1}$ cm$^{-1}$ $\AA^{-1}$]') # axes[2,k].set(xlabel=r'$\lambda_\mathrm{obs}$ [$\AA$]', # xlim=(3500,9900)) fig.tight_layout();Cats and Dogs from [CIFAR10](https://keras.io/datasets/)!# numpy and plotting import numpy as np import matplotlib.pyplot as plt %matplotlib inline # keras from tensorflow import keras from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPooling2D # getting the datset from tensorflow.keras.datasets import cifar10 from tensorflow.keras import backend as K** Load Cats and Dogs From CIFAR10 dataset **First we load the CIFAR10 data and extract all cats and dogs from it.# The data, shuffled and split between train and test sets: (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Reduce to cats and dogs def extract(x,y,labels): arg_train = [] new_y = np.zeros(np.max(labels)+1) for i,l in enumerate(labels): arg_train.append(np.argwhere(y == l)[:,0]) new_y[l] = i arg_train = np.concatenate(arg_train) return x[arg_train], new_y[y[arg_train]] x_train, y_train = extract(x_train, y_train, [3,5]) x_test, y_test = extract(x_test, y_test, [3,5]) print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') x_train = x_train.astype('float32') x_test = x_test.astype('float32') # maximum value normalization x_train /= 255 x_test /= 255 img_rows, img_cols, channels = 
x_train.shape[1:] print(K.image_data_format()) if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], channels, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], channels, img_rows, img_cols) input_shape = (channels, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels) input_shape = (img_rows, img_cols, channels) plt.imshow(x_train[np.argmax(y_train==0)]) plt.title("a cat") plt.show() plt.imshow(x_train[np.argmax(y_train==1)]) plt.title("a dog") plt.show()x_train shape: (10000, 32, 32, 3) 10000 train samples 2000 test samples channels_lastNext, we just define a function which will display the results later.def predict(idx, model, data, avg=None, norm=None, cols=5, threshold=.3): try: idx = list(idx) except: idx = [idx] cats = 0 dogs = 0 data = data[idx] if norm is None: p = model.predict(data) else: p = model.predict(data/norm) i = 0 while i < p.shape[0]: fig, axs = plt.subplots(1,cols,figsize=(5*cols,5)) fig.figsize=(20,10) for ax in axs: if avg is not None: img = (data[i]+avg) else: img = (data[i]) ax.imshow(img) if p[i] < threshold: label = "cat" cats += 1 elif p[i] > 1-threshold: label = "dog" dogs += 1 else: label = "not sure" ax.text(.5,0, label+ "; score = " + str(p[i]), horizontalalignment='center', verticalalignment='bottom', transform=ax.axes.transAxes, backgroundcolor="white", size="large") i += 1 if i >= p.shape[0]: break plt.show() print(cats, " cats (", cats/len(idx)*100., "%),", dogs, " dogs (", dogs/len(idx)*100., "%)") batch_size = 128Convolutional Neural NetworkA very simple CNN... (> 70% validation accuray after 10 epochs)model1 = Sequential() model1.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model1.add(Conv2D(64, (3, 3), activation='relu')) model1.add(MaxPooling2D(pool_size=(2, 2))) model1.add(Dropout(0.25)) model1.add(Flatten()) model1.add(Dense(256, activation='relu')) model1.add(Dense(128, activation='relu')) model1.add(Dropout(0.5)) model1.add(Dense(1, activation='sigmoid')) model1.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy']) # model.summary()We will train for ten epochs (iterations ofer the whole training data)...model1.fit(x_train, y_train, batch_size=batch_size, epochs=10, validation_data=(x_test, y_test), shuffle=True) predict(range(10), model1, x_test) predict(range(x_test.shape[0]-10, x_test.shape[0]), model1, x_test)A rather simple CNN: Deeper than the one above, but actually with fewer trainable parameters. 
It gives slightly better performance.model2 = Sequential() model2.add(Conv2D(48, kernel_size=(5, 5), padding="same", activation='relu', input_shape=input_shape)) model2.add(Conv2D(32, (5, 5), activation='relu', padding="same")) model2.add(Conv2D(32, (3, 3), activation='relu', padding="same")) model2.add(MaxPooling2D(pool_size=(2, 2))) model2.add(BatchNormalization()) model2.add(Conv2D(96, (5, 5), activation='relu', padding="same")) model2.add(Conv2D(64, (3, 3), activation='relu', padding="same")) model2.add(Conv2D(64, (3, 3), activation='relu', padding="same")) model2.add(MaxPooling2D(pool_size=(2, 2))) model2.add(BatchNormalization()) model2.add(Conv2D(160, (3, 3), activation='relu', padding="same")) model2.add(Conv2D(128, (3, 3), activation='relu', padding="same")) model2.add(MaxPooling2D(pool_size=(2, 2))) model2.add(BatchNormalization()) model2.add(Conv2D(256, (3, 3), activation='relu', padding="same")) model2.add(Flatten()) model2.add(Dropout(.3)) model2.add(Dense(512, activation='relu')) model2.add(Dropout(.5)) model2.add(Dense(128, activation='relu')) model2.add(Dense(1)) model2.add(Activation('sigmoid')) model2.compile(loss='binary_crossentropy', optimizer="adam", metrics=['accuracy']) # model.summary() model2.fit(x_train, y_train, batch_size=batch_size, epochs=10, validation_data=(x_test, y_test), shuffle=True) predict(range(20), model2, x_test) predict(range(x_test.shape[0]//2, x_test.shape[0]//2+20), model2, x_test)Data AugmentationLooking at the training progress we can see, that in the end the loss decreases but the validation loss does not. The network is only optimized for the training data. The test data is used to validate the performance on unseen images. What we observe here is called overfitting. This problem will be adressed in depth in the session on regularization.One simple explanation for the phenomenon in our case is, that we have limited training data over which we interate over and over and many parameters in our network which can *memorize* the data. One way to get more data is augmenting the available data with randomly transformed data which stil retains the same labels.from keras.preprocessing.image import ImageDataGenerator # traning data is augmented train_datagen = ImageDataGenerator( shear_range=0.2, zoom_range=0.2, horizontal_flip=True) train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)With the augmented training data we can continue training and improve the results on the test set.model2.fit_generator( train_generator, steps_per_epoch=2000 // batch_size, epochs=50, validation_data=(x_test, y_test))WARNING:tensorflow:From :5: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version. Instructions for updating: Please use Model.fit, which supports generators. WARNING:tensorflow:sample_weight modes were coerced from ... 
to ['...'] Train for 15 steps, validate on 2000 samples Epoch 1/50 15/15 [==============================] - 2s 106ms/step - loss: 0.4324 - accuracy: 0.7948 - val_loss: 0.6412 - val_accuracy: 0.6475 Epoch 2/50 15/15 [==============================] - 1s 67ms/step - loss: 0.4380 - accuracy: 0.7854 - val_loss: 0.5278 - val_accuracy: 0.7405 Epoch 3/50 15/15 [==============================] - 1s 65ms/step - loss: 0.4308 - accuracy: 0.7998 - val_loss: 0.5166 - val_accuracy: 0.7540 Epoch 4/50 15/15 [==============================] - 1s 70ms/step - loss: 0.4440 - accuracy: 0.7844 - val_loss: 0.4686 - val_accuracy: 0.7755 Epoch 5/50 15/15 [==============================] -[...]PLEASE RUN THIS COMMAND IF YOU FINISHED THE NOTEBOOKimport os temp=os.getpid() !kill -9 $tempPredicting Republican and Democratic donations Part 1下面以“预测共和党和民主党的捐款”为例,进行说明,数据下载[地址](https://www.dataquest.io/blog/large_files/input.csv)。import numpy as np import pandas as pd import matplotlib.pyplot as plt # set seed to reproducibility SEED = 222 np.random.seed(SEED) #读取数据 df = pd.read_csv("/tmp/data_input/kaggle/Predicting_donations/input.csv") df.info() RangeIndex: 100000 entries, 0 to 99999 Data columns (total 11 columns): cand_pty_affiliation 100000 non-null object cand_office_st 100000 non-null object cand_office 100000 non-null object cand_status 100000 non-null object rpt_tp 100000 non-null object transaction_tp 100000 non-null object entity_tp 100000 non-null object state 100000 non-null object classification 100000 non-null object cycle 100000 non-null float64 transaction_amt 100000 non-null float64 dtypes: float64(2), object(9) memory usage: 8.4+ MB从上图可以看出可以用到的属性:- entity_tp: 用来区分是个人做出的预测还是组织做出的- state: 捐款者的所在居住地(州)- classification: 所工作的领域- rpt_tp: 竞选过程中的捐献- transaction_tp: 捐助的组成,竞选者自己还是政治委员会- cycle: 竞选周期- transaction_amt: 捐助的总额- cand_pty_affiliation: 候选者所属党派的类别,这就是需要进行预测的特征- cand_office_st: 参与州- cand_office: 政府机关候选还是occupying- cand_status: 政府机关参与候选的状态df.head() #处理数据 from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score def get_train_test(test_size = 0.95): # 将数据分为训练集和测试集 # 获取共和党的标签 y = 1 * (df.cand_pty_affiliation == "REP") #去掉除共和党和民主党其他党派的捐款 X = df.drop(["cand_pty_affiliation"], axis=1) X = pd.get_dummies(X, sparse=True) X.drop(X.columns[X.std() == 0], axis=1, inplace=True) return train_test_split(X, y, test_size=test_size) #将原始数据处理为训练数据集和测试数据集 xtrain, xtest, ytrain, ytest = get_train_test() print("\nData set :") df.head() df.cand_pty_affiliation.value_counts(normalize=True).plot(kind="bar", title="Share of No. 
donations") plt.show()从上图可以看出,大约有75%的捐款是给民主党的,接下来进行模型的集成,即结合多个模型的预测,对特异性误差取平均值,从而获得更好的整体预测效果下面使用决策树来对模型进行预测。为了更好的观察决策树的结果,需要对决策树进行可视化,环境需要准备:- 安装 pip install pydotplus- 安装 sudo apt-get install graphviz#使用辅助函数对决策树进行可视化 import pydotplus # you can install pydotplus with: pip install pydotplus from IPython.display import Image from sklearn.metrics import roc_auc_score from sklearn.tree import DecisionTreeClassifier, export_graphviz def print_graph(clf, feature_names): # print decision tree graph = export_graphviz(clf, label='root', proportion=True, impurity=False, out_file=None, feature_names=feature_names, class_names={0:"D", 1:"R"}, filled=True, rounded=True) graph = pydotplus.graph_from_dot_data(graph) return Image(graph.create_png())使用决策树对训练集进行训练,并通测试集对训练的模型进行交叉测试,进行性能测试t1 = DecisionTreeClassifier(max_depth=1, random_state=SEED) t1.fit(xtrain, ytrain) pre1 = t1.predict_proba(xtest)[:,1] print("Decision Tree ROC-AUC score : %.3f\n" % roc_auc_score(ytest, pre1)) print_graph(t1, xtrain.columns)Decision Tree ROC-AUC score : 0.672每个叶节点记录它们在训练样本中的比例、类别分布和类别标签预测。我们的决策树根据捐款金额是否超过 101.5 进行预测,将更多的特征参数加入,构建多层的决策树进行观察,分别构建二层决策树和三层决策树t2 = DecisionTreeClassifier(max_depth=2, random_state=SEED) t2.fit(xtrain, ytrain) pre2 = t2.predict_proba(xtest)[:,1] print("Decision Tree ROC-AUC score : %.3f\n" % roc_auc_score(ytest, pre2)) print_graph(t2, xtrain.columns) t3 = DecisionTreeClassifier(max_depth=3, random_state=SEED) t3.fit(xtrain, ytrain) pre3 = t3.predict_proba(xtest)[:,1] print("Decision Tree ROC-AUC score : %.3f\n" % roc_auc_score(ytest, pre3)) print_graph(t3, xtrain.columns)Decision Tree ROC-AUC score : 0.751可以看到三层的决策树并不比一层的决策树要好,共和党的预测率大约为5%,比25%差距不小,产生了过拟合。在固定层数的情况下,增加决策树的宽度也可以达到增加复杂度的目的。由于我们关系的是捐助的分类,所以对捐助的总额不是很关心,所以考虑去掉transaction_amt的特征。drop = ["transaction_amt"] xtrain_slim = xtrain.drop(drop, axis=1) xtest_slim = xtest.drop(drop, axis=1) t3 = DecisionTreeClassifier(max_depth=3, random_state=SEED) t3.fit(xtrain_slim, ytrain) pre3 = t3.predict_proba(xtest_slim)[:, 1] print("Decision Tree ROC-AUC score :%.3f\n", roc_auc_score(ytest, pre3)) print_graph(t3, xtrain_slim.columns)Decision Tree ROC-AUC score :%.3f 0.740318258788从结果上看出,对于共和党的分类大约为7.3%,比上一次使用决策树要好一些,这两个模型采用不同特征进行训练的决策,可以将这两个模型通过集成的方式,来减小预测误差。目前集成学习根据个体生成器的的生成方式,大体分为两种类型,一是个体生成器之间有强依赖关系,必须串行生成序列,另外一种就是个体生成器之间不存在依赖关系,可以并行生成序列,前一种比较典型的方法是Boosting,后一种典型的方法时Bagging和Random Forest。 使用两个决策树进行集成1、首先检查误差的关联性,高度关联的误差会引起差的集成tree1 = DecisionTreeClassifier(max_depth=3, random_state=SEED) tree1.fit(xtrain, ytrain) pre1 = tree1.predict_proba(xtest)[:, 1] tree2 = DecisionTreeClassifier(max_depth=3, random_state=SEED) tree2.fit(xtrain_slim, ytrain) pre2 = tree2.predict_proba(xtest_slim)[:, 1] #计算两者的协方差矩阵 pd.DataFrame({"full_data": pre1, "red_data": pre2}).corr()可以看出方差还是有一些关联性,但是不是具有特别大的影响性。2、平均两个模型的预测pre_mean = np.mean([pre1, pre2], axis=0) print("Average decision tree ROC-AUC score is \n", roc_auc_score(ytest, pre_mean))Average decision tree ROC-AUC score is 0.782742456807可以看出,经过平均后性能会有一定的提升,如果再有更多的决策树,将其进行平均也会有更好的性能提升。从上述的模型训练中可以看出,特征参数的选择比较重要,那么如何进行特征参数的选择呢,一个快速有效的实践方法是随机地选择一个特征子集,在每个 draw 上拟合一个决策树并平均其预测。这一过程被称为自举平均(bootstrapped averaging,通常缩写为 bagging),它应用于决策树所产生的模型是随机森林。#构建10个决策树的生成,每一个拟合包含3个特征的子集 from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=10, # 10个决策树 max_features=3, # 3个特征子集 random_state=SEED) rf.fit(xtrain, ytrain) pre = rf.predict_proba(xtest)[:, 1] print("Average decision tree ROC-AUC score is \n", roc_auc_score(ytest, pre))Average decision tree ROC-AUC score is 
0.844018408542采用其他的模型进行集成采用集成的时候有两个方面的需要注意:- 预测误差的关联性越低,效果越好- 模型越多,效果越好# 集成更多的模型来对数据进行训练 from sklearn.svm import SVC, LinearSVC # 支持向量机 from sklearn.naive_bayes import GaussianNB # 高斯朴素贝叶斯 from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier # 随机森林和Boosting from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier # K近邻 from sklearn.neural_network import MLPClassifier # 神经网络 from sklearn.kernel_approximation import Nystroem, RBFSampler from sklearn.pipeline import make_pipeline #生成基本的学习器 def get_models(): nb = GaussianNB(); svc = SVC(C=100, probability=True) knn = KNeighborsClassifier(n_neighbors=3) lr = LogisticRegression(C=100, random_state=SEED) nn = MLPClassifier((80, 10), early_stopping=False, random_state=SEED) gb = GradientBoostingClassifier(n_estimators=10, random_state=SEED) rf = RandomForestClassifier(n_estimators=10, max_features=3, random_state=SEED) models = {"svm": svc, "knn":knn, "native bayes": nb, "lr": lr, "nn": nn, "boosting": gb, "random forest": rf, } return models # 开始进行模型的训练 def train_predict(models_list): # fit多个学习模型,并返回预测结果 store = np.zeros((ytest.shape[0], len(models_list))) store = pd.DataFrame(store) print("Starting to fit\n") cols = list() for i, (name, model) in enumerate(models_list.items()): print("%s..." % name, end=" ", flush=False) model.fit(xtrain, ytrain) store.iloc[:, i] = model.predict_proba(xtest)[:, 1] cols.append(name) print("model done\n") store.columns = cols print("Done\n") return store # 构建预测模型 def score_model(y, store): print("Scoring model\n") for m in store.columns: score = roc_auc_score(y, store.loc[:, m]) print ("%-26s: %.3f" % (m, score)) print("Done\n") models = get_models() store = train_predict(models) score_model(ytest, store)Starting to fit svm... model done knn... model done native bayes... model done lr... model done nn... model done boosting... model done random forest... model done Done Scoring model svm : 0.850 knn : 0.779 native bayes : 0.803 lr : 0.857 nn : 0.851 boosting : 0.823 random forest : 0.844 Done对于集成策略来讲,各个基本学习器之间的预测误差必须是不相等的. 
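If you would rather not install an extra package, the same sanity check can be done with pandas alone. A minimal sketch, assuming the `store` DataFrame of base-learner probabilities and `ytest` from the cells above:

# correlation of the predicted probabilities of the base learners
print(store.corr().round(2))

# correlation of the thresholded prediction errors (what matters for averaging)
errors = store.apply(lambda p: 1 * (p > 0.5)).sub(ytest.values, axis=0)
print(errors.corr().round(2))

The less correlated these errors are, the more an ensemble average can cancel them out.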
安装mlens工具包pip install mlensfrom mlens.visualization import corrmat corrmat(store.corr(), inflate=False) plt.show()针对误差关联明显的,是可以进行预处理的,但是大多数误差的关联都在45%- 80%之间,可以对模型进行进一步的优化corrmat(store.apply(lambda pre: 1 * (pre > 0.5) - ytest.values).corr(), inflate=False) plt.show()继续对多个模型进行平均处理print("Ensemble ROC-AUC score is %.3f" % roc_auc_score(ytest, store.mean(axis=1)))Ensemble ROC-AUC score is 0.882画出roc曲线进行进一步的分析from sklearn.metrics import roc_curve def plot_roc_curve(ytest, p_base_learners, p_ensemble, labels, ens_label): plt.figure(figsize=(10, 8)) plt.plot([0, 1], [0, 1], 'k--') cm = [plt.cm.rainbow(i) for i in np.linspace(0, 1.0, p_base_learners.shape[1] + 1)] for i in range(p_base_learners.shape[1]): p = p_base_learners[:, i] fpr, tpr, _ = roc_curve(ytest, p) plt.plot(fpr, tpr, label = labels[i], c=cm[i + 1]) fpr, tpr, _ = roc_curve(ytest, p_ensemble) plt.plot(fpr, tpr, label=ens_label, c=cm[0]) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.legend(frameon=False) plt.show() plot_roc_curve(ytest, store.values, store.mean(axis=1), list(store.columns), "ensemble")采用平均法进行模型的集成会带来一些问题,有一些模型比其他的表现的性能更差,但是影响却一样大,这对于集成后整体的性能影比较大。由于只需要预测对共和党的捐款情况,所以可以用这个来做参考标准,下面来简单的检查各个模型性能的表现情况。performance_diff = store.apply(lambda per: 1 * (per > 0.5).value_counts(normalize=True)) performance_diff.index = ["DEM", "REP"] performance_diff.loc["REP",:].sort_values().plot(kind="bar") plt.axhline(0.25, color="k", linewidth=0.5) plt.text(0., 0.23, "True share republic") plt.show()去除比较差的模型,比如MLP-nn或者Boostingcurrent = [c for c in store.columns if c not in ["boosting"]] print("Average roc score is %.3f" % roc_auc_score(ytest, store.loc[:, current].mean(axis=1)))Average roc score is 0.880Exercícios--- **Exercício 1**: O que são strings ? Strings são imutáveis ? Demonstre!# solução # Strings são um conjunto de caracteres que estão dentro de aspas simples ('') ou aspas duplas("") # Strings são imutáveis, isto é, não não podemos alterar o valor dentro do índice da string! nome = 'Juca' nome[0] nome[0] = 'L'**Exercício 2**: Como funciona o operador `in` ?# o operado in faz a seguinte pergunta a string: está dentro ? se o valor perguntado estiver dentro da string, ele retorna True # se não ele retorna False nome = 'Juca' print(nome) print('j' in nome) print('a' in nome)True![title](https://i.ibb.co/f2W87Fg/logo2020.png)--- Task 17 - You Only Look Once Object DetectionYou only look once (YOLO) is a state-of-the-art, real-time object detection system. 
The latest improvement of YOLO achieve mAP of 57.9% on COCO test-dev with an ability of processing images at 30 FPS.Currently there are several version of improvements, from YOLO, YOLO9000, YOLOv2, and the latest is YOLOv3In this exercise, we will build a simplified version of YOLO detection system and train it using **makeshift** detection dataset Write down your Name and Student ID## --- start your code here ---- NIM = 1301170073 Nama = '' ## --- end your code here ----------[Part 0] Import Libraries --- 1 - Import Moduleimport the necessary modulesimport numpy as np import tensorflow as tf from PIL import Image, ImageDraw, ImageFont from tensorflow.keras.layers import * from tensorflow.keras.models import Sequential, Model from tensorflow.keras.utils import to_categorical, plot_model from tensorflow.keras import backend as K import matplotlib.pyplot as plt %matplotlib inline np.set_printoptions(precision=2,suppress=True)download font type needed to write to the imageyou can change this to another font type of your liking!wget -O 'font.ttf' 'http://webpagepublicity.com/free-fonts/x/Xerox%20Sans%20Serif%20Narrow.ttf' -q--- 2 - Turn off Eager ExecutionSince this simplified version is using old Keras backend, when using TensorFlow 2.0 we need to turn off the Eager executionto do that run the following celltf.compat.v1.disable_eager_execution()------[Part 1] Detection DatasetNow the Detection Dataset is usually quite expensive to build, and the already existing datasets are usually quite large in scale. Which is too much for our simplified exerciseSo let's generate a simplified dataset which is only images of string labels simulating the obects much like the image belowThis way we can generate a detection dataset with much cheaper process --- 1 - Define Class and ColorNow let's define the class and colorFirst, define each object label in `cl_label` list. You can change the label or even add more class if you want.**But the last labes should always be `'background'`**Then define what color you want the label to be drawn on the image. You can very much use black and white images, but the network will train easier if the color vary for each classAlso define the background image color outside the `cl_color` list --- **EXERCISE:** change or add the labels and their corresponding colorcl_label = ['car', 'person', 'background'] cl_color = ['darkred', 'orangered', 'cyanred'] cl_hot = to_categorical(np.arange(len(cl_label))) bg_color = 'LightGray' classes = list(zip(cl_label, cl_hot, cl_color)) nb_class = len(classes)let's see the class listprint('number of class:',nb_class) for c in classes: print(c)number of class: 3 ('car', array([1., 0., 0.], dtype=float32), 'darkred') ('person', array([0., 1., 0.], dtype=float32), 'orangered') ('background', array([0., 0., 1.], dtype=float32), 'cyanred')**EXPECTED OUTPUT**:if you're using default $3$ classesnumber of class: 3('bike', array([1., 0., 0.], dtype=float32), 'darkred')('person', array([0., 1., 0.], dtype=float32), 'orangered')('background', array([0., 0., 1.], dtype=float32), '') --- 2 - Define Image and Grid SizeIn the actual YOLO Detection system, it divides the input image into an $S\times S$ grid.Each grid cell predicts only one object and it incorporates a fixed number of boundary boxes called **anchors** or **priors**. The number anchors are defined by the shape of the bounding boxes. This tries to incorporate that in the real-life domain, the boundary boxes are not arbitrary. 
For example, cars and pedestrians have different box orientation (aspect ratio). Instead of predicting 5 arbitrary boundary boxes, we predict offsets to each of the constrained anchor boxes ---The more cells the image is divided to increase the detection accuracy as it will be more detailed. But too much cells and the detection will slow down.For this exercise, we will use only one anchor.The implementation in this exercise actually support multiple anchors, but we're not going to use it.You can, however, change the image size and grid number. For example changing it to $96\times 96$ with $3\times 3$ grid --- **EXERCISE:** Define the image size and grid sizeimg_size = (64, 64) grid_size = (2, 2)for this exercise, we only use 1 anchornb_anchor = 1Now calculate the cell size in each gridimg_w, img_h = img_size grid_w, grid_h = grid_size cell_w = img_w//grid_w cell_h = img_h//grid_h print('image w/h :', img_w, img_h) print('grid w/h :', grid_w, grid_h) print('cell w/h :', cell_w, cell_h)image w/h : 64 64 grid w/h : 2 2 cell w/h : 32 32**EXPECTED OUTPUT**:if you're using default $64\times 64$ image with $2\times 2$ gridimage w/h : 64 64grid w/h : 2 2cell w/h : 32 32 --- 3 - Detection TargetThis is how we will generate our dataset.**So read this carefully*** First we start off with a **blank image*** For each grid cell, we randomly add either **ONE** object or **BACKGROUND** * Adding background means that there is no object to draw in that grid * If it is an object, randomly draw the object in that grid range * Generate the target list* Each grid cell returns a list as the **cell target*** All lists of cell targets of an image are then combined into the **image target** listNow for the explanation of each list --- a. Cell TargetEach cell has a list of number as target consisting three parts: **class score**, **bounding box**, and **object confidence**grid target list* The **class score** is one-hot label score very much like classification target, indicates what object classified in that cell location. * The **bounding box** consists of $4$ values indicates the starting position ($x, y$ coordinates) followed by its box **width** and **height** ($w, h$) * The $x$ and $y$ is a scalar ranged from $0$ to number of grid $S$ * The $w$ and $h$ is a scalar ranged from $0$ to $1$ scaling the **image width** and **image height*** The **object confidence** score is a value range from $0$ to $1$ indicates the network confidance of the appearance of an object in that locationThe length of cell target is  $C+4+1$, where  $C$  is the number of object to be detected. Usually background is not included in label, but for our exercise we did. Thus for this exercise, with $3$ classes, the length of target should be  $3+4+1=8$ --- b. Multiple AnchorsAs mentioned before, we can use multiple anchor box detection. This is useful if we want to detect multiple objects in one cell, or if we want to specialize different box shape for different class.anchor boxFor multiple Anchors, the target list will be multiplied as many achors usedSo the length will be  $B*(C+4+1)$, where  $C$  is the number of class and  $B$  is the number of anchorgrid target list (version A)Another variation in multiple anchor box (**like the one implemented in this exercise**) is that although it uses multiple anchor, each grid only perform **single detection**.Thus the  $C$  number of class only defined once at the beginning. So the length will only be  $C + B*(4+1)$grid target list (version B) --- c. 
Image TargetEach grid cell has a list $B*(C+4+1)$. Now for an input image of $S\times S$ grid cell, we stack all those lists into a giant image target listSo the target length of an image is $S*S*\big(B*(C+4+1)\big)$Or if we use the other alternative, like the image below, we will get list with the length of$S*S*\big(C+B*(4+1)\big)$ --- 4 - Add Background FunctionIf in a grid, indicates by `row` and `col` position, we want to add a background target (no object), then all we have to do is generate a target in the center of that grid celladd background at $(1,0)$For example, if we have a background in grid $( 1, 0)$ from a $2\times 2$  grid as depicted above, then* the target $x$ will be at $1.5$($\text{col}+\frac{1}{2}$ )* the target $y$ will be at $0.5$ ($\text{row}+\frac{1}{2}$ )* the width $w$ and height $h$ will be $0.5$ and $0.5$ (half the image, since it's $2\times 2$ grid)* lastly, the object confidence is $0$ --- **EXERCISE:** Implement the function as defined abovedef add_background(y_hot, col, row): # convert y_hot vector # from array float into a list of integers y_hot = list(y_hot.astype('int')) # calculate bounding box x = col + 0.5 # col number + 1/2 y = row + 0.5 # row number + 1/2 w = 1 / grid_w # 1 / grid width h = 1 / grid_h # 1 / grid height p = 0 # probability is 0 # combine all into a single list target = [] target += y_hot target += [x, y, w, h] target += [0] return targetCheck your implementation(l, y, c) = classes[-1] print('background at (0,0):') t = add_background(y, 0, 0) print('tarhet :', t) print('len(t) :', len(t), '\n') print('background at (1,0):') t = add_background(y, 1, 0) print('tarhet :', t) print('len(t) :', len(t), '\n')background at (0,0): tarhet : [0, 0, 1, 0.5, 0.5, 0.5, 0.5, 0] len(t) : 8 background at (1,0): tarhet : [0, 0, 1, 1.5, 0.5, 0.5, 0.5, 0] len(t) : 8**EXPECTED OUTPUT**:if you're using default $3$ classes background at (0,0): tarhet : [0, 0, 1, 0.5, 0.5, 0.5, 0.5, 0] len(t) : 8 background at (1,0): tarhet : [0, 0, 1, 1.5, 0.5, 0.5, 0.5, 0] len(t) : 8 --- 5 - Add Object FunctionThen if we want to add an object sized   $[w_\text{obj}\times h_\text{obj}]$  to the image at **CENTER** coordinate of $(x, y)$, we need to draw the bounding box from half $w_\text{obj}$ up and half $h_\text{obj}$ left to the coordinateFor example as shown in the picture, if we were to add * an object sized $[24\times 10]$ * to an image sized $[64\times 64]$ with $[2\times 2]$ grid frame* at a **CENTER** coordinate of $x=20$ and $y=40$ Then the object **STARTING** coordinate to draw is* $x_\text{obj} = 20-\frac{24}{2} = 8$* $y_\text{obj} = 40-\frac{10}{2} = 35$Now to get the target, we need to scale it accordingly, thus we get* $x = 20/32 = 0.625$* $y = 40/32 = 1.25$* $w = 24/64 = 0.375$* $h = 10/64 = 0.156$Lastly, the object confidence is $1$ --- **EXERCISE:** Implement the function as defined abovedef add_object(img_draw, obj_idx, x, y): # retrieve the object information (label, y_hot, color) = classes[obj_idx] # convert y_hot vector # from array float into a list of integers # see implementation above y_hot = list(y_hot.astype('int')) # set object width and height obj_w = len(label)*6 obj_h = 10 # calculate object coordinates obj_x = x - (obj_w / 2) # x - object width / 2 obj_y = y - (obj_h / 2) # y - object height / 2 # draw the object (label) to image img_draw.text((obj_x, obj_y), label, fill=color) # calculate bounding box x = x / cell_w # x / cell width y = y / cell_h # y / cell height w = obj_w / img_w # object width / image width h = obj_h / img_h # object 
height / image height p = 1 # probability is 1 # combine all into a single list target = [] target += y_hot target += [x, y, w, h] target += [1] return targetCheck your implementationtest_img = Image.new('RGB', img_size, color=bg_color) draw = ImageDraw.Draw(test_img) x = 20 y = 40 target = add_object(draw, 0, x, y) plt.imshow(test_img) plt.axis('off') plt.show() print('target:',target)**EXPECTED OUTPUT**:if you're using default $3$ classes with $64\times 64$ image with $2\times 2$ gridtarget: [1, 0, 0, 0.625, 1.25, 0.375, 0.15625, 1] --- 6 - Generate Data FunctionNow we define our generate data function which repeatedly randomize an object and put it into each grid of an image.If the randomized object is `background`, call `add_background()` function, andif it is an object, randomize the center location, and call `add_object()` function --- **EXERCISE:** Implement the function as defined abovedef generate_data(nb_data): # create empty list for data and label x_train = [] y_train = [] for j in range(0, nb_data): # generate empty image base base_img = Image.new('RGB', img_size, color=bg_color) img_draw = ImageDraw.Draw(base_img) # create empty list for targets img_target = [] # loop over all grid (row and col) for row in range(grid_w): for col in range(grid_h): # randomize object id # call .randint() function from numpy # with random range (0 , nb_class) idx = np.random.randint(0, nb_class) # get class information (label, y_hot, color) = classes[idx] # check if it's a background if (label=='background'): # call add_background() funtion with input y_hot, col, and row new_target = add_background(y_hot, col, row) else: # randomize coordinate center location # call .randint() function from numpy # with random range (col*cell_w , (col+1)*cell_w) x = np.random.randint(col*cell_w, (col+1)*cell_w) # call .randint() function from numpy # with random range (row*cell_h , (row+1)*cell_h) y = np.random.randint(row*cell_h , (row+1)*cell_h) # call add_object() function with input img_draw, idx, x, and y new_target = add_object(img_draw, idx, x, y) # append new_target into img_target list img_target.append(new_target) # convert base_img into a numpy array np_img = np.asarray(base_img) # append np_img into x_train list x_train.append(np_img) # append img_target into y_train list y_train.append(img_target) # convert into numpy array x_train = np.array(x_train).astype('float32')/255 y_train = np.array(y_train).astype('float32') return x_train, y_train--- 7 - Generate DataAnd that's itNow let's generate $5000$ training data and another $1000$ as validation datax_train, y_train = generate_data(5000) x_val, y_val = generate_data(1000) print('x_train shape:', x_train.shape) print('y_train shape:', y_train.shape)x_train shape: (5000, 64, 64, 3) y_train shape: (5000, 4, 8)**EXPECTED OUTPUT**:if you're using default $3$ classes $64\times 64$ image with $2\times 2$ gridx_train shape: (5000, 64, 64, 3)y_train shape: (5000, 4, 8) Let's view the first image from training set and its targetplt.imshow(x_train[0]) plt.axis('off') plt.show() for tgt in y_train[0]: print(tgt)Now view 10 images from training setYou should see various images with random number of objects at random locationsplt.figure(figsize=(10,6)) for i in range(10): plt.subplot(2,5,i+1) plt.imshow(x_train[i]) plt.axis('off') plt.tight_layout() plt.show()------[Part 2] Visualizing Detection BoxNow let's create a function to draw the bounding box to an image.**REMEMBER** that we're using target list design where each grid consist of a list with length $C + 
B*(4+1)$ --- 1 - Box Grid FunctionGiven an image, the target `y_grid`, along with `col` and `row` number for current grid location, we draw the bounding box as follow:* First, extract the class from `y_grid` and get the class index* Then reshape the rest of `y_grid` into a matrix of `(nb_anchor, 5)`* Then loop over each anchor box * Extract the $x, y, w, h, $ and $p$ from the anchor * The $x$ and $y$ from prediction is in range $0$ to $1$ as the result from the grid input, and not the image. So to draw the prediction box, we need to add $x$ and $y$ with `row` and `col` number * Now to perform a simplified **Non-max Suppression**, check if confidence $p$ is higher than `max_p` from other anchor * If the current confidence $p$ is higher, calculate and overwrite the bounding box coordinates * Now, it's a little bit different when we draw the rectangle as it uses $[x1,y1,x2,y2]$ rather than using **width** and **height** * Set color to draw the box. For example, target box is **gray**, high confidence prediction is **green**, and low confidence prediction is **blue** * If the class is not background, draw the box --- **EXERCISE:** Implement the function as defined abovedef draw_box_grid(draw, y_grid, row=0, col=0, pad=0, is_prediction=False, threshold = 0.1): # initialize max probability = 0 max_p = 0 # get class from y_grid[0:nb_class] y_class = y_grid[0:nb_class] # get class id, use np.argmax() with input y_class y_pred = np.argmax(y_class) # reshape the rest of anchor list into (nb_anchor, 5) anchor_box = y_grid[nb_class:].reshape(nb_anchor, 5) # loop over anchor box for b in anchor_box: # retrieve x, y, w, h and p # from b[0] to b[4] x = b[0] y = b[1] w = b[2] h = b[3] p = b[4] # add row number to x x += row # add col number to y y += col # if current probability is higher than max if p > max_p: max_p = p rx1 = (x * cell_w) - (w*img_w / 2) + pad # x*cell width - w*image width/2 + pad ry1 = (y * cell_h) - (h*img_h / 2) + pad # y*cell height - h*image height/2 + pad rx2 = w*img_w + rx1 # w*image width + rx1 ry2 = h*img_h + ry1 # h*image height + ry1 # is object if probability is higher than threshold is_object = p > threshold # set color box if is_prediction: if is_object: box_color = 'LimeGreen' else: box_color = 'RoyalBlue' else: box_color = 'DarkGray' # if class is not background, draw the box if y_pred!=nb_class-1: draw.rectangle([rx1, ry1, rx2, ry2], outline=box_color) return y_pred, is_objectCheck your implementationtest_img = Image.new('RGB', img_size, color=bg_color) draw = ImageDraw.Draw(test_img) x = 20 y = 40 target = add_object(draw, 0, x, y) plt.subplot(121) plt.imshow(test_img) plt.axis('off') print('original target',target) target = np.array(target) y_pred, is_object = draw_box_grid(draw, target, row=0, col=0, is_prediction=False) plt.subplot(122) plt.imshow(test_img) plt.axis('off') plt.show() print('predicted:',y_pred, ', is object?',is_object)original target [1, 0, 0, 0.625, 1.25, 0.28125, 0.15625, 1]**EXPECTED OUTPUT**:if you're using default $3$ classes $64\times 64$ image with $2\times 2$ gridoriginal target [1, 0, 0, 0.625, 1.25, 0.375, 0.15625, 1]predicted: 0 , is object? 
True --- 2 - Draw Bounding Box FunctionNow we define function to iterate over grid and draw the bounding box in it.Here's step by step of this function* First we pad the image with zero pixels as a place to write the prediction class* Loop over the grid * Draw the target box * If prediction is provided, draw the prediction box * write the predicted class on top and bottom of the image note that the code only designed for $2\times 2$ grid * set the text color. For example **green** if it's the correct class, **pink** if it's wrong or having low confidence, and **black** if prediction is not provided --- **EXERCISE:** Implement the function as defined abovedef draw_bbox(np_img, target, prediction=None, pad=10, threshold=0.1): # rescale np_img by multiplying it with 255 img = np_img * 255 # add white padding as a place to write class prediction img = np.pad(img, ((pad,pad),(pad,pad), (0,0)), 'constant', constant_values=255) # generate image img = Image.fromarray(img.astype('uint8'), 'RGB') img_draw = ImageDraw.Draw(img) font = ImageFont.truetype(r'font.ttf', 10) for row in range(grid_w): for col in range(grid_h): # get current target for this grid target_grid = target[col*grid_h+row] # call draw_box_grid() function with input img_draw, target_grid, and pad=pad y_act,_ = draw_box_grid(img_draw, target_grid, pad=pad) if prediction is not None: # get current prediction for this grid prediction_grid = prediction[col*grid_h+row] # call draw_box_grid() function with input # img_draw, prediction_grid, row, col, # pad=pad, is_prediction=True, and threshold=threshold y_pred, conf = draw_box_grid(img_draw, prediction_grid, row, col, pad=pad, is_prediction=True, threshold=threshold) # set color for predicted class writing if y_pred == y_act and conf: # correct detection fill = 'DarkGreen' s = str(y_act)+'-'+str(y_pred) else: # wrong detection or low confidence fill = 'DeepPink' s = str(y_act)+'-'+str(y_pred)+' (?)' else: # drawing target s = str(y_act) fill = 'black' # draw text class target/prediction img_draw.text((row+row*cell_w+pad, col+img_h*col+1+col*8), s, fill=fill, font=font) # convert img into a numpy array img = np.array(img) return imgCheck your implementationYou should see that the "objects" are correctly localized with its bounding boxes.plt.figure(figsize=(15,8)) for i in range(10): img = draw_bbox(x_train[i], y_train[i]) plt.subplot(2,5,i+1) plt.imshow(img) plt.axis('off') plt.tight_layout() plt.show()------[Part 3] YOLO DetectionAnd we're arrive at the main function, the YOLO Detection itselfFirst we define our classification model. The original YOLO network consists of $24$ convolutional layers alternating between $3\times 3$ convolution and $1\times 1$ bottleneck layers to reduce the features space from preceding layers, followed by $2$ FC layers from a $448\times 448$ image input. But for our simple exercise, we won't need that big network.So let's define our own --- 1 - Define Input ShapeUse Functional API to build the model. 
--- **EXERCISE:** Define input and output shape# input shape is a tuple of image height, image width, and 3 channel input_shape = (img_h, img_w, 3) # total grid is grid width * grid height total_grid = grid_w * grid_h # anchor length is number class + number anchor * 5 anchor_length = nb_class + nb_anchor*5Check your implementationprint('input shape :', input_shape) print('total grid :', total_grid) print('anchor length :', anchor_length)input shape : (64, 64, 3) total grid : 4 anchor length : 8**EXPECTED OUTPUT**:if you're using default $3$ classes $64\times 64$ image with $2\times 2$ gridinput shape : (64, 64, 3)total grid : 4anchor length : 8 --- 2 - Define Classification ModelUse Functional API to build the model. --- **EXERCISE:** Build model implementing the architecture as follow * Input layer of (img_h,img_w,3) * conv layer with 16 filters of 1x1 without activation * conv layer with 32 filters of 3x3 without activation * LeakyReLU activation with alpha 0.01 * MaxPool2D layer * conv layer with 16 filters of 3x3 without activation * conv layer with 32 filters of 3x3 without activation * LeakyReLU activation with alpha 0.01 * MaxPool2D layer * Flatten layer * Fully connected layer with 256 neurons and activation sigmoid * Fully connected layer with output_shape neurons and activation sigmoid * Reshape layer to reshape output activation to (grid, anchors) shape# define Input() layer with shape=input_shape input_tensor = Input(shape=input_shape) # add conv2d to input_tensor with 16 filters, kernel size 1 x = Conv2D(16, kernel_size=1)(input_tensor) # add conv2d to x with 32 filters, kernel size 3 x = Conv2D(32, kernel_size=3)(x) # add leakyrelu to x alpha=0.3 x = LeakyReLU()(x) # add maxpool2d layer to x x = MaxPool2D()(x) # add conv2d to x with 16 filters, kernel size 3 x = Conv2D(16, kernel_size=3)(x) # add conv2d to x with 32 filters, kernel size 3 x = Conv2D(32, kernel_size=3)(x) # add leakyrelu to x alpha=0.3 x = LeakyReLU()(x) # add maxpool2d layer to x x = MaxPool2D()(x) # add flatten layer to x x = Flatten()(x) # add dense layer to x with 256 neurons and sigmoid activation x = Dense(256, activation='sigmoid')(x) # add dense layer to x with total_grid * anchor_length neurons and sigmoid activation x = Dense(total_grid*anchor_length, activation='sigmoid')(x) # add reshape layer to x with input (total_grid, anchor_length) output_tensor = Reshape((total_grid, anchor_length))(x) # initialize model by calling Model() function with input input_tensor and output_tensor model = Model(input_tensor, output_tensor)Now to check your implementationprint(model.summary())Model: "functional_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 64, 64, 3)] 0 _________________________________________________________________ conv2d (Conv2D) (None, 64, 64, 16) 64 _________________________________________________________________ conv2d_1 (Conv2D) (None, 62, 62, 32) 4640 _________________________________________________________________ leaky_re_lu (LeakyReLU) (None, 62, 62, 32) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 31, 31, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 29, 29, 16) 4624 ______________________________________________________[...]**EXPECTED OUTPUT**:if you're using default $3$ classes $64\times 64$ image with $2\times 2$ gridinput_? 
(InputLayer) [(None, 64, 64, 3)] 0 conv2d (Conv2D) (None, 64, 64, 16) 64 ...dense_? (Dense) (None, 32) 8224 reshape (Reshape) (None, 4, 8) 0 Total params: 1,406,896Trainable params: 1,406,896Non-trainable params: 0_________________________________________________________________ Visualize the Network architectureplot_model(model, show_shapes=True, show_layer_names=False, rankdir='LR', expand_nested=False, dpi=60 )--- 3 - Define LossYOLO uses sum-squared error between the predictions and the ground truth to calculate loss. The loss function is composed of:* the **classification loss.*** the **localization loss** (errors between the predicted bounding box and the ground truth).* the **confidence loss** (the objectness of the box). --- a. Classification LossIf an object is detected, the classification loss at each cell is the squared error of the class conditional probabilities for each class:$$ \sum_{i=0}^{S^2}{1}_i^\text{obj} \sum_{c \in \text{classes}}\big(p_i(c)-\hat{p_i}(c)\big)^2$$ --- b. Localization LossThe localization loss measures the errors in the predicted bounding box locations and sizes. We only count the box responsible for detecting the object.$$ \lambda_\text{coord}\sum_{i=0}^{S^2}\sum_{j=0}^{B}{1}_{ij}^\text{obj} \big[(x_i-\hat{x}_i)^2+(y_i-\hat{y}_i)^2\big]\\+\lambda_\text{coord}\sum_{i=0}^{S^2}\sum_{j=0}^{B}{1}_{ij}^\text{obj} \big[(\sqrt{w_i}-\sqrt{\hat{w}_i})^2+(\sqrt{h_i}-\sqrt{\hat{h}_i})^2\big]$$ --- c. Confidence LossIf an object is detected in the box, the confidence loss (measuring the objectness of the box) is:$$\sum_{i=0}^{S^2}\sum_{j=0}^{B}{1}_{ij}^\text{obj}(C_i-\hat{C}_i)^2$$If an object is **NOT** detected in the box, the confidence loss is:$$\lambda_\text{noobj}\sum_{i=0}^{S^2}\sum_{j=0}^{B}{1}_{ij}^\text{noobj}(C_i-\hat{C}_i)^2$$ --- d. Complete ImplementationNow don't be disheartened, we have already prepared the loss implementation for you.
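Before reading it, here is a minimal NumPy sketch (an added illustration with made-up numbers; the weights $\lambda_\text{coord}=5$ and $\lambda_\text{noobj}=0.5$ come from the YOLO paper and are assumptions, not values used in this notebook) of how the three terms above combine for a single responsible box:

```python
# Toy, single-box illustration of the loss terms above (not the notebook's implementation).
import numpy as np

lambda_coord, lambda_noobj = 5.0, 0.5   # YOLO paper defaults (assumption for this sketch)

# ground truth: class probabilities, box (x, y, w, h) and confidence C
p_true   = np.array([0.0, 1.0, 0.0])        # the object belongs to class 1
box_true = np.array([0.4, 0.6, 0.5, 0.5])   # x, y, w, h in relative units
C_true   = 1.0                              # an object is present in this box

# prediction made by the responsible box
p_pred   = np.array([0.1, 0.8, 0.1])
box_pred = np.array([0.35, 0.65, 0.45, 0.55])
C_pred   = 0.7

class_loss = np.sum((p_true - p_pred) ** 2)
xy_loss    = lambda_coord * np.sum((box_true[:2] - box_pred[:2]) ** 2)
wh_loss    = lambda_coord * np.sum((np.sqrt(box_true[2:]) - np.sqrt(box_pred[2:])) ** 2)
conf_loss  = (C_true - C_pred) ** 2         # a no-object box would instead be weighted by lambda_noobj

print(class_loss, xy_loss, wh_loss, conf_loss)
```

The prepared implementation below follows the same squared-error structure, but applies it to every grid cell and anchor with Keras backend ops, uses the IoU between the predicted and true boxes as the confidence target, and simply sums the four terms without the $\lambda$ weights.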
So read the code carefully.def custom_loss(y_true, y_pred): grid = np.array([ [[float(x),float(y)]]*nb_anchor for y in range(grid_h) for x in range(grid_w)]) # get the actual and predicted class y_true_class = y_true[...,0:nb_class-1] y_pred_class = y_pred[...,0:nb_class-1] # get the actual and predicted box pred_boxes = K.reshape(y_pred[...,nb_class:], (-1,grid_w*grid_h,nb_anchor,5)) true_boxes = K.reshape(y_true[...,nb_class:], (-1,grid_w*grid_h,nb_anchor,5)) # get predicted coordinates and confidence y_pred_xy = pred_boxes[...,0:2] + K.variable(grid) y_pred_wh = pred_boxes[...,2:4] y_pred_conf = pred_boxes[...,4] # get actual coordinates and confidence y_true_xy = true_boxes[...,0:2] y_true_wh = true_boxes[...,2:4] y_true_conf = true_boxes[...,4] # calculate classification loss clss_loss = K.sum(K.square(y_true_class - y_pred_class), axis=-1) # calculate localization loss xy_loss = K.sum(K.sum(K.square(y_true_xy - y_pred_xy),axis=-1)*y_true_conf, axis=-1) wh_loss = K.sum(K.sum(K.square(K.sqrt(y_true_wh) - K.sqrt(y_pred_wh)), axis=-1)*y_true_conf, axis=-1) # non-max suppression intersect_wh = K.maximum(K.zeros_like(y_pred_wh), (y_pred_wh + y_true_wh)/2 - K.abs(y_pred_xy - y_true_xy) ) intersect_area = intersect_wh[...,0] * intersect_wh[...,1] true_area = y_true_wh[...,0] * y_true_wh[...,1] pred_area = y_pred_wh[...,0] * y_pred_wh[...,1] # calculate intersection over union union_area = pred_area + true_area - intersect_area iou = intersect_area / union_area # calculate confidence loss conf_loss = K.sum(K.square(y_true_conf*iou - y_pred_conf)*y_true_conf, axis=-1) # sum all losses d = clss_loss + xy_loss + wh_loss + conf_loss return d--- 4 - Compile ModelNow compile model by adding the custom loss and Adam optimizer --- **EXERCISE:** compile model using custom_loss and Adam optimizer with lr=0.001from tensorflow.keras.optimizers import Adam # initialize Adam optimizer with lr=0.001 myAdam = Adam(learning_rate=0.001) # compile model with loss=custom_loss and optimizer=myAdam model.compile(optimizer=myAdam, loss=custom_loss)--- 5 - Train ModelTrain the model for 80 epochshist = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=512, epochs=80)Train on 5000 samples, validate on 1000 samples Epoch 1/80 5000/5000 [==============================] - ETA: 0s - loss: 0.6297WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training_v1.py:2048: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version. Instructions for updating: This property should not be used in TensorFlow 2.0, as updates are applied automatically. 
5000/5000 [==============================] - 1s 280us/sample - loss: 0.6297 - val_loss: 0.5482 Epoch 2/80 5000/5000 [==============================] - 1s 164us/sample - loss: 0.4931 - val_loss: 0.4304 Epoch 3/80 5000/5000 [==============================] - 1s 162us/sample - loss: 0.3954 - val_loss: 0.3611 Epoch 4/80 5000/5000 [==============================] - 1s 163us/sample - loss: 0.3432 - val_loss: 0.3217 Epoch 5/80 5000/5000 [==============================] - 1s 164us/sample - loss: 0.2999 - val_loss: 0.2810 E[...]**EXPECTED OUTPUT**:your loss should start around 0.6 and end around 0.01 after 80 epochs Visualize training historyplt.rcParams['figure.figsize'] = [8, 4] plt.subplots_adjust(wspace=0.2) plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Val']) plt.show()--- 6 - Test DetectionLet's show the detection results --- a. Using Data TrainFirst, let's see the performance on training datay_pred = model.predict(x_train) plt.figure(figsize=(15,8)) for i in range(10): img = draw_bbox(x_train[i], y_train[i], y_pred[i], threshold=0.1) plt.subplot(2,5,i+1) plt.imshow(img) plt.axis('off') plt.tight_layout() plt.show()You should see that most of the objects are detected with quite precisionthe **pink** label and **blue** detection box means low confidence or wrong classificationyou can train it longer to get more precision on the detection boxyou can also lower the threshold when drawing the detection box --- b. Using Data ValidationNow view the detection results on validation datay_pred_val = model.predict(x_val) plt.figure(figsize=(15,8)) for i in range(10): img = draw_bbox(x_val[i], y_val[i], y_pred_val[i], threshold=0.1) plt.subplot(2,5,i+1) plt.imshow(img) plt.axis('off') plt.tight_layout() plt.show()--- c. Using Data TestLastly, let's generate another 20 new data, and test it# generate 20 new data x_test, y_test = generate_data(20) # predict new data y_pred_test = model.predict(x_test) plt.figure(figsize=(15,12)) for i in range(20): img = draw_bbox(x_test[i], y_test[i], y_pred_test[i], threshold=0.1) plt.subplot(4,5,i+1) plt.imshow(img) plt.axis('off') plt.tight_layout() plt.show()Robust Principal Component Analysis Classifying faces.import numpy as np import matplotlib.pyplot as plt import pandas as pd from mpl_toolkits.mplot3d import Axes3D from matplotlib.image import imread import os import scipy.io ## set plotting paramenters as default for the rest of the notebook plt.rcParams["figure.figsize"] = [10,4] plt.rc('font', family='serif') plt.rc('xtick', labelsize=13) plt.rc('ytick', labelsize=13) plt.rcParams.update({'legend.fontsize': 11}) plt.rcParams.update({'axes.labelsize': 15}) plt.rcParams.update({'font.size': 15}) # play with O(n^2) and O(n*log(n)) # Quick refresher that the DFT and FFT scale with O(n^2) and O(n*log(n)), respectively nf = np.linspace(1,100) plt.plot(nf, nf**2, label=r"$O(n^2)$") plt.plot(nf, nf*np.log(nf), label=r"$O(n \log{n})$") plt.xlabel("number of computations") plt.title("time to compute") plt.legend()Understand $O(n^2)$ vs $O(n \log{n})$ time complexity Eigenfaces Import the **.mat faces dataset, then span an eigenface space and use it to classify poeple and also use it to represent another pictures, e.g. 
al botoncito.** Find the PCA using:\begin{align*}{\bf B} &= {\bf X - \bar{X}} \\\rightarrow {\bf B} &= {\bf U\Sigma V^*} \end{align*}mat_contents = scipy.io.loadmat(os.path.join('/', "home", "igodlab", "Documents", "DataDriven", "DATA", 'allFaces.mat')) ## loads the **.mat file as a Python dictionary faces = mat_contents['faces'] ## images of faces (each of them is flattened) m = int(mat_contents['m']) ## actual shape of each image n = int(mat_contents['n']) ## actual shape of each image ntot = int(mat_contents["person"]) ## total #of people = 38 nfaces = mat_contents["nfaces"][0] ## #of pictures for the same person, total=38 people print("'faces' matrix contains pictures as the columns. Every person has 59 to 64 different \ pictures so the total number of columns is the sum of 'nfaces' vector") faces.shape ## example plot one of the faces nper = 34 ## #of person npic = 44 ith = sum(nfaces[:nper-1])+(npic-1) ## 44-th picture of person: nper=34 ith_face = np.reshape(faces[:,ith], (m,n)).T ## reshape and transpose to get the rigth format plt.imshow(ith_face) plt.axis("off") plt.set_cmap("gray") plt.show() ## compute the eigenface space nper_train = int(0.95*len(nfaces)) ntrain = sum(nfaces[:nper_train]) Xtrain = faces[:, :ntrain] ## training set avg_face = np.tile(np.mean(Xtrain, axis=1), (np.shape(Xtrain)[1], 1)).T B = Xtrain - avg_face U, S, VT = np.linalg.svd(B, full_matrices=False) ## plot the average face and the first 7 modes fig, axes = plt.subplots(2,4,figsize=(15,8)) for i in range(4): if i == 0: axes[0,0].imshow(np.reshape(avg_face[:,0], (m,n)).T) axes[0,0].set_title("Average face") axes[0,0].axis("off") else: axes[0,i].imshow(np.reshape(U[:,i], (m,n)).T) axes[0,i].set_title(r"$u_{:.0g}$".format(i)) axes[0,i].axis("off") axes[1,i].imshow(np.reshape(U[:,i+4], (m,n)).T) axes[1,i].set_title(r"$u_{:.0g}$".format(i+4)) axes[1,i].axis("off") ## import this function for case (iii) from github, same authors of the paper referenced from OptHT import optht ### optimal hard thereshold, method 3 #gamma = 1 beta = np.shape(B)[1]/np.shape(B)[0] lmbda = (2*(beta+1)+8*beta/((beta+1)+(beta**2+14*beta+1)**(1/2)))**(1/2) #tau = lmbda*np.sqrt(np.shape(faces)[0])*gamma r_opt = optht(beta, S) tau = 1264.0306430252317 ## define the cutoff value r = len(S)-1 ## use total number -1 because is extremly small ## plot plt.figure(figsize=(14,4)) plt.subplot(1,2,1) plt.semilogy(S[:r],'.') plt.hlines(tau, 0, r, linestyle="--", color="r") plt.semilogy(S[:r_opt], "r.") plt.xlim(0.0-50, r+50) plt.ylabel(r"$\sigma_r$") plt.xlabel(r"$r$") plt.subplot(1,2,2) plt.plot(np.cumsum(S[:r])/sum(S[:r]), ".") plt.plot(np.cumsum(S[:r_opt])/sum(S[:r]), "r.") plt.vlines(r_opt, 0, sum(S[:r_opt])/sum(S[:r]), linestyle="--", color="r") plt.hlines(sum(S[:r_opt])/sum(S[:r]), 0.0, r_opt, linestyle="--", color="r") plt.xlim(0.0-50, r+50) plt.ylabel(r"cumsum[$\sigma_r$]") plt.xlabel(r"$r$") ## show noisy eigenface-space U's n_ht = 800 plt.imshow(np.reshape(U[:,n_ht], (m,n)).T) plt.axis("off") plt.show()Example of an eigenface (PCA) past the threshold, in this case number 800## built classifier prototype Xtest = faces[:,ntrain:] ## collection set of faces for the two people of the test set ## plot fig2 = plt.figure() axes = fig2.add_subplot(111, projection='3d') pcax = [3,4, 5] ## 3 PCA axis for j in range(np.shape(Xtest)[1]): x = U[:,pcax[0]].T @ Xtest[:,j] y = U[:,pcax[1]].T @ Xtest[:,j] z = U[:,pcax[2]].T @ Xtest[:,j] if (j >= 0) and (j < nfaces[nper_train]): axes.scatter(x,y,z, marker="s", color="purple", s=40) else: 
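# Added comment for clarity (not in the original notebook): the if-branch above plots the
# pictures of the first held-out person (labelled "person 37" in the legend below) as purple
# squares, while this else-branch plots the remaining test pictures, i.e. the second
# held-out person ("person 38"), as blue circles.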
axes.scatter(x,y,z, marker="o", color="b", s=40) axes.view_init(elev=0, azim=0) ## fix the 3D view axes.scatter([], [], [], marker='s',color='purple', label="person 37") axes.scatter([], [], [], marker='o',color='b', label="person 38") axes.set_xlabel("PC"+str(pcax[0]+1)) axes.set_ylabel("PC"+str(pcax[1]+1)) axes.set_zlabel("PC"+str(pcax[2]+1)) axes.legend() U.T.shape⊕ [The world's simplest Python template engine — Makina Corpus](https://makina-corpus.com/blog/metier/2016/the-worlds-simplest-python-template-engine)'Name: {person[name]}, age: {person[age]}'.format(person={'name': 'Eric', 'age': 41}) class Song(object): title = 'Where is my mind' 'My favorite song is: {song.title}'.format(song=Song()) "My name is {name} and I am {age}".format(name="Eric", age=41) "My name is %s and I am %d" % ("Eric", 41) '{:^20}'.format('centered') 'My name is {name.upper}'.format(name='eric') # An interesting feature of format() is the format specification: # instead of just inserting a field with {field}, # we can specify a format like this: {field:spec}. "{:.3}".format(3.14159) import string class SuperFormatter(string.Formatter): def format_field(self, value, spec): if spec == 'call': return value() else: return super(SuperFormatter, self).format_field(value, spec) sf=SuperFormatter() sf.format('My name is {name.upper:call}', name="eric") class SuperFormatter(string.Formatter): """World's simplest Template engine.""" def format_field(self, value, spec): if spec.startswith('repeat'): template = spec.partition(':')[-1] if type(value) is dict: value = value.items() return ''.join([template.format(item=item) for item in value]) elif spec == 'call': return value() elif spec.startswith('if'): return (value and spec.partition(':')[-1]) or '' else: return super(SuperFormatter, self).format_field(value, spec) import unittest class TestSuperFormatterMethods(unittest.TestCase): """tests.""" def test_basic(self): sf = SuperFormatter() self.assertEqual( sf.format('a is {a}', a="A"), "a is A" ) def test_repeat_list(self): sf = SuperFormatter() self.assertEqual( sf.format('''Table of contents: {chapters:repeat:Chapter {{item}} }''', chapters=["I", "II", "III", "IV"]), '''Table of contents: Chapter I Chapter II Chapter III Chapter IV ''' ) def test_repeat_dict(self): sf = SuperFormatter() self.assertEqual( sf.format( '''Books: {books:repeat:"{{item[1]}}" by {{item[0]}} ---- }''', books={ '': '', '': '', }), '''Books: "Notre Dame de Paris" by ---- "" by ---- ''' ) def test_call(self): sf = SuperFormatter() self.assertEqual( sf.format('My name is {name.upper:call}', name="eric"), 'My name is ERIC' ) def test_if_static(self): sf = SuperFormatter() self.assertEqual( sf.format('Action: Back / Logout {manager:if:/ Delete}', manager=True), 'Action: Back / Logout / Delete' ) def test_if_with_fields(self): sf = SuperFormatter() self.assertEqual( sf.format('Action: Back / Logout {manager:if:/ Delete {id}}', manager=True, id=34), 'Action: Back / Logout / Delete 34' ) unittest.main()E ====================================================================== ERROR: /Users/xiaofeiwu/Library/Jupyter/runtime/kernel-e6142983-7aa9-4f76-b06b-3c75e1e290cb (unittest.loader._FailedTest) ---------------------------------------------------------------------- AttributeError: module '__main__' has no attribute '/Users/xiaofeiwu/Library/Jupyter/runtime/kernel-e6142983-7aa9-4f76-b06b-3c75e1e290cb' ---------------------------------------------------------------------- Ran 1 test in 0.001s FAILED (errors=1)Occupation Introduction:Special thanks to: 
https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary librariesimport pandas as pdStep 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user). Step 3. Assign it to a variable called users.users = pd.read_table('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user', sep='|', index_col='user_id') users.head()View the qualitative feedback:for i, f in data_df['feedback'].iteritems(): print(f) printRanking was quite challenging, especially given that several statements were highly similar or conceptually related. I think it would be helpful to have some very stupid arguments thrown in so that you can have greater variability in your measurement. Glad to see you taking an interest in social psychology, little sister! ranking is difficult, scoring and/or selecting is easier Ticking the 5 most relevant points to support my argument is the easiest form of feedback. Placing the different points in order is the most difficult and requires the most time as you have to read through each point multiple times in order to make comparisons and form the list in the order you want. Ranking the points on a scale of 1-10 is also relatively easy, but may not give the most accurate results as I often just end up choosing a random number in the region of important (6-10) or unimportant (which in this case I just left as 1 as I had 5 points already). I liked the first metric (separate score[...]function [regret] = set_regret(profile,S,T)% Regret of set S for set Tif isempty(T) regret = 0; returnend[I,J] = find(ismember(profile,S));[~,ind] = unique(I,'first');first_indices_S = J(ind);[I,J] = find(ismember(profile,T));[~,ind] = unique(I,'first');first_indices_T = J(ind);regret = sum((first_indices_T < first_indices_S).*(1./first_indices_T));enddef set_regret(profile, S, T): ''' Minimax Regret Set (naive method) ''' if len(T) == 0: regret = 0; return regret T = set([2,3,7,11]) S = set([1,5]) len(T) == 0 profile = [list(np.random.permutation([1,2,3,4,5,6,7,8,9,10])) for i in range(10)] # first_indices_T = np.array([[i for i,elem in enumerate(x) if x[i] in T][0] + 1 for x in profile]) # first_indices_S = np.array([[i for i,elem in enumerate(x) if x[i] in S][0] + 1 for x in profile]) # regret = np.sum((first_indices_T < first_indices_S)*(1./first_indices_T)); # regret import itertools k = 3 permutations1 = itertools.permutations([1,2,3,4,5,6,7,8,9,10], k) output_regret = np.inf permutations = None for S in permutations1: max_reg = 0 permutation = None permutations2 = itertools.permutations([1,2,3,4,5,6,7,8,9,10], k) for T in permutations2: first_indices_T = np.array([[i for i,elem in enumerate(x) if x[i] in T][0] + 1 for x in profile]) first_indices_S = np.array([[i for i,elem in enumerate(x) if x[i] in S][0] + 1 for x in profile]) regret = np.sum((first_indices_T < first_indices_S)*(1./first_indices_T)); if regret > max_reg: max_reg = regret if max_reg < output_regret: output_regret = max_reg permutations = S output_regret permutations import pickle q1_sort_profile = data_df['question1'][data_df['question1']['qustion_type'] == 1][[1,2,3,4,5,6,7,8,9,10]].values with open('./q1_sort_profile.p','wb') as file_: pickle.dump(q1_sort_profile, file_) q2_sort_profile = data_df['question2'][data_df['question2']['qustion_type'] == 1][[1,2,3,4,5,6,7,8,9,10]].values with open('./q2_sort_profile.p','wb') as file_: pickle.dump(q2_sort_profile, file_) q3_sort_profile = 
data_df['question3'][data_df['question3']['qustion_type'] == 1][[1,2,3,4,5,6,7,8,9,10]].values with open('./q3_sort_profile.p','wb') as file_: pickle.dump(q3_sort_profile, file_) def rate_to_rank(question_num): rate_profile = data_df['question{}'.format(question_num)][data_df['question{}'.format(question_num)]['qustion_type'] == 3][[1,2,3,4,5,6,7,8,9,10]].values rate_list = [] for i in range(len(rate_profile)): unique, counts = np.unique(rate_profile[i], return_counts=True) # print "before", unique, counts for k in range(len(counts)): if counts[k]>1: cntr = 1 while np.sum(rate_profile[i]==unique[k])>1: indices = np.where(rate_profile[i]==unique[k]) idx = np.random.choice(indices[0]) rate_profile[i][idx] += cntr*.0000001 cntr+=1 ranking = zip(list(rate_profile[i]), range(10)) # print(sorted(ranking)) # print([x[1]+1 for x in reversed(sorted(ranking))]) rate_list.append([x[1]+1 for x in reversed(sorted(ranking))]) rate_list = np.array(rate_list) # print(rate_list) with open('./q{}_rate_profile.p'.format(question_num),'wb') as file_: pickle.dump(rate_list, file_) rate_to_rank(1) rate_to_rank(2) rate_to_rank(3) for k in range(1,4): for typ in ['rate','sort']: with open('./q{}_{}_results.p'.format(k,typ), 'rb') as file_: results = pickle.load(file_) print("Question {}, Type: {}".format(k,typ)) print results for i in range(len(results)): print(question_mapping['question{}'.format(k)]['answers']['{}'.format(results[i])]) print('\n') # print(question_mapping['question1']['answers']['6']) # print(question_mapping['question1']['answers']['9']) # print(question_mapping['question1']['answers']['8']) # print # print(question_mapping['question2']['answers']['3']) # print(question_mapping['question2']['answers']['5']) # print(question_mapping['question2']['answers']['1']) # print # print(question_mapping['question3']['answers']['6']) # print(question_mapping['question3']['answers']['6']) # print(question_mapping['question3']['answers']['10']) def calc_spearman(question_num): rate_profile = data_df['question{}'.format(question_num)][data_df['question{}'.format(question_num)]['qustion_type'] == 3][[1,2,3,4,5,6,7,8,9,10]].values rate_list = [] for i in range(len(rate_profile)): unique, counts = np.unique(rate_profile[i], return_counts=True) # print "before", unique, counts for k in range(len(counts)): if counts[k]>1: cntr = 1 while np.sum(rate_profile[i]==unique[k])>1: indices = np.where(rate_profile[i]==unique[k]) idx = np.random.choice(indices[0]) rate_profile[i][idx] += cntr*.0000001 cntr+=1 ranking = zip(list(rate_profile[i]), range(10)) # print(sorted(ranking)) # print([x[1]+1 for x in reversed(sorted(ranking))]) rate_list.append([x[1]+1 for x in reversed(sorted(ranking))]) rate_list = np.array(rate_list) srs = [] for i in range(len(rate_list)): for j in range(i+1,len(rate_list)): # print spearmanr(rate_list[i],rate_list[j]) srs.append(spearmanr(rate_list[i],rate_list[j])) return srsRating Spearman Corr Coeffsr_list_1 = calc_spearman(1) print(np.mean([sr_list_1[i][0] for i in range(len(sr_list_1))])) sr_list_2 = calc_spearman(2) print(np.mean([sr_list_2[i][0] for i in range(len(sr_list_2))])) sr_list_3 = calc_spearman(3) print(np.mean([sr_list_3[i][0] for i in range(len(sr_list_3))]))0.0020202020202 -0.037037037037 -0.00826446280992Ranking Spearman Corr Coeffdef avg_scc_q(question_num): sort_profile = data_df['question{}'.format(question_num)][data_df['question{}'.format(question_num)]['qustion_type'] == 1][[1,2,3,4,5,6,7,8,9,10]].values srs = [] wc = [] kt = [] mw = [] for i in 
range(len(sort_profile)): for j in range(i+1,len(sort_profile)): # print spearmanr(rate_list[i],rate_list[j]) srs.append(spearmanr(sort_profile[i],sort_profile[j])) wc.append(wilcoxon(sort_profile[i],sort_profile[j])) kt.append(kendalltau(sort_profile[i],sort_profile[j])) mw.append(mannwhitneyu(sort_profile[i],sort_profile[j])) print(np.mean([srs[i][0] for i in range(len(srs))])) print(np.mean([wc[i][0] for i in range(len(wc))])) print(np.mean([kt[i][0] for i in range(len(kt))])) print(np.mean([mw[i][0] for i in range(len(mw))])) avg_scc_q(1) avg_scc_q(2) avg_scc_q(3)/Users/soph/anaconda/envs/py27/lib/python2.7/site-packages/scipy/stats/morestats.py:2397: UserWarning: Warning: sample size too small for normal approximation. warnings.warn("Warning: sample size too small for normal approximation.")Edge samplingedge_sampling=False if edge_sampling: sampling_rates=[0.1, 0.3, 0.5, 0.7, 0.9] sampler = ParametersZip(EdgeSampler(), {'desired_fraction' : sampling_rates}) result1=GraphPipeline(graphs, measures, sampler=sampler).execute(repetitions=5) if edge_sampling: display(Markdown("# edge sampling")) plot(result1)Node Samplingnode_sampling=False if node_sampling: sampler = ParametersZip(NodeSampler(), {'desired_fraction' : sampling_rates}) result2=GraphPipeline(graphs, measures, sampler=sampler).execute(repetitions=5) if node_sampling: display(Markdown("# node_sampling")) plot(result2)snowball sampling#def start_func(graph): # return np.random.choice(graph.get_vertices(), int(graph.num_vertices()*0.01), replace=False) #sampler = ParametersZip(SnowballSampler(start_func), {'desired_fraction' : sampling_rates, # 'retain_factor' : 1}) #result3=GraphPipeline(graphs, measures, sampler=sampler).execute(repetitions=5) #display(Markdown("# snowball sampling")) #plot(result3)Similarity sampling: Keep top edges by similaritysampler = ParametersZip(SimilarityEdgeSampler2(), {'sim_type':'jaccard', 'retain_factor' : [0.1,0.3,0.5,0.7,0.9,1]}) result4 = GraphPipeline(graphs, measures, sampler=sampler).execute(repetitions=1) display(Markdown("# similarity sampling")) plot(result4, maxk=1000, show_std=False)#[np.logical_and(result4.retain_factor != 0.5, result4.retain_factor != 0.3 )]) def plot2(result, measure="degree", cmap=None, **kwargs): plt.set_cmap("Reds") for name in np.unique(result.name): display(Markdown(f"# {name}")) result_tmp = result[result.name ==name] for measure in ["degree"]:#, "pagerank", "katz"]:#, "katz", "betweenness"]: display(Markdown(f"### {measure}")) fig = plot_top_places(result_tmp, measure=measure, groupby="alpha", cmap=cmap, **kwargs) display(fig) def to_alpha(alpha, values): return values ** alpha samp = AdditionalParamWrapper(SimilarityEdgeSampler(), partial(change_function_params, to_alpha), "alpha") #samp.function sampler_2 = ParametersZip(samp, {'sim_type':'jaccard', 'alpha' : [0, 0.01, 0.1, 0.5, 1.0, 2.0]}) result5 = GraphPipeline(graphs, measures, sampler=sampler_2).execute(repetitions=10) plot2(result5, maxk=1000, show_std=False, cmap=my_cmap) raise NotImplementedError def plot3(result, measure="degree", cmap=None, **kwargs): plt.set_cmap("Reds") for name in np.unique(result.name): display(Markdown(f"# {name}")) result_tmp = result[result.name ==name] for measure in ["degree"]:#, "pagerank", "katz"]:#, "katz", "betweenness"]: display(Markdown(f"### {measure}")) fig = plot_distr(result_tmp, groupby="retain_factor", cmap=cmap, **kwargs) display(fig) plot_distr_outer(result4, normalize=False, show_std=False)Challenge 1.1myinput = 
'/home/fmuinos/projects/adventofcode/2016/ferran/inputs/input1.txt'Solution 1def update_direction(face, turn): rotate = {'L': [-1,1], 'R': [1,-1]} new_face = (rotate[turn][0]*face[1], rotate[turn][1]*face[0]) return new_face def update_state(pos, face, turn, dist): new_face = update_direction(face, turn) new_pos = (pos[0] + dist * new_face[0], pos[1] + dist * new_face[1]) return new_pos, new_face def parse_list(path): with open(path, 'rt') as f: mystr = f.read() mylist = mystr.rstrip('\n').split(', ') return mylist def solver(path): hints = parse_list(path) pos = (0,0) face = (0,1) for hint in hints: turn = hint[0] dist = int(hint[1:]) pos, face = update_state(pos, face, turn, dist) return abs(pos[0]) + abs(pos[1]) %%time print(solver(myinput))307 CPU times: user 0 ns, sys: 0 ns, total: 0 ns Wall time: 3.54 msSolution 2 (recursive)def follow_hints(hints, pos, face): if len(hints) == 0: return abs(pos[0]) + abs(pos[1]) else: hint = hints[0] turn = hint[0] dist = int(hint[1:]) rotate = {'L': [-1,1], 'R': [1,-1]} new_face = (rotate[turn][0]*face[1], rotate[turn][1]*face[0]) new_pos = (pos[0] + dist * new_face[0], pos[1] + dist * new_face[1]) return follow_hints(hints[1:], new_pos, new_face) %%time hints = parse_list(myinput) print(follow_hints(hints, [0,0], [0,1]))307 CPU times: user 4 ms, sys: 0 ns, total: 4 ms Wall time: 2.67 msChallenge 1.2def first_repeated(path): hints = parse_list(path) pos = (0,0) face = (0,1) visited = set([]) for hint in hints: turn = hint[0] dist = int(hint[1:]) for i in range(dist): pos, new_face = update_state(pos, face, turn, 1) if not pos in visited: visited.add(pos) else: return abs(pos[0]) + abs(pos[1]) face = new_face first_repeated(myinput)**Author:** Importsimport tensorflow as tf print("TensorFlow version:", tf.__version__) import matplotlib.pyplot as plt from sklearn.model_selection import KFold import numpy as npLoading Datamnist = tf.keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 print('Train: X=%s, y=%s' % (x_train.shape, y_train.shape)) print('Test: X=%s, y=%s' % (x_test.shape, y_test.shape))Train: X=(60000, 28, 28), y=(60000,) Test: X=(10000, 28, 28), y=(10000,)EDAfor i in range(9): plt.subplot(330 + 1 + i) plt.imshow(x_train[i], cmap=plt.get_cmap('gray'))Define Modelmodel = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(10), tf.keras.layers.Softmax() # is this fine? 
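# Editor's note (an added comment, not the original author's answer): yes, this is fine,
# because loss='sparse_categorical_crossentropy' defaults to from_logits=False and therefore
# expects the probabilities this Softmax layer produces. A common, numerically more stable
# alternative is to drop the Softmax layer and compile with
# tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), applying softmax only
# when probabilities are needed for reporting.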
]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])Cross Validation Loop and Model Trainingdef cross_val(x, y, n_folds=5): scores, histories = list(), list() # prepare cross validation kfold = KFold(n_folds, shuffle=True, random_state=1) counter = 0 for train_ix, test_ix in kfold.split(x): # select rows for train and test x_train, y_train, x_test, y_test = x[train_ix], y[train_ix], x[test_ix], y[test_ix] # fit model history = model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), verbose=2) # evaluate model _, acc = model.evaluate(x_test, y_test, verbose=2) print('\n------------------------------Fold #{counter} Validation Acc {acc}------------------------------\n'.format(counter = counter + 1, acc = acc * 100.0)) # append scores scores.append(acc) histories.append(history) counter += 1 return scores, histories scores, histories = cross_val(x_train, y_train) test = model.evaluate(x_test, y_test, verbose=2)313/313 - 1s - loss: 0.3727 - accuracy: 0.8897 - 659ms/epoch - 2ms/stepMetricsfor i in range(len(histories)): # plot loss plt.title('Cross Entropy Loss') plt.plot(histories[i].history['loss'], color='green', label='train') plt.plot(histories[i].history['val_loss'], color='red', label='val') plt.legend(['train', 'val'], loc='upper left') plt.xlabel('epoch') plt.ylabel('loss') plt.show() for i in range(len(histories)): # plot accuracy plt.title('Classification Accuracy') plt.plot(histories[i].history['accuracy'], color='green', label='train') plt.plot(histories[i].history['val_accuracy'], color='red', label='val') plt.legend(['train', 'val'], loc='upper left') plt.xlabel('epoch') plt.ylabel('acc') plt.show()Analyzing Model Predictionsdef plot_img(i, pred, ground_truth, img): pred, ground_truth, img = pred[i], ground_truth[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(pred) if predicted_label == ground_truth: color = 'green' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(pred), class_names[ground_truth]), color=color) def plot_probas(i, pred, ground_truth): pred, ground_truth = pred[i], ground_truth[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plot= plt.bar(range(10), pred, color="black") plt.ylim([0,1]) predicted_label = np.argmax(pred) plot[predicted_label].set_color('red') plot[ground_truth].set_color('blue') predictions = model.predict(x_test) class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] num_rows = 5 num_cols = 5 num_images = num_rows*num_cols plt.figure(figsize=(2*2*num_cols, 2*num_rows)) plt.title("Predictions on test set, DNN") for i in range(num_images): plt.subplot(num_rows, 2*num_cols, 2*i+1) plot_img(i, predictions, y_test, x_test) plt.subplot(num_rows, 2*num_cols, 2*i+2) plot_probas(i, predictions, y_test)Plotting with Pandas Program so far ***- Python Basics- Python Programming Constructs- Data Structures- Functions- Object Oriented Programming in Python- NumPy- Pandas What are we going to learn today?***- Python Data Visualization Libraries - Matplotlib - Matplotlib basics - Barplot - Histograms - Box Plots - Scatter Plots - Seaborn- Plot Types - Plot Customization Python Data Visualization LibrariesPython is vastly popular among data scientists, one key reason being its strength with data analysis. Data Visualization is a key skill for aspiring data scientists. 
It helps us to interpret data easily and convey information in an effective way. Why is it important?1. Explore data to get a better grasp of the data.2. Report insights to convey their results to somebody else.*** Image source : https://matplotlib.org/*** Introduction:Arguably, the most popular data visualization library with Python is Matplotlib. It produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shell, Jupyter notebook, web application servers, and four graphical user interface toolkits.Matplotlib makes hard things easy to interpret. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc., with just a few lines of code.It is used along with NumPy and Pandas to provide an environment that is an effective open source alternative for MatLab. It can also be used with graphics toolkits like PyQt and wxPython.Here, you will learn to build various types of plots and to customize them to make them more visually appealing and interpretable.*** --> Matplotlib Basics***At the core of it, we need to remember some important things1. The canvas to "draw" data(stored in the computer's memory)2. The axes3. "Show" that dataEverything else is just details. Technical Details***# Import the required Libraries from matplotlib import pyplot as plt import numpy as np # Set it up to work with Jupyter Notebook # (this is a Jupyter command, not Python code) %matplotlib inlineSample***# Create data def create_sample_chart(): x_series = np.arange(10) y_series = x_series ** 2 #Plotting to our canvas in memory plt.plot(x_series, y_series) #Title of our canvas plt.title('String Title Here') #X-axis label plt.xlabel('X Axis Title Here') #Y_axis label plt.ylabel('Y Axis Title Here') #Showing what we plotted plt.show(); create_sample_chart()Mini-Challenge - 1***Create a graph for `y = sin(x)`. X values should be from 0 to 10, incrementing in steps of 0.1Remember to label the axes and the chart title appropriately.def sine_wave(): '''Write your code here''' sine_wave()Let's do this with the Weather data!***Quick Refresher:The Weather Dataset is a time-series data set with per-hour information about the weather conditions at a particular location. It records Temperature, Dew Point Temperature, Relative Humidity, Wind Speed, Visibility, Pressure, and Conditions. Read the data set# Data Introduction import pandas as pd weather_df = pd.read_csv('../data/weather_2012.csv', parse_dates=True, index_col='Date/Time') weather_df.head(5)How did the temperature vary amongst the months of the year?***Since we want to compare amongst months, we need one value for each month. Let's pick the mean for doing this.Generate a line chart that visualizes the temperature readings in the month of January:* x-values should be the values should be the months.* y-values should be the mean values in the Temp (C) column per month.Display the plot. Creating the data in the required formatmonthly_data = weather_df.groupby(weather_df.index.month).mean() monthly_data.head(3) x_series = monthly_data.index y_series = monthly_data['Temp (C)']Plotting: Step 1plt.plot(x_series, y_series) plt.title('Temperature Trend, 2012') plt.xlabel('Month') plt.ylabel('Temp (C)') plt.show()Plotting: Step 2The chart we plotted is decent, but we aren't done yet. The X axis is labelled correctly ("Month"), but the values themselves are 2, 4, etc, instead of January, February, etc. 
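The mechanism for this is `plt.xticks(positions, labels)`, which replaces the tick values at the given x positions with arbitrary text labels (the same call the bar plot example further below uses with `calendar.day_abbr`). A tiny self-contained sketch with toy data, so you can still work out the weather-data version yourself in the next mini-challenge:

```python
# Toy example of relabelling ticks: plot three points and name the tick positions.
from matplotlib import pyplot as plt

x_series = [1, 2, 3]
y_series = [10, 20, 15]

plt.plot(x_series, y_series)
plt.xticks(x_series, ['January', 'February', 'March'])  # tick positions, then labels
plt.show()
```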
Mini-Challenge - 2***Generate the same plot as above, but this time the values of the "Month" should be January, February etc instead of 1,2,3,etc.# First, get calendar month names import calendar calendar_months = calendar.month_name[1:] print(calendar_months) # now we have the names, let's use them instead ''' Your code goes here''' # which x_series values to replace by which names '''Your code goes here'''Plotting: Step 3Almost done, but not quite. We need the X-axis labels to be a bit more legible. Let's rotate them a bit. Mini-Challenge - 3***Generate the same graph as above, but with the X-axis labels rotated, so that they are legible.'''Your code goes here'''Other Plot Types***The chart we have so far worked with is called a Line Chart. There are other types of plots. Some important examples are:- Bar Plots- Histograms- Box Plots- Scatter Plots Barplot***Some of the important characteristics of a barplot are:1. It is a chart or graph that presents grouped data with rectangular bars with lengths proportional to the values that they represent.2. They can be both vertical and horizontal.3. It shows the relationship between a numerical variable and a categorical variable.4. Bar charts can also show big changes in data over time.5. Line graphs are useful for displaying smaller changes in a trend over time. Bar graphs are better for comparing larger changes or differences in data among groups.6. Bar graphs are an effective way to compare items between different groups.Types of bar chart- 1. Vertical 2. Horizontal3. StackedThe only difference in plotting a bar chart from plotting a line chart is you call **`plt.bar()`** instead of **`plt.plot()`** Let's see how barplot looks !***Let's find the variation in mean visibility, aggregated by day of the weekimport matplotlib.pyplot as plt import calendar def bar_plot(): weekly_data = weather_df.groupby(weather_df.index.dayofweek).mean() plt.bar(weekly_data.index, weekly_data['Visibility (km)']) plt.title('Visibility by week, 2012') plt.xlabel('Day of week') plt.ylabel('Visibility (km)') plt.xticks(weekly_data.index, calendar.day_abbr, rotation=45) plt.show() bar_plot()Histogram***Some important characteristics of histogram are:1. It is a display of statistical information that uses bars to show the frequency of data items in successive numerical intervals of equal size.2. They can be both vertical and horizontal.3. It has only a numerical variable as input and shows its distribution.4. It divides up the range of possible values in a data set into classes or groups.5. Generally, a histogram will have bars of equal width, although this is not the case when class intervals vary in size.6. It is generally used when dealing with large data sets (greater than 100 observations).7. A histogram can also help detect any unusual observations (outliers) or any gaps in the data.8. In a histogram, it is the area of the bar that indicates the frequency of occurrences for each bin. Since a histogram is a plot of frequency(X) vs X, it only needs one series.# Sample histogram x = np.arange(0, 10, 0.1) y1 = (((x - 3) ** 3 ) - 100) + np.random.randint(-20, 20, size=len(x)) plt.hist(y1) plt.show()Mini-Challenge - 4***Find the distribution of values in the Wind Speed columndef hist_plot(): '''Your code goes here''' hist_plot()Box Plot***In descriptive statistics, a box plot is a method for graphically depicting groups of numerical data through their quartiles.Some important characteristics of a box plot are:1. 
A boxplot is a standardized way of displaying the distribution of data based on the five number summary: minimum, first quartile, median, third quartile, and maximum.2. Simplest possible box plot displays the full range of variation (from min to max), the likely range of variation (the IQR), and a typical value (the median). Not uncommonly real datasets will display surprisingly high maximums or surprisingly low minimums called outliers. has provided a precise definition for two types of outliers: (a) Outliers are either 3×IQR or more above the third quartile or 3×IQR or more below the first quartile. (b) Suspected outliers are are slightly more central versions of outliers: either 1.5×IQR or more above the third quartile or 1.5×IQR or lesser below the first quartile. Now, lets discuss some important definitions that we commonly encounter while generating a boxplot.**Median**-The median (middle quartile) marks the mid-point of the data and is shown by the line that divides the box into two parts. Half the scores are greater than or equal to this value and half are less.**Inter-quartile range**-The middle “box” represents the middle 50% of scores for the group. The range of scores from lower to upper quartile is referred to as the inter-quartile range. The middle 50% of scores fall within the inter-quartile range.**Upper quartile**-Seventy-five percent of the scores fall below the upper quartile.**Lower quartile**-Twenty-five percent of scores fall below the lower quartile.**Whiskers**-The upper and lower whiskers represent scores outside the middle 50%. Whiskers often (but not always) stretch over a wider range of scores than the middle quartile groups.# Sample boxplot x= np.arange(0, 10, 0.1) y = np.exp(x) plt.boxplot(y) plt.show()Mini-Challenge - 5***Draw a boxplot for wind speeddef box_plot(): '''Your code goes here''' box_plot()Scatter Plot***Scatter plots are similar to line graphs in that they use horizontal and vertical axes to plot data points. However, they have a very specific purpose. Scatter plots show how much one variable is affected by another. The relationship between two variables is called their correlation .Some important characteristics of a scatter plot are:1. A scatter plot can indicate the presence or absence of an association or relationship between two variables. * If some association or relationship exists, the data will tend to cluster on or around some line or curve that cuts through the plotted points. * If no association or relationship exists, the data will appear spread out.2. When some association or relationship exists between two variables, a scatter plot can indicate the direction of the relationship. * When high values of one variable correspond to high values of another variable, and low values of one variable correspond to low values of another variable, we say the relationship is positive because there is a direct relationship between the two variables. * When high values of one variable correspond to low values of another variable, and low values of one variable correspond to high values of another variable, we say the relationship is negative because there is an inverse relationship between the two variables.3. Calculating the correlation coefficient will give us a precise number and a scatter plot helps us find outliers, gain a more intuitive sense of how spread out the data is. 4. A scatter plot can be used to help determine if an association or relationship is linear or nonlinear.5. 
A scatter plot provides a visual revelation of outliers on dubiously measured or erroneously plotted data. Spotting correlation in scatter plots![](../images/KssJXmts69TqMUTHuuFVDQFu.bmp) Example![](../images/sample_scatterplot2.png)# Sample scatter plot x= np.arange(0, 10, 0.1) y1 = (((x - 3) ** 3 ) - 100) + np.random.randint(-20, 20, size=len(x)) y2 = (((3 - x) ** 3 ) + 50) + np.random.randint(-20, 20, size=len(x)) plt.scatter(x, y1, c='r') plt.scatter(x, y2, c='b') plt.show()Mini-Challenge - 6***Are the temperature and pressure correlated, according to the data in the month of January? Look at it by generating a scatter plotjan_df = weather_df['2012-01'] def scatter_plot(): '''Your code goes here''' scatter_plot()Plot Customizations***Matplotlib being a low level library, provides a lot of ways to customize your plots. Some examples include:- Configuring the line (color, width, ticks, etc)- Configuring the axes (setting limits, layout, etc.)- Figure and Sub Plots: Multiple plots on one canvas- Configuring the figure size- Adding legends, etc. Configuring the plotted line***It's easy to set colors, line types, etc. An example is below.fig, ax = plt.subplots() ax.plot(x, x**2, 'b.-') # blue line with dots ax.plot(x, x**2.5, 'g--') # green dashed line ax.plot(x, x**3, c='r') # red line color fig.show()/home/pranav/anaconda3/lib/python3.6/site-packages/matplotlib/figure.py:459: UserWarning: matplotlib is currently using a non-GUI backend, so cannot show the figure "matplotlib is currently using a non-GUI backend, "`.figure()` and `.axes()`*****Figure:** This object essentially returns a new figure to work with. Axes need to be added to the figure to plot on it. The Figure is the overall window or page that everything is drawn on. You can create multiple independent Figures. A Figure can have several other things in it(subtitle, legend, color bar etc).**Axes:** To the figure you add Axes. The Axes is the area on which the data is plotted with functions such as plot() and scatter() and that can have ticks, labels, etc. associated with it. This explains why Figures can contain multiple Axes.The methods on axes work the same way as **`.plot()`**def two_plots(): x = np.array([0, 1, 2, 3, 4, 5]) y = x ** 2 # Create Figure (empty canvas) fig = plt.figure() # Add set of axes to figure axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes # (0.2x left, 0.5x bottom) to (0.4x width, 0.3x height) # Larger Figure Axes 1 axes1.plot(x, y, 'b') axes1.set_xlabel('X_label_axes1') axes1.set_ylabel('Y_label_axes1') axes1.set_title('Axes 1 Title') # Insert Figure Axes 2 axes2.plot(y, x, 'r') axes2.set_xlabel('X_label_axes2') axes2.set_ylabel('Y_label_axes2') axes2.set_title('Axes 2 Title'); two_plots()`.subplots()`***The plt.subplots() object will act as a more automatic axis manager and is used to creating multiple charts on the same figure. 
It is similar to `plt.figure()`, except use tuple unpacking to grab fig and axes.# Canvas of 2 by 2 subplots fig, axes = plt.subplots(nrows=2, ncols=2) # axes is an array of shape (2, 2)Mini-Challenge - 7*** Create a figure with four subplots, and plot `y = x ** 2` on the first and last plotsMake the color red in the first subplot, and a green dashed line in the lastdef sub_plots_example(): '''Your code goes here''' sub_plots_example()`.xlim` & `.ylim`We can configure the ranges of the axes using the set_ylim and set_xlim methods in the axis object, or axis('tight') for automatically getting "tightly fitted" axes ranges. `.tight_layout()`A common issue with matplotlib is overlapping subplots or figures. We can use **`fig.tight_layout()`** or **`plt.tight_layout()`** method, which automatically adjusts the positions of the axes on the figure canvas so that there is no overlapping content. `.figsize`figsize is a tuple of the width and height of the figure in inches `.legend()`You can use the label="label text" keyword argument when plots or other objects are added to the figure, and then using the legend method without arguments to add the legend to the figure. `.savefig()`***Matplotlib can generate high-quality output in a number formats, including PNG, JPG, EPS, SVG, PGF and PDF. fig.savefig("filename.png", dpi=200) Seaborn***Seaborn is a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics.Understanding matplotlib was essential for understanding how to create plots, but we'll be using Seaborn for general data analysis. Why? Because it's beautiful and easy!***Let's revisit a question we solved today: **What is the distribution of values in the Wind Speed column?**import seaborn as sns sns.distplot(weather_df['Wind Spd (km/h)'], bins=25)/home/pranav/anaconda3/lib/python3.6/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg. 
warnings.warn("The 'normed' kwarg is deprecated, and has been "TF-IDFtf-idf(英语:term frequency–inverse document frequency)是一种用于信息检索与文本挖掘的常用加权技术。tf-idf是一种统计方法,用以评估一字词对于一个文件集或一个语料库中的其中一份文件的重要程度。字词的重要性随着它在文件中出现的次数成正比增加,但同时会随着它在语料库中出现的频率成反比下降。import pandas as pd import jieba import time import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn import metrics from sklearn.externals import joblib from sklearn.utils import shuffle def chinese_word_cut(s): # 中文分词(jieba) return ' '.join(jieba.cut(s)) def train_and_predict(train_x, train_y, test_x, test_y): # 使用多项分布朴素贝叶斯分类器进行训练 clf = MultinomialNB().fit(train_x, train_y) # 在本地保存 model 参数 joblib.dump(clf, 'model.pkl') # 预测 test_x 中的结果 predicted = clf.predict(test_x) # 打印报表 print(metrics.classification_report(test_y, predicted)) print('accuracy_score: {}'.format(metrics.accuracy_score(test_y, predicted))) def read_vocab(path): # 读取 path 下文本的内容,返回 list https://github.com/goto456/stopwords f = open(path, 'r', encoding='utf8') res = f.readlines() f.close() return res加载训练集以及测试集train_data = pd.read_csv('cnews.train.txt', sep='\t', names=['label', 'content']) test_data = pd.read_csv('cnews.test.txt', sep='\t', names=['label', 'content']) # shuffle 一下 shuffle(train_data) shuffle(test_data)对于数据集中每一个句子进行中文分词train_data['content'] = train_data['content'].apply(chinese_word_cut) test_data['content'] = test_data['content'].apply(chinese_word_cut)根据词袋向量统计TF-IDF定义 TfidfVectorizer,将 train_data 以及 test_data 转换为词向量并计算 TF-IDF# 可以直接在 TfidfVectorizer 中传入停用词,设置词汇维度最大为 max_features tfidf = TfidfVectorizer(max_features=100000, stop_words=read_vocab('stop_word.txt')) # 拼接 train_data 以及 test_data 的作用是为了获得同一空间中的词汇向量(但实际中还是提前使用训练集做个映射表比较好,因为测试集中可能某些词汇在训练集中没有出现过) # 为了偷懒一起做方便很多 x = tfidf.fit_transform(train_data['content'].append(test_data['content'])) train_x = x[:len(train_data)] test_x = x[len(train_data):] train_y = train_data['label'] test_y = test_data['label']C:\Anaconda3\envs\mLearning\lib\site-packages\sklearn\feature_extraction\text.py:300: UserWarning: Your stop_words may be inconsistent with your preprocessing. 
Tokenizing the stop words generated tokens ['一些', '一何', '一切', '一则', '一方面', '一旦', '一来', '一样', '一般', '一转眼', '万一', '上下', '不仅', '不但', '不光', '不单', '不只', '不外乎', '不如', '不妨', '不尽', '不尽然', '不得', '不怕', '不惟', '不成', '不拘', '不料', '不是', '不比', '不然', '不特', '不独', '不管', '不至于', '不若', '不论', '不过', '不问', '与其', '与其说', '与否', '与此同时', '且不说', '且说', '两者', '个别', '为了', '为什么', '为何', '为止', '为此', '为着', '乃至', '乃至于', '之一', '之所以', '之类', '乌乎', '也好', '也罢', '二来', '于是', '于是乎', '云云', '云尔', '人们', '人家', '什么', '什么样', '介于', '仍旧', '从此', '从而', '他人', '他们', '以上', '以为', '以便', '以免', '以及', '以故', '以期', '以来', '以至', '以至于', '以致', '任何', '任凭', '似的', '但凡', '但是', '何以', '何况', '何处', '何时', '余外', '作为', '你们', '使得', '例如', '依据', '依照', '便于', '俺们', '倘使', '倘或', '倘然', '倘若', '假使', '假如', '假若', '傥然', '先不先', '光是', '全体', '全部', '关于', '其一', '其中', '其二', '其他', '其余', '其它', '其次', '具体地说', '具体说来', '兼之', '再其次', '[...]测试文章中词语的 TF-IDF 权重与文章类别之间的关系抽取 5 条数据分别计算其中词语的 TF-IDF 权重并降序输出前 10 个关键词,并与其真实类别对比tfidf.get_feature_names 可以获取当前模型中所有的词语,最大数量为 max_featuresword = tfidf.get_feature_names() for i in np.random.randint(train_x.shape[0], size=5): print('label: ', train_y[i]) arg_sort = np.argsort(-train_x[i].toarray()[0])[:10] for j in arg_sort: print(word[j], '\t', train_x[i].toarray()[0][j]) print('--------------------------------')label: 房产 流动人口 0.4082248955538784 计划生育 0.34829150970183487 改革 0.34125189927858635 深化 0.19670126794087855 资本金 0.19556879143230255 推进 0.1890876174017774 项目 0.15657100082162545 条例 0.1437456888734804 会议 0.13021221269175748 投资 0.12625010914409945 -------------------------------- label: 娱乐 激浪 0.4316732046209529 青春 0.3830683472008551 黄轩 0.3179488217376342 黄晓明 0.2286114607959972 龙舟 0.18052996401984894 陈乔恩 0.17527095996307465 梁柏坚 0.13644213118630136 车震 0.13438219859039577 法宝 0.1238574067351264 青春片 0.12252506778901526 -------------------------------- label: 教育 雅思考试 0.5728694476032157 考生 0.2701400489157494 史哲明 0.24672257560968106 雅思 0.22926765074731068 文化教育 0.17088583079652214 大使馆 0.15683943946480075 院校 0.1563748289754174 超过 0.14911140474311899 认可 0.14811921410744747 英国 0.12173639512952922 -------------------------------- label: 房产 农村 0.4475497399879565 吕祖善 0.35359914278201837 农民 0.3179448351651087 改造 0.26207820762374934 住房 [...]训练并预测train_and_predict(train_x, train_y, test_x, test_y)precision recall f1-score support 体育 1.00 1.00 1.00 1000 娱乐 0.93 0.99 0.96 1000 家居 0.97 0.38 0.55 1000 房产 0.62 0.92 0.74 1000 教育 0.91 0.94 0.93 1000 时尚 0.97 0.97 0.97 1000 时政 0.95 0.91 0.93 1000 游戏 0.98 0.97 0.97 1000 科技 0.95 0.99 0.97 1000 财经 0.95 0.99 0.97 1000 accuracy 0.91 10000 macro avg 0.92 0.91 0.90 10000 weighted avg 0.92 0.91 0.90 10000 accuracy_score: 0.906Datamodels module examplesfrom hero_db_utils.datamodels import datamodel from hero_db_utils.datamodels.fields import ForeignKeyField, AutoSerialField import pandas as pd import dotenv dotenv.load_dotenv(override=True); @datamodel class Store: address_id:ForeignKeyField("address","address_id") last_update:pd.to_datetime manager_staff_id:ForeignKeyField("staff", "staff_id") = None store_id:AutoSerialField() = None @datamodel class Address: address:str address2:str district:str city_id:ForeignKeyField("city", "city_id") postal_code:str phone:str last_update:pd.to_datetime address_id:AutoSerialField() = None @datamodel class City: city:str country_id:ForeignKeyField("country", "country_id") last_update:str city_id:AutoSerialField() = None @datamodel class Country: country:str last_update:pd.to_datetime country_id:AutoSerialField() = None @datamodel class Staff: first_name:str last_name:str 
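# Added comment, based on how these fields are used elsewhere in this notebook: each
# annotation below maps a column of the staff table; ForeignKeyField("address", "address_id")
# appears to declare a reference to the address table's address_id column, which is the kind
# of relation that Store.objects.fetch_related("address_id") joins on further down.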
address_id:ForeignKeyField("address","address_id") email:str store_id:ForeignKeyField("stores", "store_id") active:bool username:str password:str last_update:pd.to_datetime staff_id:AutoSerialField() = None %env HERO_POSTGRES_DBNAME=dvd_rentalsenv: HERO_POSTGRES_DBNAME=dvd_rentalsSelect all from a model in the database:Store.objects.all()Filter from the table:from hero_db_utils.queries.postgres import QueryOp Address.objects.filter(address_id=QueryOp.value_in([1,2]))Join on related table for column "address_id":Store.objects.fetch_related("address_id")Retrieve an object from the database:store = Store.objects.get(store_id=2) store.dataUpdate a model in the database:store.update(address_id=3) store.address_id Store.objects.all()Create a new staff member:Staff.objects.all() new_staff = Staff( first_name="Harry", last_name="Potter", active = True, store_id=1, address_id=2, email="", username="theboywholived", password="", last_update="now" ) new_staff.data # Insert staff in the database: new_staff.insert() # Check it exists in the database: Staff.objects.all()$\left(x-a\right)^2+\left(y-b\right)^2=r^2$ If $i$ and $j$ represent the horizontal and vertical distances from the starting point $P$ to the center of the circle, then $c_1 = p_1+i$ and $c_2=p_2+j$. That was convenenient. If, on the other hand, all we have is the radius of the circle... $$\left(p_1-a\right)^2+\left(p_2-b\right)^2=r^2$$ $$\left(q_1-a\right)^2+\left(q_2-b\right)^2=r^2$$ So, with numbers obtained from the `gcode` block in question capitalized, we need to solve the following system of equations for $a$ and $b$. $$\left(X_0-a\right)^2+\left(Y_0-b\right)^2=R^2$$ $$\left(X-a\right)^2+\left(Y-b\right)^2=R^2$$ $X_0$ and $Y_0$ represent the current tool position in the equation above, and are therefore known quantities at the time of computation.x, y, z, X, X0, Y, Y0, Z, Z0, R = symbols("x y z X X0 Y Y0 Z Z0 R") e1 = (x-X0)**2 + (y-Y0)**2 e2 = (x-X)**2 + (y-Y)**2[Here](https://math.stackexchange.com/questions/1781438/finding-the-center-of-a-circle-given-two-points-and-a-radius-algebraically) is the answer from Stack Exchange. 
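As a quick cross-check (an added sketch, not part of the original notebook), sympy can also solve the two circle equations directly; with the example values used further below (X0=1, Y0=4, X=5, Y=1, R=13/2) it returns the same two candidate centres that the closed-form expressions x3, y3 and x4, y4 produce later on:

```python
# Standalone verification: solve the two circle equations for the centre (x, y).
from sympy import Eq, Rational, solve, symbols

x, y = symbols("x y", real=True)
X0, Y0, X, Y, R = 1, 4, 5, 1, Rational(13, 2)   # same numbers as the params dict below

sol = solve([Eq((x - X0)**2 + (y - Y0)**2, R**2),
             Eq((x - X)**2 + (y - Y)**2, R**2)], [x, y])
print(sol)   # two candidate centres: (-3/5, -23/10) and (33/5, 73/10), i.e. (-0.6, -2.3) and (6.6, 7.3)
```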
That question / answer also references [this](http://mathforum.org/library/drmath/view/54490.html) newsgroup message from who knows when?init_printing() help(init_printing) from enum import IntEnum class CircularPlane(IntEnum): XY = 17 ZX = 18 YZ = 19 xa = (X - X0) / 2 ya = (Y - Y0) / 2 x0, y0 = X0 + xa, Y0 + ya a = sqrt(xa**2 + ya**2) b = sqrt(R**2 - a**2) x3 = x0 + b*ya / a x4 = x0 - b*ya / a y3 = y0 - b*xa / a y4 = y0 + b*xa / a x3 y3 x3.simplify() y3.simplify() params = dict(zip([X0, Y0, X, Y, R], [1, 4, 5, 1, 13/2])) x3 = x3.subs(params) y3 = y3.subs(params) x4 = x4.subs(params) y4 = y4.subs(params) T1, T2 = symbols("T1, T2") C1 = Point3D(x3.subs(params), y3.subs(params)) x3, y3, x4, y4 from IPython.display import Image Image("png/circles0000.png")$$AC_1=0$$args1 = [-x3, -y3] args2 = [1/params[R]] * 3 Q0 = Point3D(params[X0], params[Y0], 0).translate(*args1).scale(*args2).evalf() Q = Point3D(params[X], params[Y], 0).translate(*args1).scale(*args2).evalf() print("1/R:", 1/params[R]) print("Q0:", Q0) print("Q:", Q)1/R: 0.15384615384615385 Q0: Point3D(0.246153846153846, 0.969230769230769, 0) Q: Point3D(0.861538461538462, 0.507692307692308, 0)Get the angles...theta1 = atan(Q0.y / Q0.x) theta1 def rad2deg(t): return ((t % pi) * 180 / pi).evalf() def deg2rad(t): return (pi * t / 180 % (pi / 2)).evalf() from anglr import Angle dir(Angle) help(Angle.__init__) P = Point3D() Q P.translate(*-Q) rad2deg(theta1) theta2 = atan(Q.y / Q.x) rad2deg(theta2) help(Angle.angle_between) t1 = Angle(theta1) t2 = Angle(theta2) t3 = t1.angle_between_clockwise(t2) from math import pi as PI PI t3.radians > PI import matplotlib.path matplotlib.path.Path.arc(t1.degrees, t2.degrees).vertices.transpose()[0] help(map) help(numpy.full) import numpy public(numpy) public(numpy.ndarray) help(numpy.ndarray.transpose) public(matplotlib.path.Path) from enum import Enum class Quadrants(Enum): I = "I" II = "II" III = "III" IV = "IV" class Directions(Enum): CW = 0 CCW = 1 def quadrant(P): if P.x >= 0: if P.y > 0: return Quadrants.I else: return Quadrants.IV else: if P.y > 0: return Quadrants.II else: return Quadrants.III def get_angle(P): return atan(P.y / P.x) def rotate(*args, radians=(pi/2).evalf(), degrees=90): pass def direction(P1, P2): """ Return the direction travelled to reach P2 from P1 via the shortest path. 
""" t1 = get_angle(P1) t2 = get_angle(P2) if not quadrant(P1) == Quadrants.I: pass if quadrant(P1) == Quadrants.I: if quadrant(P2) == Quadrants.I: if t1 - t2 > 0: return Directions.CW else: return Directions.CCW if quadrant(P2) == Quadrants.II: return CCW if quadrant(P2) == Quadrants.III: if t2 < t1 + 180: return Directions.CCW else: return Directions.CCW if quadrant(P2) == Quadrants.IV: return Directions.CW rotate T1 = Matrix([[1/params[R], 0, -x3], [0, 1/params[R], -y3]])$\begin{matrix} -1 & 3 \\ 2 & -4 \end{matrix}$v3 = Matrix([x3, y3, 1]) v4 = Matrix([x4, y4, 1]) v3 T1 * v3 T1 * v4 import matplotlib.pyplot as plt plt.plot([v3[0], v3[1]]) v3 pwd from os import chdir chdir('../../') from py.startup import * import mpl_toolkits.mplot3d plt.ion() fig = plt.figure() ax = fig.add_subplot(111, projection='3d')Description: solutions to Case Study 5Version: 1.2.3.20210401Author: and Last editors: Case Study 5This case study explores various options for model selection on a clustering task.**The task** involves comparing multiple model selection metrics on a Gaussian mixture model (GMM) using the digits dataset projected into the 2D principal component analysis (PCA) space.**The objective measure** is to use visualisation to evaluate the models selected with metrics by observing which digits are being grouped or split. 1. Importing ModulesThis case study is accomplished in the context of using [`scikit-learn`](https://scikit-learn.org/stable/) as a tool to explore relevant techniques.**NOTE: It has been found and validated that different versions of scikit-learn can produce different results in some parts even with the same code. Please use the version 0.24.1 to get the same results.**# NOTE: Uncomment the following line to force installing scikit-learn 0.24.1. #!pip install -U --force-reinstall --no-cache --no-warn-script-location --user scikit-learn==0.24.1 import warnings from matplotlib import pyplot as plt from matplotlib.cm import get_cmap from matplotlib.patches import Ellipse from sklearn.model_selection import KFold from sklearn.mixture import GaussianMixture from sklearn.datasets import load_digits from sklearn.decomposition import PCA from sklearn.metrics import silhouette_score from sklearn.model_selection import cross_val_score from sklearn.preprocessing import scale import numpy as np warnings.filterwarnings('ignore', category = UserWarning)2. Loading DataThe [`load_digits()`](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) function is used to load the digits dataset. The [`scale()`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html) function standardises the data.X, y = load_digits(return_X_y = True) X = scale(X) n_digits = len(np.unique(y)) print('Number of digits:', n_digits) print('Number of samples:', len(y)) # Equal to "X.shape[0]". print('Number of features:', X.shape[1])Number of digits: 10 Number of samples: 1797 Number of features: 64As per the task specification, the data is projected into 2D using the [`PCA`](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) class to perform PCA to keep 2 principal components (PC).X_pca = PCA(2).fit_transform(X) # Plot the projected data. plt.figure(figsize = (8, 6)) scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c = y, cmap = 'tab20', s = 10) plt.title('Digits Dataset Projected into 2D PCA Space') plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(*scatter.legend_elements()) plt.show()3. 
Model SelectionIdeally, the number of mixture components `k` equals `n_digits` (i.e., 10) as per the data. Here the range of `k` is set to `n_digits ± 5`. A covariance matrix type representation is selected from the list `['diag', 'full', 'spherical', 'tied']`. 4 model selection metrics are applied to select the optimal `k` and the covariance matrix type preferred by each metric. These metrics include the Bayesian information criterion (BIC), Akaike information criterion (AIC), silhouette score, and cross-validation (CV). The [`GuassianMixture`](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html) class is used to build a GMM. The [`silhouette_score()`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) function can help to compute the silhouette. The [`KFold`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html) class is used to perform 6-fold CV. The CV results are in the form of log-likelihood on hold-out observations. The [`cross_val_score()`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) function can help to compute the CV log-likelihood.ks = np.arange(n_digits - 5, n_digits + 6) # The range of k to select cov_types = ['diag', 'full', 'spherical', 'tied'] metrics = ['BIC', 'AIC', 'Silhouette score', 'CV log-likelihood'] # Representations of the 4 model selection metrics. gmms = [] # A list storing each covariance matrix type's GMMs. scores = {metrics[0]: [], metrics[1]: [], metrics[2]: [], metrics[3]: []} # A dictionary storing each metric's scores. best_params = {} # A dictionary storing the optimal k and the covariance matrix type preferred by each metric. Each key's value is a list like [k, cov_type]. for i, cov_type in enumerate(cov_types): gmms.append([GaussianMixture(k, covariance_type = cov_type, random_state = 0).fit(X_pca) for k in ks]) scores[metrics[0]].append([gmm.bic(X_pca) for gmm in gmms[i]]) # Compute BIC. scores[metrics[1]].append([gmm.aic(X_pca) for gmm in gmms[i]]) # Compute AIC. scores[metrics[2]].append([silhouette_score(X_pca, gmm.predict(X_pca)) for gmm in gmms[i]]) # Compute the silhouette score. scores[metrics[3]].append([np.mean(cross_val_score(gmm, X_pca, y, cv = KFold(6))) for gmm in gmms[i]]) # Compute the CV log-likelihood.A function is defined to reuse code.def select_best_params(metric: str = metrics[0]) -> None: ''' Select the optimal k and the covariance matrix type preferred by a specified metric. Parameters ---------- metric : one of the metric in the metric list ''' best_ks = [] best_scores = [] xticks_locs = np.arange(len(ks)) plt.figure(figsize = (8, 4)) for i, cov_type in enumerate(cov_types): scores_metric = scores[metric][i] best_score_index = np.argmax(scores_metric) if metric == metrics[2] or metric == metrics[3] else np.argmin(scores_metric) # The higher silhouette score or CV log-likelihood, the better. Otherwise, the lower, the better. best_ks.append(ks[best_score_index]) best_scores.append(scores_metric[best_score_index]) plt.bar(xticks_locs + 0.2 * i - 0.3, scores_metric, label = cov_type, width = 0.2) # Plot bars indicating this covariance matrix type's scores. plt.title(metric + ' Per Model') plt.xlabel('K') plt.xticks(xticks_locs, ks) plt.ylabel(metric) adjustment = 0.01 if metric == metrics[2] or metric == metrics[3] else 10 plt.ylim(np.min(scores[metric]) - adjustment, np.max(scores[metric]) + adjustment) # Narrow down y-axis limits properly to make the chart look better. 
plt.legend(bbox_to_anchor = (1.2, 0.5), loc = 'right') plt.show() best_score_index = np.argmax(best_scores) if metric == metrics[2] or metric == metrics[3] else np.argmin(best_scores) # The higher silhouette score or CV log-likelihood, the better. Otherwise, the lower, the better. best_params[metric] = [best_ks[best_score_index], cov_types[best_score_index]] print('Best parameters:', best_params[metric]) # Print the best parameters like [k, cov_type].Score results are returned respectively for the 4 model selection metrics.select_best_params() # Use my function to select the optimal k and the covariance matrix type preferred by BIC. select_best_params(metrics[1]) # Use my function to select the optimal k and the covariance matrix type preferred by AIC. select_best_params(metrics[2]) # Use my function to select the optimal k and the covariance matrix type preferred by the silhouette. select_best_params(metrics[3]) # Use my function to select the optimal k and the covariance matrix type preferred by CV.4. EvaluationThe best parameters suggested by each metric are evaluated by visualising the Gaussian mixture components and covariance ellipses. A function is defined to reuse code.gmms_best = [GaussianMixture(best_params[metric][0], covariance_type = best_params[metric][1]).fit(X_pca) for metric in metrics] colours = get_cmap('tab20_r').colors def plot_components(gmm: GaussianMixture) -> None: ''' Plot the Gaussian mixture components and covariance ellipses. Parameters ---------- gmm : a fitted GMM ''' plt.figure(figsize = (8, 6)) scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c = gmm.predict(X_pca), cmap = 'tab20', s = 10) ax = plt.gca() for i, colour in enumerate(colours[:gmm.get_params()['n_components']]): if gmm.covariance_type == cov_types[0]: # 'diag' cov = np.diag(gmm.covariances_[i][:2]) if gmm.covariance_type == cov_types[1]: # 'full' cov = gmm.covariances_[i][:2, :2] if gmm.covariance_type == cov_types[2]: # 'spherical' cov = np.eye(gmm.means_.shape[1]) * gmm.covariances_[i] if gmm.covariance_type == cov_types[3]: # 'tied' cov = gmm.covariances_[:2, :2] v, w = np.linalg.eigh(cov) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan2(u[1], u[0]) angle = 180 * angle / np.pi # Convert to degrees. v = 2 * np.sqrt(2) * np.sqrt(v) ell = Ellipse(gmm.means_[i, :2], v[0], v[1], 180 + angle, color = colour) # Draw a covariance ellipse. ell.set_clip_box(ax.bbox) ell.set_alpha(0.7) ax.add_artist(ell) ax.set_aspect('equal', 'datalim') plt.legend(*scatter.legend_elements())Visualisation is done respectively for the 4 model selection metrics.plot_components(gmms_best[0]) # Use my function to plot the Gaussian mixture components and covariance ellipses for BIC. plot_components(gmms_best[1]) # Use my function to plot the Gaussian mixture components and covariance ellipses for AIC. plot_components(gmms_best[2]) # Use my function to plot the Gaussian mixture components and covariance ellipses for the silhouette score. plot_components(gmms_best[3]) # Use my function to plot the Gaussian mixture components and covariance ellipses for CV.GEE score testsThis notebook uses simulation to demonstrate robust GEE score tests. These tests can be used in a GEE analysis to compare nested hypotheses about the mean structure. The tests are robust to miss-specification of the working correlation model, and to certain forms of misspecification of the variance structure (e.g. 
as captured by the scale parameter in a quasi-Poisson analysis).The data are simulated as clusters, where there is dependence within but not between clusters. The cluster-wise dependence is induced using a copula approach. The data marginally follow a negative binomial (gamma/Poisson) mixture.The level and power of the tests are considered below to assess the performance of the tests.import pandas as pd import numpy as np from scipy.stats.distributions import norm, poisson import statsmodels.api as sm import matplotlib.pyplot as pltThe function defined in the following cell uses a copula approach to simulate correlated random values that marginally follow a negative binomial distribution. The input parameter `u` is an array of values in (0, 1). The elements of `u` must be marginally uniformly distributed on (0, 1). Correlation in `u` will induce correlations in the returned negative binomial values. The array parameter `mu` gives the marginal means, and the scalar parameter `scale` defines the mean/variance relationship (the variance is `scale` times the mean). The lengths of `u` and `mu` must be the same.def negbinom(u, mu, scale): p = (scale - 1) / scale r = mu * (1 - p) / p x = np.random.gamma(r, p / (1 - p), len(u)) return poisson.ppf(u, mu=x)Below are some parameters that govern the data used in the simulation.# Sample size n = 1000 # Number of covariates (including intercept) in the alternative hypothesis model p = 5 # Cluster size m = 10 # Intraclass correlation (controls strength of clustering) r = 0.5 # Group indicators grp = np.kron(np.arange(n/m), np.ones(m))The simulation uses a fixed design matrix.# Build a design matrix for the alternative (more complex) model x = np.random.normal(size=(n, p)) x[:, 0] = 1The null design matrix is nested in the alternative design matrix. It has rank two less than the alternative design matrix.x0 = x[:, 0:3]The GEE score test is robust to dependence and overdispersion. Here we set the overdispersion parameter. The variance of the negative binomial distribution for each observation is equal to `scale` times its mean value.# Scale parameter for negative binomial distribution scale = 10In the next cell, we set up the mean structures for the null and alternative models# The coefficients used to define the linear predictors coeff = [[4, 0.4, -0.2], [4, 0.4, -0.2, 0, -0.04]] # The linear predictors lp = [np.dot(x0, coeff[0]), np.dot(x, coeff[1])] # The mean values mu = [np.exp(lp[0]), np.exp(lp[1])]Below is a function that carries out the simulation.# hyp = 0 is the null hypothesis, hyp = 1 is the alternative hypothesis. 
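# mcrep Monte Carlo replications are run. Each replication draws cluster-correlated
# uniforms via the Gaussian copula, converts them to negative binomial responses with
# the negbinom() helper above, fits the null (x0) and alternative (x) GEE models with a
# Poisson family and Pearson (X2) scale estimates, and records the robust score-test
# p-value from compare_score_test. The returns are the mean p-value, the fraction of
# p-values below 0.1, and the scale estimates from both fits.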
# cov_struct is a statsmodels covariance structure def dosim(hyp, cov_struct=None, mcrep=500): # Storage for the simulation results scales = [[], []] # P-values from the score test pv = [] # Monte Carlo loop for k in range(mcrep): # Generate random "probability points" u that are uniformly # distributed, and correlated within clusters z = np.random.normal(size=n) u = np.random.normal(size=n//m) u = np.kron(u, np.ones(m)) z = r*z +np.sqrt(1-r**2)*u u = norm.cdf(z) # Generate the observed responses y = negbinom(u, mu=mu[hyp], scale=scale) # Fit the null model m0 = sm.GEE(y, x0, groups=grp, cov_struct=cov_struct, family=sm.families.Poisson()) r0 = m0.fit(scale='X2') scales[0].append(r0.scale) # Fit the alternative model m1 = sm.GEE(y, x, groups=grp, cov_struct=cov_struct, family=sm.families.Poisson()) r1 = m1.fit(scale='X2') scales[1].append(r1.scale) # Carry out the score test st = m1.compare_score_test(r0) pv.append(st["p-value"]) pv = np.asarray(pv) rslt = [np.mean(pv), np.mean(pv < 0.1)] return rslt, scalesRun the simulation using the independence working covariance structure. We expect the mean to be around 0 under the null hypothesis, and much lower under the alternative hypothesis. Similarly, we expect that under the null hypothesis, around 10% of the p-values are less than 0.1, and a much greater fraction of the p-values are less than 0.1 under the alternative hypothesis.rslt, scales = [], [] for hyp in 0, 1: s, t = dosim(hyp, sm.cov_struct.Independence()) rslt.append(s) scales.append(t) rslt = pd.DataFrame(rslt, index=["H0", "H1"], columns=["Mean", "Prop(p<0.1)"]) print(rslt)Mean Prop(p<0.1) H0 0.509966 0.096 H1 0.055055 0.840Next we check to make sure that the scale parameter estimates are reasonable. We are assessing the robustness of the GEE score test to dependence and overdispersion, so here we are confirming that the overdispersion is present as expected._ = plt.boxplot([scales[0][0], scales[0][1], scales[1][0], scales[1][1]]) plt.ylabel("Estimated scale")Next we conduct the same analysis using an exchangeable working correlation model. Note that this will be slower than the example above using independent working correlation, so we use fewer Monte Carlo repetitions.rslt, scales = [], [] for hyp in 0, 1: s, t = dosim(hyp, sm.cov_struct.Exchangeable(), mcrep=100) rslt.append(s) scales.append(t) rslt = pd.DataFrame(rslt, index=["H0", "H1"], columns=["Mean", "Prop(p<0.1)"]) print(rslt)Mean Prop(p<0.1) H0 0.436004 0.14 H1 0.052685 0.84Checking Partial Derivatives on a Subset of a Model Includes and ExcludesWhen you have a model with a large number of components, you may want to reduce the number of components you check so that the output is small and readable. The `check_partials` method has two arguments: “includes” and “excludes” that help you specify a reduced set. Both of these arguments are lists of strings that default to None. If you specify “includes”, and give it a list containing strings, then only the components whose full pathnames match one of the patterns in those strings are included in the check. Wildcards are acceptable in the string patterns. 
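As a rough illustration of how such glob-style patterns select pathnames, the sketch below uses Python's `fnmatch` as a stand-in for the matching, with pathnames mirroring the example model further down; OpenMDAO's internal matching may differ in detail.

from fnmatch import fnmatchcase

# Illustrative component pathnames and an includes-style pattern list.
pathnames = ['c1c.d1', 'c1c.e1', 'sss.d1', 'sss.sss2.e1', 'abc1cab']
includes = ['*d1', '*e1']

checked = [p for p in pathnames if any(fnmatchcase(p, pat) for pat in includes)]
print(checked)  # ['c1c.d1', 'c1c.e1', 'sss.d1', 'sss.sss2.e1'] -- 'abc1cab' matches neither pattern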
Likewise, if you specify excludes, then components whose pathname matches the given patterns will be excluded from the check.You can use both arguments together to hone in on the precise set of components you wish to check.from openmdao.utils.notebook_utils import get_code from myst_nb import glue glue("code_src62", get_code("openmdao.test_suite.components.paraboloid.Paraboloid"), display=False):::{Admonition} `Paraboloid` class definition :class: dropdown{glue:}`code_src62`:::import openmdao.api as om from openmdao.test_suite.components.paraboloid import Paraboloid prob = om.Problem() model = prob.model sub = model.add_subsystem('c1c', om.Group()) sub.add_subsystem('d1', Paraboloid()) sub.add_subsystem('e1', Paraboloid()) sub2 = model.add_subsystem('sss', om.Group()) sub3 = sub2.add_subsystem('sss2', om.Group()) sub2.add_subsystem('d1', Paraboloid()) sub3.add_subsystem('e1', Paraboloid()) model.add_subsystem('abc1cab', Paraboloid()) prob.setup() prob.run_model() prob.check_partials(compact_print=True, includes='*c*c*') prob.check_partials(compact_print=True, includes=['*d1', '*e1']) prob.check_partials(compact_print=True, includes=['abc1cab']) prob.check_partials(compact_print=True, includes='*c*c*', excludes=['*e*'])Arabic Digit Recognition OverviewThis notebook builds an OCR for handwritten Arabic digits, research in OCR or optical character recognition started a long time ago in order to allow the computer to understand the words in any visual image, but the peak in OCR performances did happen in the deep learning era as it introduced advanced methods and techniques in order to achieve the OCR's outstanding outcomes and uses. The dataset used will be MADbase from the electronics engineering department, in the American University in Cairo, and the CNN will be build using Keras from TensorFlow. DatasetThe dataset is composed of 70,000 digits written by 700 participants. Each participant wrote each digit (from 0 to 9) twenty times (ten times only used in our database – the other ten times may be used later in writer verification research). To ensure including different writing styles, the database was gathered from different institutions: Colleges of Engineering and Law, School of Medicine, the Open University (whose students span a wide range of ages), a high school, and a governmental institution. Forms were scanned with 300 dpi resolution then digits are automatically extracted, categorized, and bounded by bounding boxes. We adjusted the scanner to produce binary images directly; so we did not need to binarize the resulting images. Some noisy and corrupted digit images were edited manually. The database is partitioned into two sets: a training set (60,000 digits – 6000 images per class) and a test set (10,000 digits – 1000 images per class). Writers of training set and test set are exclusive. Ordering of including writers to test sets are randomized to make sure that writers of test set are not from a single institution (to ensure variability of the test set).http://datacenter.aucegypt.edu/shazeem/ Data Exploring & Preprocessing 1- Let's import our libraries that we will use in this project.import tensorflow as tf # The main framework we will build our model with. import numpy as np # Used for mathimatical operations. import pandas as pd # Will be used to load our data frame. import cv2 # Used for image processing. from matplotlib import pyplot as plt # Used for plottin our data. 
from tensorflow.keras.utils import to_categorical # Utility in Tensorflow to convert our true category values.2- Now we wil mount google drive for loading our data.from google.colab import drive drive.mount('/content/gdrive')Mounted at /content/gdrive3- We will use Pandas library to read our data and load it into our data frame, our data is stored in CSV format so we will use the appropriate function to load it.path = '/content/gdrive/MyDrive/Datasets/Kaggle Arabic Digits' # Here we specify the path to our data location on my drive train_data_x = pd.read_csv(path + '/csvTrainImages 60k x 784.csv', header=None) # Then we load the training images. train_data_y = pd.read_csv(path + '/csvTrainLabel 60k x 1.csv', header=None) # Training labels. test_data_x = pd.read_csv(path + '/csvTestImages 10k x 784.csv', header=None) # Testing images. test_data_y = pd.read_csv(path + '/csvTestLabel 10k x 1.csv', header=None) # Testing labels.4- Now let's examine our data properties.We will find that we have 60000 training image with their labels in the training set and 10000 in the testing set, along with a total of 10 classes which idecates the number of Arabic digits.The number of pixels in each image is 784, so we can conclude the pixels per image by getting the square root of the number of pixels which will give us 28, so the dimensions of our image is 28x28x1 as it is a grey scale image, we will use this piece of information for preprocessing our images.print('We have %d training images each contains %d pixels.' %(train_data_x.shape[0], train_data_x.shape[1])) print('We have %d training labels each contains %d classes.' %(train_data_y.shape[0], len(train_data_y.value_counts()))) print('We have %d testing images each contains %d pixels.' %(test_data_x.shape[0], test_data_x.shape[1])) print('We have %d testing labels each contains %d classes.' %(test_data_y.shape[0], len(test_data_y.value_counts())))We have 60000 training images each contains 784 pixels. We have 60000 training labels each contains 10 classes. We have 10000 testing images each contains 784 pixels. We have 10000 testing labels each contains 10 classes.We notice also that there are 6000 image per class.train_data_y.value_counts()Let's see what our images looks like.fig = plt.figure(figsize=(8, 8)) # Setting the figure size. columns = 4 # Selecting the number of columns. rows = 5 # Selectin the number of rows. for i in range(1, columns*rows +1): # Looping through rows & columns. img = test_data_x.iloc[i].to_numpy().reshape((28,28)) # Reshaping the image into its size 32x32 fig.add_subplot(rows, columns, i) # Adding the image to the plot plt.imshow(img, cmap='gray') # Showing the image using plt plt.show() # Finally shpwing the whole plot containing all the subplots5- Now we define a function for us to preprocess the data,We will start with reshaping the image to be with the size of 28x28, so that the training array will be of size of imagesx28x28, then we will pass through each image to flip and rotate them as they are rotated, then we will reshape the entire array with the imagex28x28x1, and the 1 stands for our grey scale images, lastly we will normalize our images by dividing by 255 for pixels normalization.def preprocess_data(train_data_x): train_data_x = train_data_x.to_numpy().reshape((train_data_x.shape[0], 28, 28)).astype('uint8') for i in range(len(train_data_x)): train_data_x[i] = cv2.rotate(train_data_x[i], cv2.ROTATE_90_CLOCKWISE) # Rotating the images. 
train_data_x[i] = np.flip(train_data_x[i], 1) # Flipping the images train_data_x = train_data_x.reshape([-1, 28, 28, 1]).astype('uint8') # Reshaping into the required size. train_data_x = train_data_x.astype('float32')/255 # Here we normalize our images. return np.asarray(train_data_x) train_x = preprocess_data(train_data_x) # Returns an array of dimensions (60000, 28, 28, 1). test_x = preprocess_data(test_data_x) # Returns an array of dimensions (10000, 28, 28, 1).6- Now we preprocess our labels by converting them to the categorical (one-hot) form.train_y = to_categorical(train_data_y.values.astype('int32') # Returns an array of dimensions (60000, 10). , num_classes=10) test_y = to_categorical(test_data_y.values.astype('int32') # Returns an array of dimensions (10000, 10). , num_classes=10)7- We will now shuffle our training and test sets, as this gives better results than presenting the classes in sequential order.from sklearn.utils import shuffle # Importing the shuffle function from the sklearn library. train_x, train_y = shuffle(train_x, train_y) # Now we shuffle x & y in the training set. test_x, test_y = shuffle(test_x, test_y) # Then x & y in our testing set.Building Model 8- We will now create our model's architecture. We use Keras to build the model, starting with a function that creates it; the activation, optimizer and weight-initialization method are passed as arguments so they can be easily modified in the hyper-parameter tuning phase.We start with a first convolutional layer that sets the input shape, followed by a max-pooling layer and a batch normalization layer, then add two more convolutional blocks with the same structure, doubling the number of filters each time.Finally we flatten the output for the fully connected layers, using a small dense layer with dropout, batch normalization and an L2 regularizer to control overfitting.def create_model(activation='relu', optimizer='adam', kernel_initializer='he_normal'): model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), padding='same', input_shape=(28, 28, 1), activation= activation, kernel_initializer=kernel_initializer), tf.keras.layers.MaxPool2D(2,2), tf.keras.layers.BatchNormalization(), tf.keras.layers.Conv2D(64, (3,3), padding='same', activation= activation, kernel_initializer=kernel_initializer), tf.keras.layers.MaxPool2D(2,2), tf.keras.layers.Dropout(0.2), tf.keras.layers.BatchNormalization(), tf.keras.layers.Conv2D(128, (3,3), padding='same', activation= activation, kernel_initializer=kernel_initializer), tf.keras.layers.MaxPool2D(2,2), tf.keras.layers.Dropout(0.2), tf.keras.layers.BatchNormalization(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(32, activation= activation, kernel_initializer=kernel_initializer, kernel_regularizer='l2'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation= 'softmax', kernel_initializer=kernel_initializer) ]) model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) return model model = create_model() # Now we have created an instance of the model with our custom architecture.
model.summary() # Then we display our model's summary.Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_3 (Conv2D) (None, 28, 28, 32) 320 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 14, 14, 32) 0 _________________________________________________________________ batch_normalization_4 (Batch (None, 14, 14, 32) 128 _________________________________________________________________ conv2d_4 (Conv2D) (None, 14, 14, 64) 18496 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 7, 7, 64) 0 _________________________________________________________________ dropout_3 (Dropout) (None, 7, 7, 64) 0 ______________________________________________________[...]Lets train our model.model = create_model(optimizer='adam', # We create our model with the specified hyper parameters kernel_initializer='normal', activation='relu') from keras.callbacks import ModelCheckpoint # We will import a call back to save the best epoch's weights checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=1, save_best_only=True) history = model.fit(train_x, train_y, validation_split= 0.3, # The model will split the data into 30% of validation. epochs=10, # We will run the model for 30 epochs batch_size=64, # We will have a batch size of 64 callbacks=[checkpointer]) # Finally we will use the imported callbackEpoch 1/10 657/657 [==============================] - 73s 109ms/step - loss: 0.3922 - accuracy: 0.9707 - val_loss: 0.1152 - val_accuracy: 0.9862 Epoch 00001: val_loss improved from inf to 0.11523, saving model to weights.hdf5 Epoch 2/10 657/657 [==============================] - 72s 109ms/step - loss: 0.0821 - accuracy: 0.9902 - val_loss: 0.0798 - val_accuracy: 0.9878 Epoch 00002: val_loss improved from 0.11523 to 0.07978, saving model to weights.hdf5 Epoch 3/10 657/657 [==============================] - 71s 108ms/step - loss: 0.0726 - accuracy: 0.9909 - val_loss: 0.0806 - val_accuracy: 0.9889 Epoch 00003: val_loss did not improve from 0.07978 Epoch 4/10 657/657 [==============================] - 71s 108ms/step - loss: 0.0672 - accuracy: 0.9917 - val_loss: 0.0682 - val_accuracy: 0.9913 Epoch 00004: val_loss improved from 0.07978 to 0.06818, saving model to weights.hdf5 Epoch 5/10 657/657 [==============================] - 71s 108ms/step - loss: 0.0642 - accuracy: 0.9924 - val_loss:[...]11- Now lets load the best epoch's weights and then evaluate our model using the test set.model.load_weights('weights.hdf5') # Loading the best weights model.evaluate(test_x, test_y) # Evaluating our model313/313 [==============================] - 4s 14ms/step - loss: 0.0642 - accuracy: 0.990912- Lets plot our training journy to check the performance and verify that the model is not overfitting.# PLOT LOSS AND ACCURACY %matplotlib inline import matplotlib.image as mpimg import matplotlib.pyplot as plt #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- acc=history.history['accuracy'] val_acc=history.history['val_accuracy'] loss=history.history['loss'] val_loss=history.history['val_loss'] epochs=range(len(acc)) # Get number of epochs #------------------------------------------------ # Plot training and validation accuracy per epoch 
#------------------------------------------------ plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('Training and validation accuracy') plt.legend(['train', 'val'], loc='upper left') plt.ylabel('accuracy') plt.xlabel('epoch') plt.figure() #------------------------------------------------ # Plot training and validation loss per epoch #------------------------------------------------ plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Training and validation loss') plt.legend(['train', 'val'], loc='upper left') plt.ylabel('accuracy') plt.xlabel('epoch') plt.title('Training and validation loss')12- Lets save our model for later usemodel.save('/content/my_model/my_model.h5')Testing Model Now lets load our model and test it on our custom images.model = tf.keras.models.load_model('my_model/my_model.h5') # Now we load the modelIn the following function we mapped the categorical output with the Arabic letters to help us better identifing the classes.def convert_categorical_label_to_real_label(categorical_label): real_labels = [] real_labels.extend([x for x in range(10)]) return real_labels[categorical_label]Finally lets test them on custom images.for i in range(1, 5): test = cv2.imread('z' + str(i) + '.PNG') test = cv2.cvtColor(test, cv2.COLOR_BGR2GRAY) test = cv2.resize(test, (28, 28)) plt.imshow(test, cmap='gray') plt.show() test = np.reshape(test, (-1, 28, 28, 1)) test = test.astype('float32')/255 print(convert_categorical_label_to_real_label(np.argmax(model.predict(test))))Measurements by (https://gitlab.ethz.ch/tec/research/permasense/engineering/blob/master/docs/dpp/dpp_inventory.ods)# in µA quiescent_currents = [1.8,1.8,1.0,2.6,1.0,1.4,2.1,1.9,1.7,3.2,1.7,1.3,1.4,1.5,1.6,1.0,3.0,1.6,1.5,1.5,5.0,0.9,3.6,1.6,2.1,3.5,1.4,1.6,1.3,1.6,0.9,1.3,1.7,1.1,2.2,1.2,1.8,1.4,2.6,1.4,1.5,2.3,5.0,1.4,1.2,0.8,5.2,1.4,1.3,2.8,1.4,1.0,1.0,2.6,2.7,2.0,2.3,1.8,2.8,2.2,1.7,3.6,1.0,1.7,1.1,1.8,2.0,3.8,1.6,2.5,2.6,2.7,1.3,2.7,2.0,2.3,2.4,4.0,2.0,1.4,1.3,1.9,1.9,1.2,1.6,1.4,1.3,] %matplotlib inline import matplotlib.pyplot as plt %config InlineBackend.figure_format = 'svg' plt.hist(quiescent_currents, 100, facecolor='k') plt.title("Distribution of quiescent current over 87 DPP2 LoRa\nfor lowest possible power-consuming state\n(STM32L4 shutdown mode, SX1262 cold sleep, no RTC)") plt.ylabel("Count") plt.xlabel("Supply Current [µA]") plt.gca().xaxis.grid(True) plt.show()Tutorial This notebook demonstrates the usage of `redshifted_gaussian_fields` to generate realizations of an intensity field on the sky generated by redshifted 21cm emission with a chosen power spectrum. Table of Contents1  Define Power Spectrum2  Compute Cross-frequency Covariance3  Generate Random Realizations4  Save and Restore Completed Calculationsimport numpy as np import matplotlib.pyplot as plt from astropy import cosmology from redshifted_gaussian_fields import generator plt.rcParams['figure.figsize'] = [10,8] plt.rcParams['xtick.labelsize'] = 'large' plt.rcParams['ytick.labelsize'] = 'large' def plot_power_spectrum(k_axis, Pspec): plt.plot(k_axis, Pspec(k_axis), label=r'$P(k)$') plt.plot(k_axis, (k_axis/0.2)**(-2.7), label=r'$ \propto k^{-2.7}$') plt.legend(loc='upper right', fontsize=16) plt.yscale('log') plt.xscale('log') plt.ylim(1e-6,1e4) plt.xlabel(r'$k$ (Mpc$^{-1}$)',fontsize=20) plt.ylabel(r'$P(k)$ (K$^2$)',fontsize=20) plt.show()Define Power Spectrum Begin by defining the power spectrum. 
In this example we will choose a power spectrum that is effectively a power law over the given range.k0 = np.logspace(-2.,1.,11) a = k0**(-2.7) normalization_point = 0.2 normalization_amplitude = 1. Pspec = generator.ParameterizedGaussianPowerSpectrum(a, k0, renormalization=(normalization_point, normalization_amplitude), term_type='flat_gauss') k_axis = np.logspace(-2.5,1.5,1001) plot_power_spectrum(k_axis, Pspec)The components used to define the power spectrum do not span all the possible power spectra, but they are flexible enough to produce a variety of forms. For example, we might add a run on the spectral index:# Using the same locations parameters,but with a different set # of amplitude parameters a_2 = k0**(-2.7 - 0.1*np.log(k0)) Pspec_2 = generator.ParameterizedGaussianPowerSpectrum(a_2, k0, renormalization=(normalization_point, normalization_amplitude), term_type='flat_gauss') plot_power_spectrum(k_axis, Pspec_2)Compute Cross-frequency Covariance Next we choose the parameters over which to evaluate the cross-covariance function which is used to produce sky model realizations.Frequency $\nu$ points in MHz, 140MHz to 150MHz with a channel width of 0.1MHz:nu_axis = np.linspace(140., 150., 101, endpoint=True) del_nu = 0.1Angular harmonic order $\ell$, from 0 to 100ell_axis = np.arange(101)A choice of cosmological parameters is needed to specify the comoving distance $r$ as a function of $\nu$. This package uses an `astropy` cosmology object for this purpose:cosmo = cosmology.Planck15Choose the number of samples used to compute the frequency channel integralsNp = 15Now we can initialize a GaussianCosmologicalFieldGenerator object, which is used to manage the various computations and well as saving to disk and restoring completed calculations.gcfg = generator.GaussianCosmologicalFieldGenerator(cosmo, Pspec, nu_axis, del_nu, ell_axis, Np=Np)Then we can compute the cross-frequency angular power spectrum over the specified parameter domain for the chosen 3D power spectrum `Pspec`. This is most computationally intensive step and for useful ranges of the parameters may take minutes to hours depending on the number of frequency samples, angular modes, number of terms in the power spectrum function, number of points used in the channelization integral, and whether a matrix element cutoff value $\epsilon$ can be effectively utilized.%%time gcfg.compute_cross_frequency_angular_power_spectrum()CPU times: user 2min 42s, sys: 3.09 s, total: 2min 45s Wall time: 26 sThe computational scaling in terms of* $N_\nu$, the number of frequency samples* $N_\ell$, the number of angular modes* $N_i$, the number of power spectrum terms* $N_p$, the number of samples per frequency channelis approximately $\mathcal{O}(N_\nu^2 N_\ell N_i N_p^2)$ if $\epsilon = 0$. However if $\epsilon > 0$, it becomes $\sim \mathcal{O}(N_\nu N_\ell N_i N_p^2)$, with an overall nearly-constant factor that depends on $\epsilon$ and the small-$k_0$ extent of the power spectrum. The computed cross-frequency angular power spectrum is stored in a $N_\ell \times N_\nu \times N_\nu$ arrayprint(gcfg.barC.shape) plt.imshow(gcfg.barC[50]); plt.plot(gcfg.nu_axis, gcfg.barC[50,0,:], '.') plt.plot(gcfg.nu_axis, gcfg.barC[50,50,:], '.') plt.plot(gcfg.nu_axis, gcfg.barC[50,75,:], '.');Generate Random Realizations Generate realizations of the harmonic coefficients of the specific intensity of the sky. 
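Under the hood, each realization amounts to drawing Gaussian harmonic coefficients whose cross-frequency covariance at each $\ell$ is `barC[ell]`. The following is a minimal sketch of that linear-algebra step only, assuming the $N_\ell \times N_\nu \times N_\nu$ array shown above; it is not the package's actual `generate_realization` implementation, which also handles normalization and the bookkeeping of the $m$-modes.

import numpy as np

def draw_correlated_alm_sketch(barC, seed):
    # Illustrative only: one Gaussian draw per (ell, m), correlated across frequency.
    rng = np.random.default_rng(seed)
    n_ell, n_nu, _ = barC.shape
    draws = []
    for ell in range(n_ell):
        L = np.linalg.cholesky(barC[ell])             # factor the cross-frequency covariance
        g = rng.standard_normal((2 * ell + 1, n_nu))  # assume 2*ell + 1 m-modes per ell
        draws.append(g @ L.T)                         # rows now have covariance L @ L.T = barC[ell]
    return draws

# e.g. alm_sketch = draw_correlated_alm_sketch(gcfg.barC, seed=2983)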
Each realization is generated from the input random seed so that the same realization can be re-generated on demand.seed = 2983 a_lm = gcfg.generate_realization(seed) a_lm2 = gcfg.generate_realization(seed) np.allclose(a_lm, a_lm2)Generate a healpix map sampling of the specific intensity with resolution specified by `nside`:nside = 128 hmap = gcfg.generate_healpix_map_realization(seed, nside)/lustre/aoc/projects/hera/zmartino/anaconda3/envs/main/lib/python3.7/site-packages/healpy/sphtfunc.py:824: UserWarning: Sigma is 0.000000 arcmin (0.000000 rad) sigma * 60 * 180 / np.pi, sigma /lustre/aoc/projects/hera/zmartino/anaconda3/envs/main/lib/python3.7/site-packages/healpy/sphtfunc.py:829: UserWarning: -> fwhm is 0.000000 arcmin sigma * 60 * 180 / np.pi * (2.0 * np.sqrt(2.0 * np.log(2.0)))The first axis of the array is frequnency, the second axis is healpix pixels.print(hmap.shape, hmap.dtype)(101, 196608) float64The frequency spectrum of at a point in the map:plt.plot(gcfg.nu_axis, hmap[:,0], '.-') plt.ylabel('Jy / sr') plt.xlabel('MHz');Tutorial 2: Inside CrypTensorsNote: This tutorial is optional, and can be skipped without any loss of continuity to the following tutorials.In this tutorial, we will take a brief look at the internals of ```CrypTensors```. Using the `mpc` backend, a `CrypTensor` is a tensor encrypted using secure MPC protocols, called an `MPCTensor`. In order to support the mathematical operations required by the `MPCTensor`, CrypTen implements two kinds of secret-sharing protocols: arithmetic secret-sharing and binary secret-sharing. Arithmetic secret sharing forms the basis for most of the mathematical operations implemented by `MPCTensor`. Similarly, binary secret-sharing allows for the evaluation of logical expressions.In this tutorial, we'll first introduce the concept of a `CrypTensor` ptype (i.e. private-type), and show how to use it to obtain `MPCTensors` that use arithmetic and binary secret shares. We will also describe how each of these ptypes is used, and how they can be combined to implement desired functionality.#import the libraries import crypten import torch #initialize crypten crypten.init() #Disables OpenMP threads -- needed by @mpc.run_multiprocess which uses fork torch.set_num_threads(1)WARNING:root:module 'torchvision.models.mobilenet' has no attribute 'ConvBNReLU'ptype in CrypTenCrypTen defines the `ptype` (for private-type) attribute of an `MPCTensor` to denote the kind of secret-sharing protocol used in the `CrypTensor`. The `ptype` is, in many ways, analogous to the `dtype` of PyTorch. The `ptype` may have two values: - `crypten.mpc.arithmetic` for `ArithmeticSharedTensors`- `crypten.mpc.binary` for `BinarySharedTensors`We can use the `ptype` attribute to create a `CrypTensor` with the appropriate secret-sharing protocol. For example:#Constructing CrypTensors with ptype attribute #arithmetic secret-shared tensors x_enc = crypten.cryptensor([1.0, 2.0, 3.0], ptype=crypten.mpc.arithmetic) print("x_enc internal type:", x_enc.ptype) #binary secret-shared tensors y = torch.tensor([1, 2, 1], dtype=torch.int32) y_enc = crypten.cryptensor(y, ptype=crypten.mpc.binary) print("y_enc internal type:", y_enc.ptype)x_enc internal type: ptype.arithmetic y_enc internal type: ptype.binaryArithmetic secret-sharingLet's look more closely at the `crypten.mpc.arithmetic` ptype. Most of the mathematical operations implemented by `CrypTensors` are implemented using arithmetic secret sharing. 
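To build some intuition for what arithmetic shares look like before printing them from CrypTen itself, here is a toy plain-PyTorch illustration of additive sharing over 64-bit integers. CrypTen's real protocol additionally uses a fixed-point encoding and cryptographically secure randomness, so this is only a sketch.

import torch

# Toy additive secret sharing: split a secret into two random-looking shares that sum back to it.
secret = torch.tensor([1, 2, 3], dtype=torch.int64)
share0 = torch.randint(-2**62, 2**62, secret.shape, dtype=torch.int64)
share1 = secret - share0                 # conceptually, arithmetic mod 2**64

print(share0)                            # looks random on its own
print(share1)                            # looks random on its own
print(share0 + share1)                   # tensor([1, 2, 3]) -- reconstructs the secret

# Addition can be carried out share-wise, without ever reconstructing the inputs:
other = torch.tensor([10, 20, 30], dtype=torch.int64)
o0 = torch.randint(-2**62, 2**62, other.shape, dtype=torch.int64)
o1 = other - o0
print((share0 + o0) + (share1 + o1))     # tensor([11, 22, 33])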
As such, `crypten.mpc.arithmetic` is the default ptype for newly generated `CrypTensors`. Let's begin by creating a new `CrypTensor` using `ptype=crypten.mpc.arithmetic` to enforce that the encryption is done via arithmetic secret sharing. We can print values of each share to confirm that values are being encrypted properly. To do so, we will need to create multiple parties to hold each share. We do this here using the `@mpc.run_multiprocess` function decorator, which we developed to execute crypten code from a single script (as we have in a Jupyter notebook). CrypTen follows the standard MPI programming model: it runs a separate process for each party, but each process runs an identical (complete) program. Each process has a `rank` variable to identify itself.Note that the sum of the two `_tensor` attributes below is equal to a scaled representation of the input. (Because MPC requires values to be integers, we scale input floats to a fixed-point encoding before encryption.)import crypten.mpc as mpc import crypten.communicator as comm @mpc.run_multiprocess(world_size=2) def examine_arithmetic_shares(): x_enc = crypten.cryptensor([1, 2, 3], ptype=crypten.mpc.arithmetic) rank = comm.get().get_rank() crypten.print(f"\nRank {rank}:\n {x_enc}\n", in_order=True) x = examine_arithmetic_shares()Rank 0: MPCTensor( _tensor=tensor([ 4977840465844292698, 234311463858409737, -8644375101282040029]) plain_text=HIDDEN ptype=ptype.arithmetic ) Rank 1: MPCTensor( _tensor=tensor([-4977840465844227162, -234311463858278665, 8644375101282236637]) plain_text=HIDDEN ptype=ptype.arithmetic )Binary secret-sharingThe second type of secret-sharing implemented in CrypTen is binary or XOR secret-sharing. This type of secret-sharing allows greater efficiency in evaluating logical expressions. Let's look more closely at the `crypten.mpc.binary` ptype. Most of the logical operations implemented by `CrypTensors` are implemented using arithmetic secret sharing. We typically use this type of secret-sharing when we want to evaluate binary operators (i.e. `^ & | >> <<`, etc.) or logical operations (like comparitors).Let's begin by creating a new `CrypTensor` using `ptype=crypten.mpc.binary` to enforce that the encryption is done via binary secret sharing. We can print values of each share to confirm that values are being encrypted properly, as we did for arithmetic secret-shares.(Note that an xor of the two `_tensor` attributes below is equal to an unscaled version of input.)@mpc.run_multiprocess(world_size=2) def examine_binary_shares(): x_enc = crypten.cryptensor([2, 3], ptype=crypten.mpc.binary) rank = comm.get().get_rank() crypten.print(f"\nRank {rank}:\n {x_enc}\n", in_order=True) x = examine_binary_shares()Rank 0: MPCTensor( _tensor=tensor([-3617348383499570248, -581960226550774565]) plain_text=HIDDEN ptype=ptype.binary ) Rank 1: MPCTensor( _tensor=tensor([-3617348383499570246, -581960226550774568]) plain_text=HIDDEN ptype=ptype.binary )Using Both Secret-sharing ProtocolsQuite often a mathematical function may need to use both additive and XOR secret sharing for efficient evaluation. Functions that require conversions between sharing types include comparators (`>, >=, <, <=, ==, !=`) as well as functions derived from them (`abs, sign, relu`, etc.). For a full list of supported functions, please see the CrypTen documentation.CrypTen provides functionality that allows for the conversion of between ptypes. 
Conversion between ptypes can be done using the `.to()` function with a `crypten.ptype` input, or by calling the `.arithmetic()` and `.binary()` conversion functions.from crypten.mpc import MPCTensor @mpc.run_multiprocess(world_size=2) def examine_conversion(): x = torch.tensor([1, 2, 3]) rank = comm.get().get_rank() # create an MPCTensor with arithmetic secret sharing x_enc_arithmetic = MPCTensor(x, ptype=crypten.mpc.arithmetic) # To binary x_enc_binary = x_enc_arithmetic.to(crypten.mpc.binary) x_from_binary = x_enc_binary.get_plain_text() # print only once crypten.print("to(crypten.binary):") crypten.print(f" ptype: {x_enc_binary.ptype}\n plaintext: {x_from_binary}\n") # To arithmetic x_enc_arithmetic = x_enc_arithmetic.to(crypten.mpc.arithmetic) x_from_arithmetic = x_enc_arithmetic.get_plain_text() # print only once crypten.print("to(crypten.arithmetic):") crypten.print(f" ptype: {x_enc_arithmetic.ptype}\n plaintext: {x_from_arithmetic}\n") z = examine_conversion()to(crypten.binary): ptype: ptype.binary plaintext: tensor([1., 2., 3.]) to(crypten.arithmetic): ptype: ptype.arithmetic plaintext: tensor([1., 2., 3.])Data SourcesCrypTen follows the standard MPI programming model: it runs a separate process for each party, but each process runs an identical (complete) program. Each process has a `rank` variable to identify itself.If the process with rank `i` is the source of data `x`, then `x` gets encrypted with `i` as its source value (denoted as `src`). However, MPI protocols require that both processes to provide a tensor with the same size as their input. CrypTen ignores all data provided from non-source processes when encrypting.In the next example, we'll show how to use the `rank` and `src` values to encrypt tensors. Here, we will have each of 3 parties generate a value `x` which is equal to its own `rank` value. Within the loop, 3 encrypted tensors are created, each with a different source. 
When these tensors are decrypted, we can verify that the tensors are generated using the tensor provided by the source process.(Note that `crypten.cryptensor` uses rank 0 as the default source if none is provided.)@mpc.run_multiprocess(world_size=3) def examine_sources(): # Create a different tensor on each rank rank = comm.get().get_rank() x = torch.tensor(rank) crypten.print(f"Rank {rank}: {x}", in_order=True) # world_size = comm.get().get_world_size() for i in range(world_size): x_enc = crypten.cryptensor(x, src=i) z = x_enc.get_plain_text() # Only print from one process to avoid duplicates crypten.print(f"Source {i}: {z}") x = examine_sources()Rank 0: 0 Rank 1: 1 Rank 2: 2 Source 0: 0.0 Source 1: 1.0 Source 2: 2.0Load the datanb_classes = 10 (X_train, y_train), (X_test, y_test) = mnist.load_data() # convert y_train and y_test to categorical binary values Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) X_train.shapePreprocessing the data# Reshape them to batch_size, width,height,#channels X_train = X_train.reshape(60000, 28, 28, 1) X_test = X_test.reshape(10000, 28, 28, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') # Normalize the values X_train /= 255 X_test /= 255 print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples')60000 train samples 10000 test samplesDefine the teacher model# Teacher model input_shape = (28, 28, 1) # Input shape of each image # Hyperparameters nb_filters = 64 # number of convolutional filters to use pool_size = (2, 2) # size of pooling area for max pooling kernel_size = (3, 3) # convolution kernel size teacher = Sequential() teacher.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) teacher.add(Conv2D(64, (3, 3), activation='relu')) teacher.add(MaxPooling2D(pool_size=(2, 2))) teacher.add(Dropout(0.25)) # For reguralization teacher.add(Flatten()) teacher.add(Dense(128, activation='relu')) teacher.add(Dropout(0.5)) # For reguralization teacher.add(Dense(nb_classes)) teacher.add(Activation('softmax')) # Note that we add a normal softmax layer to begin with teacher.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) print(teacher.summary())WARNING:tensorflow:From /Users/arhumsavera/anaconda/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer. WARNING:tensorflow:From /Users/arhumsavera/anaconda/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version. Instructions for updating: Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`. _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ [...]Define the student model# Student model that is stand-alone. 
We will evaluate its accuracy compared to a teacher trained student model student = Sequential() student.add(Flatten(input_shape=input_shape)) student.add(Dense(32, activation='relu')) student.add(Dropout(0.2)) student.add(Dense(nb_classes)) student.add(Activation('softmax')) #sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) student.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) student.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= flatten_2 (Flatten) (None, 784) 0 _________________________________________________________________ dense_3 (Dense) (None, 32) 25120 _________________________________________________________________ dropout_3 (Dropout) (None, 32) 0 _________________________________________________________________ dense_4 (Dense) (None, 10) 330 _________________________________________________________________ activation_2 (Activation) (None, 10) 0 ================================================================= Total params: 25,450 Trainable params: 25,450 Non-trainable params: 0 _________________________________________________________________Training the teacher model# Train the teacher model as usual epochs = 4 batch_size = 256 teacher.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, Y_test))WARNING:tensorflow:From /Users/arhumsavera/anaconda/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. Train on 60000 samples, validate on 10000 samples Epoch 1/4 60000/60000 [==============================] - 76s 1ms/step - loss: 0.3577 - acc: 0.8881 - val_loss: 0.0741 - val_acc: 0.9783 Epoch 2/4 60000/60000 [==============================] - 75s 1ms/step - loss: 0.1049 - acc: 0.9688 - val_loss: 0.0464 - val_acc: 0.9841 Epoch 3/4 60000/60000 [==============================] - 80s 1ms/step - loss: 0.0771 - acc: 0.9772 - val_loss: 0.0383 - val_acc: 0.9869 Epoch 4/4 60000/60000 [==============================] - 78s 1ms/step - loss: 0.0628 - acc: 0.9816 - val_loss: 0.0363 - val_acc: 0.9869Define a new model that outputs only teacher logitsfor layer in teacher.layers: print(layer.name) # Raise the temperature of teacher model and gather the soft targets # Set a tempature value temp = 7 #Collect the logits from the previous layer output and store it in a different model teacher_WO_Softmax = Model(teacher.input, teacher.get_layer('dense_2').output)Define a manual softmax function# Define a manual softmax function def softmax(x): return np.exp(x)/(np.exp(x).sum())Understanding the concept of temperature in softmax activation# For example, just grab the first image and lets see how softening of probabilities work intermediate_output = teacher_WO_Softmax.predict(X_test[0].reshape(1,28,28,1)) print(softmax(intermediate_output)) pixels = X_test[0] pixels = pixels.reshape((28, 28)) plt.imshow(pixels) plt.show() # logits for the first number in test dataset x = intermediate_output[0] plt.figure(figsize=(20, 10)); temperature = [1,3,7,10,20,50] for temp in temperature: plt.plot((softmax(x/temp)), label='$T='+str(temp)+'$', linewidth=2); plt.legend(); plt.xlabel('classes ->'); plt.ylabel('probability'); plt.xlim([0, 10]); plt.show()[[4.0839434e-09 7.6483259e-10 8.8748038e-08 2.2979354e-07 
5.6975698e-11 1.0589407e-10 7.1403348e-12 9.9999791e-01 1.2292533e-08 1.7573556e-06]]Prepare the soft targets and the target data for student to be trained uponteacher_train_logits = teacher_WO_Softmax.predict(X_train) teacher_test_logits = teacher_WO_Softmax.predict(X_test) # This model directly gives the logits ( see the teacher_WO_softmax model above) # Perform a manual softmax at raised temperature train_logits_T = teacher_train_logits/temp test_logits_T = teacher_test_logits / temp Y_train_soft = softmax(train_logits_T) Y_test_soft = softmax(test_logits_T) # Concatenate so that this becomes a 10 + 10 dimensional vector Y_train_new = np.concatenate([Y_train, Y_train_soft], axis=1) Y_test_new = np.concatenate([Y_test, Y_test_soft], axis =1) Y_train_new.shape Y_test_new.shape X_train.shape Y_train_new[0]Prepare the student model that outputs probabilities with and without temperature# Remove the softmax layer from the student network student.layers.pop() # Now collect the logits from the last layer logits = student.layers[-1].output # This is going to be a tensor. And hence it needs to pass through a Activation layer probs = Activation('softmax')(logits) # softed probabilities at raised temperature logits_T = Lambda(lambda x: x / temp)(logits) probs_T = Activation('softmax')(logits_T) output = concatenate([probs, probs_T]) # This is our new student model student = Model(student.input, output) student.summary()__________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== flatten_2_input (InputLayer) (None, 28, 28, 1) 0 __________________________________________________________________________________________________ flatten_2 (Flatten) (None, 784) 0 flatten_2_input[0][0] __________________________________________________________________________________________________ dense_3 (Dense) (None, 32) 25120 flatten_2[0][0] __________________________________________________________________________________________________ dropout_3 (Dropout) (None, 32) 0 dense_3[0][0] __________[...]Declare knowledge distillation loss function# This will be a teacher trained student model. # --> This uses a knowledge distillation loss function # Declare knowledge distillation loss def knowledge_distillation_loss(y_true, y_pred, alpha): # Extract the one-hot encoded values and the softs separately so that we can create two objective functions y_true, y_true_softs = y_true[: , :nb_classes], y_true[: , nb_classes:] y_pred, y_pred_softs = y_pred[: , :nb_classes], y_pred[: , nb_classes:] loss = alpha*logloss(y_true,y_pred) + logloss(y_true_softs, y_pred_softs) return loss # For testing use regular output probabilities - without temperature def acc(y_true, y_pred): y_true = y_true[:, :nb_classes] y_pred = y_pred[:, :nb_classes] return categorical_accuracy(y_true, y_pred) student.compile( #optimizer=optimizers.SGD(lr=1e-1, momentum=0.9, nesterov=True), optimizer='adadelta', loss=lambda y_true, y_pred: knowledge_distillation_loss(y_true, y_pred, 0.1), #loss='categorical_crossentropy', metrics=[acc] )Train the student modelstudent.fit(X_train, Y_train_new, batch_size=256, epochs=5, verbose=1, validation_data=(X_test, Y_test_new)) # This is a standalone student model (same number of layers as original student model) trained on same data # for comparing it with teacher trained student. 
n_student = Sequential() n_student.add(Flatten(input_shape=input_shape)) n_student.add(Dense(32, activation='relu')) n_student.add(Dropout(0.2)) n_student.add(Dense(nb_classes)) n_student.add(Activation('softmax')) #sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) n_student.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) n_student.fit(X_train, Y_train, batch_size=256, epochs=epochs, verbose=1, validation_data=(X_test, Y_test)) n_student.evaluate(X_test, Y_test) n_student.metrics_names preds = student.predict(X_test)[: , :nb_classes] from sklearn.metrics import accuracy_scoreUn premier regard sur un réseau de neurones- Premier exemple concret de réseau de neurones, qui utilise le package Python Keras pour apprendre à classer chiffres écrits à la main. - Le problème que nous essayons de résoudre ici est de classer les images en niveaux de gris de chiffres manuscrits (28 pixels sur 28 pixels) dans leur 10 catégories (0 à 9). - Le jeu de données que nous allons utiliser est le MNIST, un jeu de données classique de la communauté d’apprentissage automatique, qui a été presque aussi longtemps que le champ lui-même et a été très intensément étudié. C'est un ensemble de 60 000 images d'apprentissage, plus 10 000 de testsfrom keras.datasets import fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()Etudions les données d'apprentissage :import matplotlib.pyplot as plt %matplotlib inline plt.imshow(train_images[33]) print(train_labels[33]) len(train_labels) train_labelsNotre flux de travail sera le suivant: d’abord, nous présenterons à notre réseau de neurones les données d’entraînement, `train_images` et` train_labels`.Le réseau apprendra ensuite à associer des images et des étiquettes. Enfin, nous demanderons au réseau de produire des prédictions pour `test_images`, et nous vérifierons si ces prédictions correspondent aux étiquettes de `test_labels`.from keras import models from keras import layers network = models.Sequential() network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,))) network.add(layers.Dense(10, activation='softmax'))WARNING:tensorflow:From C:\Users\s4d-asus-14\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer.Ici, notre réseau consiste en une séquence de deux couches `Dense`, qui sont des couches densément connectées.La deuxième (et dernière) couche est une couche "softmax", ce qui signifie qu’elle renverra un tableau de 10 scores de probabilitéNous devons choisir trois éléments supplémentaires dans le cadre de l’étape "compilation":- Une fonction de perte: voici comment le réseau sera capable de mesurer la qualité de son travail sur ses données de formation, et donc comment il le sera capable de se diriger dans la bonne direction.- Un optimiseur: c'est le mécanisme par lequel le réseau se mettra à jour en fonction des données qu'il voit et de sa fonction de perte.- Mesures : pendant la formation et les tests. 
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) network.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_1 (Dense) (None, 512) 401920 _________________________________________________________________ dense_2 (Dense) (None, 10) 5130 ================================================================= Total params: 407,050 Trainable params: 407,050 Non-trainable params: 0 _________________________________________________________________Before training, we will preprocess our data by reshaping it into the form expected by the network and by scaling it so that all the values lie in the `[0, 1]` interval.Our images were stored in an array of shape `(60000, 28, 28)` of type `uint8` with values in the range `[0, 255]`. We transform it into a `float32` array of shape `(60000, 28 * 28)` with values between 0 and 1.train_images = train_images.reshape((60000, 28 * 28)) train_images = train_images.astype('float32') / 255 test_images = test_images.reshape((10000, 28 * 28)) test_images = test_images.astype('float32') / 255We also need to encode the labels categorically: We are now ready to train our network via a call to the network's `fit` method:network.fit(train_images, train_labels, epochs=5, batch_size=128,validation_data=(test_images, test_labels))Train on 60000 samples, validate on 10000 samples Epoch 1/5 60000/60000 [==============================] - 3s 43us/step - loss: 0.2758 - acc: 0.8991 - val_loss: 0.3355 - val_acc: 0.8814 Epoch 2/5 60000/60000 [==============================] - 3s 42us/step - loss: 0.2615 - acc: 0.9038 - val_loss: 0.3909 - val_acc: 0.8633 Epoch 3/5 60000/60000 [==============================] - 3s 42us/step - loss: 0.2512 - acc: 0.9068 - val_loss: 0.3580 - val_acc: 0.8811 Epoch 4/5 60000/60000 [==============================] - 3s 42us/step - loss: 0.2409 - acc: 0.9102 - val_loss: 0.3873 - val_acc: 0.8707 Epoch 5/5 60000/60000 [==============================] - 3s 42us/step - loss: 0.2334 - acc: 0.9129 - val_loss: 0.3435 - val_acc: 0.8917Two quantities are displayed during training: the network's "loss" on the training data and the network's accuracy on the training data.We quickly reach an accuracy of about 0.91 (91%) on the training data. Now let's check that our model also performs well on the test set:test_loss, test_acc = network.evaluate(test_images, test_labels) print('test_acc:', test_acc)test_acc: 0.8663Hurricane Florence: Potential Employment Impacts_First Draft: 10 September 2018_ _Last Draft: 12 September 2018_.It is possible for hurricanes to throw off employment counts if they cause workers to miss work. Shutdowns of this kind can generate large distortions in the data that need to be distinguished from other transitory and secular factors.Employment in the Establishment Survey (the source of headline employment) is tallied as the number of people on establishment payrolls during _any part_ of the week on which the 12th of the month falls.Currently, Hurricane Florence is on a path to make landfall early Wednesday at the earliest.
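The reference-week rule just described (payrolls during any part of the week containing the 12th) can be pinned down in code. The snippet below is an illustrative aside, not part of the original note, and it assumes the survey week runs Sunday through Saturday:

import datetime as dt

# Find the September 2018 reference week: the week containing the 12th
the_12th = dt.date(2018, 9, 12)
week_start = the_12th - dt.timedelta(days=(the_12th.weekday() + 1) % 7)  # back up to Sunday
week_end = week_start + dt.timedelta(days=6)                             # through Saturday
print(week_start, "to", week_end)  # 2018-09-09 to 2018-09-15

Anyone on a payroll at any point in that window is counted as employed for September, which is why the Monday evidence discussed next matters.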
In North Carolina mandatory evacuations of the Outer Banks area were ordered at [noon Monday](https://weather.com/safety/hurricane/news/2018-09-10-hurricane-florence-preparations-southeast-coast-north-carolina). Voluntary evacuations of UNC-Wilmington were to begin at noon Monday. At around 3pm EDT evacuations of the coastal area of South Carolina were [ordered to begin at noon Tuesday](https://www.postandcourier.com/news/evacuations-ordered-for-sc-coast-as-hurricane-florence-nears-effective/article_ecae59d2-b507-11e8-b430-c30c881683a8.html).Interest in "Emergency Evacuation" at [Google Trends](https://trends.google.com/trends/explore?date=now%201-d&geo=US-NC&q=%2Fm%2F058th7) only began to pick up in the mid-afternoon in both South and North Carolina. There do not appear to be any major traffic jams on _outgoing_ highways in the evacuation areas as of 6pm EDT. This suggests that evacuations have yet to begin in earnest. There does appear to be regular rush-hour traffic in and around the major metro areas of Charleston, Myrtle Beach, and Wilmington. This suggests that most people were at work today.All of this evidence leads me to infer that:1. Few people evacuated on Monday. - Timing of evacuation orders. - Lack of traffic jams. - Several days until earliest landfall.1. Most people went to work on Monday. - Inferred lack of evacuations. - Rush hour traffic.1. **(Therefore,) The effect of Florence on reported September employment in the Carolinas (and the greater U.S.) will be negligible.** - Inferred that most people worked Monday. - People who worked on Monday will be counted in payroll and, therefore, as employed.Below, I discuss one example of a hurricane that did significantly affect employment (Irma) and one example of a hurricane that did not affect employment (Charley). Both hurricanes occurred around the week of the 12th of the month. However, Irma evacuations began in earnest on the weekend before the employment count while Charley evacuations did not occur until later in the week.If Florence did affect employment significantly, it would probably have to be in October. However, this would only happen if Florence is so devastating that large parts of the economy are still shut down in the following month. This was the case with Katrina, which is discussed last. ![Forecasted Path of Hurricane Florence, 10 September 2018 @5pm CST](https://www.nhc.noaa.gov/storm_graphics/AT06/refresh/AL062018_earliest_reasonable_toa_34+png/205721_earliest_reasonable_toa_34.png)_**Forecasted Path of Hurricane Florence, 10 September 2018 @5pm CST**_ (Source: [National Hurricane Center](https://www.nhc.noaa.gov/refresh/graphics_at1+shtml/205721.shtml?mltoa34contents))%%html import pandas as pd import numpy as npEffect of Irma on FloridaIrma struck during the survey reference week. The BLS discussed this in the [release](https://www.bls.gov/news.release/archives/empsit_10062017.htm) and [Commissioner's statement](https://www.bls.gov/news.release/jec.nr0.htm).
Overall, employment was down 33,000 for the month (unrevised) but up 18,000 in the [revised statistics](https://www.bls.gov/news.release/archives/empsit_11032017.htm).From [Wikipedia](https://en.wikipedia.org/wiki/Effects_of_Hurricane_Irma_in_Florida): With both the Atlantic and Gulf coasts of the state threatened, record evacuations ensued with an estimated 6.5 million people relocating statewide.[Georgia](https://en.wikipedia.org/wiki/Hurricane_Irma#Other_states) also issued evacuation orders for all areas east of I-95, which hugs the coast.%%html There is a clear break for Florida but not for Georgia.fle = dict(Aug= 8602.0, Sep=8435.1, Oct=8614.7) for k,v in fle.items(): print('{}\t{}'.format(k,v)) aoa = (fle['Aug'] + fle['Oct'])/2 print() print('Average of August and October: {}'.format(aoa)) print('Difference between average and observed September:\n\tThousands: {}\n\tPercent: {}'.format(fle['Sep']-aoa, np.log(fle['Sep']/aoa)))Aug 8602.0 Sep 8435.1 Oct 8614.7 Average of August and October: 8608.35 Difference between average and observed September: Thousands: -173.25 Percent: -0.020331091151930616Now let's see how this looks in the national picture. I've plotted the employment change for both the U.S. and Florida:%%html How much of the weak employment growth for September 2017 can be accounted for by Florida? Let's suppose that without a storm:1. Measured US employment in September was at the mean of August and October.1. Measured Florida employment in September was at the mean of August and October.So then we have:aoan = (221+271)/2 unemp = ((aoan-14) # Difference between assumed and measured -((aoa-fle['Aug']) # Counterfactual difference for FL -(fle['Sep']-fle['Aug']) # True difference for FL ) ) print('Number of employees unaccounted for by Florida decline: {:4.3f} thous.'.format(unemp)) print('Percent of employment difference accounted for by Florida: {:4.3f}%'.format(100*(aoan-unemp)/aoan))Number of employees unaccounted for by Florida decline: 58.750 thous. Percent of employment difference accounted for by Florida: 76.118%So it appears that Florida can account for a large majority of the weak employment result for September. Effect of Charley (Aug. 2004) on FloridaCharley was a Category 4 hurricane that struck Florida on August 13, 2004. This was the Friday of the reporting period. Two million people were ordered to evacuate. However, the evacuations did not occur until later in the week, which meant that there was no discernible impact on employment, either overall or in Florida. The BLS noted this in their [report for August](https://www.bls.gov/news.release/history/empsit_09032004.txt). In September additional hurricanes hit, and those [did affect employment](https://www.bls.gov/news.release/history/empsit_10082004.txt).%%html Effects of Katrina on LouisianaBetween [1.2](https://www.nhc.noaa.gov/data/tcr/AL122005_Katrina.pdf) and [1.5](https://www.nap.edu/read/11840/chapter/5) million people evacuated Louisiana during and after Katrina. Employment in Louisiana fell for two straight months.Although fewer people evacuated Louisiana than evacuated Florida during Irma (6.5m), the decline in employment was much larger on a per-evacuee basis. The persistence of the decline suggests that this higher ratio is attributable to the severity of the storm.Overall U.S. employment took a dive in September 2005. The series was much more volatile during that period, though, so I won't try to say how much of it was due to Katrina.
See also the [BLS](https://www.bls.gov/katrina/cpscesquestions.htm)%%html Average Hours Worked- South Carolina - Beaufort - Charleston - Georgetown - HorryHere is the game plan:- Three main bits of data to bring together: 1. Focus on workers paid by the hour. - Generally, these are where the swings in hours worked are going to come from. _Show this empirically?_ - From CPS can get information about people who are paid by the hour. - Get estimates for the percentage of each 2-digit sector that are paid by the hour. 1. Look at counties in flood zones. - Expect that this is where most of the evacuations and damage will be. - Can get this data from the [BLS Flood Zone Area Maps](https://www.bls.gov/cew/hurricane_zones/maps.htm). - Focus on South Carolina, North Carolina, and [maybe] southern Virginia. - Have info about share of employees in each given flood zone. 1. Get employment in each county in each sector: - Can get this from the [BLS Quarterly Census of Employment and Wages](https://www.bls.gov/cew/)- Combine the data: 1. Estimate share of workers in each county paid by the hour: - Merge the paid-by-the-hour and county employment statistics. 1. Assume: - Always assume that: 1. The share of workers paid by the hour is independent of location in the county. 1. Worker hours are uniformly distributed across a 5-day work week. Alternatively, assume that the average worker has their work week uniformly distributed. 1. Need to make parameter assumptions/judgment calls about: 1. Share of people that are evacuating: - Which flood zones? - Or just all in county? - Any other counties? - How many places will stay open? 1. How long average evacuation will persist. (Up to end of week.)%load_ext autoreload %autoreload 2 import sys import pandas as pd import numpy as np import hurricane_florence_code as hfData Sources# Sector Statistics # (Note: Adds path to custom module on MW machine. Send email for module) sec = hf.sector_stats() # Counties in flood zone cty = hf.flood_counties() # Employment by sector in flood counties (as of 2018:1) emp = hf.flood_county_employment_stats()Share of each Supersector $\times$ County that are paid by the hour- From CPS- For now, just use copy+pasted table from the SDA data tool. (Get full data extract when ready.)- Convert industry from CPS to NAICS supersector codes.# Combine County and Industry Paid by hour datasets emp = emp.merge(sec, on='ind', how='left') # Number of employmees in each (ind, county) paid by hour emp['n_pbh'] = emp['paidhour'] * emp['emp'] # More info for viewing emp = cty.merge(emp, on='fips') emp.head() # Average Hours per day based on 5-day workweek emp['hrs'] = emp['workweek'] / 5 # Total employment in US in August 2018 emp['emp_us'] = emp_us = 126939000 # Total Weekly Hours in US emp['ww_us'] = ww_us = 34.5 # By State emp = hf.state_emp_hrs(emp) %%html Impacts# HYPERPARAMETERS days_off = 4 pct_off = 0.95 hf.compute_hours_impact(emp, days_off, pct_off)United States Workweek (Florence) Workweek (Usual) Percent Diff. 34.412 34.500 -0.254% ----------------------------------------------------------------- BY STATE:Libraries Needed for Scriptimport numpy as np #Library containing advanced mathematical functions and other. 
import pandas as pd #Library containing a powerful data structure called a DataFrameData Importdf_rawdata_testdataset = pd.read_csv('https://github.com/JonathanAspeling/zindi_sendychallange/blob/master/csv/Test.csv',encoding = 'ASCII') print('Success importing') df_rawdata_trainingdataset = pd.read_csv('https://github.com/JonathanAspeling/zindi_sendychallange/blob/master/csv/Train.csv',encoding = 'ASCII') df_rawdata_driverinfo = pd.read_csv('https://github.com/JonathanAspeling/zindi_sendychallange/blob/master/csv/Riders.csv',encoding = 'ASCII')Lab Work 5 (2.7) Working with sets in the Python language**Goal of the work:** gaining skills in working with sets when writing programs in the Python 3.x programming language. Variant 6 Individual assignment: **Task 1** Determine the result of performing the operations on the sets. Treat the set elements as strings. ![image.png](attachment:image.png)u = set("abcdefghijklmnopqrstuvwxyz") A = {'b', 'f', 'g', 'm', 'o'} B = {'b', 'g', 'h', 'l', 'u'} C = {'e', 'f', 'm'} D = { 'e', 'g', 'l', 'p', 'q', 'u', 'v'} X = (A.difference(B)).union(C.intersection(D)) print("Answer\n") print(f"x = {X}") A_u = u.difference(A) B_u = u.difference(B) Y = (A_u.intersection(B_u)).difference(C.union(D)) print(f"y = {Y}")Answer x = {'e', 'o', 'f', 'm'} y = {'t', 'd', 'c', 'k', 'i', 's', 'w', 'r', 'j', 'a', 'n', 'z', 'y', 'x'}**Introduction to SVM** The SVM is a very popular and versatile model capable of performing classification, regression, and outlier detection. This model works very well for linearly separable classes. **What are linearly separable classes?** Linearly separable classes can be understood, generically, as classes that can easily be separated by a straight line.from sklearn import datasets import pandas as pd import seaborn as sns iris = datasets.load_iris() iris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names) iris_df['label'] = iris.target iris_df['species'] = pd.Categorical.from_codes(iris.target, iris.target_names) iris_df.head() # plot the DataFrame features pairwise with the Seaborn package sns.pairplot(iris_df[['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)', 'species']], hue='species')**Classifying the Iris dataset**from sklearn import svm from sklearn.model_selection import train_test_split X = iris.data y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=13)Instantiate the classifierclassificador = svm.SVC(C=1.0)Train the classifierclassificador.fit(X_train, y_train)Predict on the test setclassificador.predict(X_test)Model score (accuracy)classificador.score(X_test, y_test)**Evaluation metrics for a classifier** Creating a classification reportfrom sklearn.metrics import classification_report y_pred = classificador.predict(X_test) print(classification_report(y_test, y_pred, target_names=iris.target_names))precision recall f1-score support setosa 1.00 1.00 1.00 14 versicolor 0.86 1.00 0.92 12 virginica 1.00 0.89 0.94 19 accuracy 0.96 45 macro avg 0.95 0.96 0.96 45 weighted avg 0.96 0.96 0.96 45
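To complement the classification report above, a confusion matrix is another common way to inspect a classifier's errors. The short sketch below is an optional aside (not part of the original notebook) that reuses the `classificador`, `X_test`, and `y_test` objects defined above.

from sklearn.metrics import confusion_matrix

# Rows are the true classes, columns are the predicted classes
y_pred = classificador.predict(X_test)
print(confusion_matrix(y_test, y_pred))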
EDA-04: Unsupervised Learning - Clustering, Part 02(C) - 2020 tau-data Indonesia ~ https://tau-data.id/eda-04/# Run this cell ONLY if this notebook is run from Google Colab # If running locally (Anaconda/WinPython), please install from the terminal/command prompt # Then manually download the required file and place it in your Python folder. !pip install --upgrade umap-learn !wget https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/tau_unsup.py # Importing modules for this notebook import warnings; warnings.simplefilter('ignore') import time, umap, numpy as np, tau_unsup as tau, matplotlib.pyplot as plt, pandas as pd, seaborn as sns from matplotlib.colors import ListedColormap from sklearn import cluster, datasets from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.preprocessing import StandardScaler from itertools import cycle, islice from sklearn.metrics import silhouette_score as siluet from sklearn.metrics.cluster import homogeneity_score as purity from sklearn.metrics import normalized_mutual_info_score as NMI sns.set(style="ticks", color_codes=True) random_state = 99Review of EDA-03* Introduction to Unsupervised Learning* k-Means, k-Means++, MiniBatch k-Means* Internal & external evaluation* Parameter tuning EDA-04* Hierarchical Clustering* Spectral Clustering* DBScan* Clustering Evaluation Revisited Linkage Comparisons* single linkage is fast, and can perform well on non-globular data, but it performs poorly in the presence of noise.* average and complete linkage perform well on cleanly separated globular clusters, but have mixed results otherwise.* Ward is the most effective method for noisy data.* http://scikit-learn.org/stable/auto_examples/cluster/plot_linkage_comparison.html#sphx-glr-auto-examples-cluster-plot-linkage-comparison-py tau.compare_linkages()Pros* No assumption of a particular number of clusters (unlike k-means)* May correspond to meaningful taxonomies Cons* Once a decision is made to combine two clusters, it can't be undone* Too slow for large data sets, O(n^2 log(n))# We will use the same data as in EDA-03 df = sns.load_dataset("iris") X = df[['sepal_length','sepal_width','petal_length','petal_width']].values C = df['species'].values print(X.shape) df.head() # Hierarchical http://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html#sklearn.cluster.AgglomerativeClustering hierarchical = cluster.AgglomerativeClustering(n_clusters=3, linkage='average', affinity = 'euclidean') hierarchical.fit(X) # Slow .... and uses a lot of memory, O(N^2 log(N)) C_h = hierarchical.labels_.astype(np.int) C_h[:10] # Dendrogram Example # http://seaborn.pydata.org/generated/seaborn.clustermap.html g = sns.clustermap(X, method="single", metric="euclidean") # Scatter Plot of the hierarchical clustering results X2D = umap.UMAP(n_neighbors=5, min_dist=0.3, random_state=random_state).fit_transform(X) fig, ax = plt.subplots() ax.scatter(X2D[:,0], X2D[:,1], c=C_h) plt.show()How do we evaluate hierarchical clustering?* Silhouette coefficient, Dunn index, or Davies–Bouldin index* Domain knowledge - interpretability* External evaluation Read more here: https://www.ims.uni-stuttgart.de/document/team/schulte/theses/phd/algorithm.pdf# Spectral : http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html spectral = cluster.SpectralClustering(n_clusters=3) spectral.fit(X) C_spec = spectral.labels_.astype(np.int) sns.countplot(C_spec) C_spec[:10] fig, ax = plt.subplots() ax.scatter(X2D[:,0], X2D[:,1], c=C_spec) plt.show()
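The evaluation metrics imported at the top of this notebook (`siluet`, `purity`, `NMI`) are never applied in this excerpt. As a small illustrative aside, the sketch below shows one way they could be used to score the hierarchical and spectral labelings above, combining an internal measure with an external comparison against the true species labels `C`.

# Internal evaluation: silhouette coefficient on the feature matrix X
print("silhouette (hierarchical):", siluet(X, C_h))
print("silhouette (spectral):", siluet(X, C_spec))

# External evaluation against the ground-truth species labels C
print("homogeneity/purity (hierarchical):", purity(C, C_h))
print("NMI (hierarchical):", NMI(C, C_h))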
# DBSCAN http://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html # does not require the number of clusters k as an input!!!... very useful for clustering large datasets dbscan = cluster.DBSCAN(eps=0.8, min_samples=5, metric='euclidean') dbscan.fit(X) C_db = dbscan.labels_.astype(np.int) sns.countplot(C_db) C_db[:10] # what does the cluster label -1 mean? sum([1 for i in C_db if i==-1]) fig, ax = plt.subplots() ax.scatter(X2D[:,0], X2D[:,1], c=C_db) plt.show() try: # Should work in Google Colab !wget https://raw.githubusercontent.com/christopherjenness/DBCV/master/DBCV/DBCV.py except: pass # Download manually on Windows import DBCV DBCV.DBCV(X, C_db)Example usageHere we will demonstrate how to use the `snapedautility` package for your data science project. Importsimport snapedautility from palmerpenguins import load_penguins print(snapedautility.__version__)0.1.0Sample DataWe will be using the penguins data as an example.df = load_penguins() df.head()Plot HistogramsTo generate histograms and bar plots for your data, import the `plot_histograms` function from the module `snapedautility.plot_histograms`.from snapedautility.plot_histograms import plot_histograms plot_histograms(df, ["species", "bill_length_mm", "island"], 100, 100)Plot CorrelationTo generate the correlation plots for your data, import the `plot_corr` function from the module `snapedautility.plot_corr`.from snapedautility.plot_corr import plot_corr plot_corr(df, ["bill_length_mm", "bill_depth_mm", 'species'])Detect OutliersTo detect the outliers in your data, import the `detect_outliers` function from the module `snapedautility.detect_outliers`.from snapedautility.detect_outliers import detect_outliers [lq, hq], chart = detect_outliers(df["body_mass_g"], 250, 250) print(f"The lower bound and the upper bound are {round(lq, 2)} and {round(hq, 2)}") chartThe lower bound and the upper bound are 1750.0 and 6550.0Copyright 2020 , Apache-2.0 LicenseLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License. **A Guide to creating citations using only a DOI***Note: This Notebook seeks to retrieve citations for papers in 3 output formats: JSON, BibTeX, and HTML. The citations in this notebook retrieve the following information about a paper: Title, Authors, Journal Name, Volume Number, Page Range, Year, Hyperlink(s) to the Article, and DOI of the Article. If you are looking for more detailed citations or other outputs then please refer to Step 3 after completing Steps 1 & 2!* You only need to click run on each cell in this notebook and everything should populate normally!Step 1. Get the DOI of the paper you want to citeStep 2. Use the DOI to search for the Bibcode -- Enter the DOI in the prompt provided -- The output will be the Bibcode for the paperStep 3. The Bibcode will automatically be populated for you to search with the ADS method. The output is customizable, with all formats possible. This method includes links to the paper by DOI URL & ADS URLNote. (This May Happen to Some Users) Error! My paper was not in ADS! There is no Bibcode for the paper! -- If there is no Bibcode then please move on to Step 4Step 4. 
Use DOI to search in Urllib method -- your DOI is already populated from step 1 in Urllib Method -- Output will be the full citation as a plain text/JSON output. Output is customizable as HTML through Step 5, or in BibTeX format through Step 6Step 5. The full JSON output will be populated from the Urllib method to encode it as HTML Step 6. The DOI is already populated and you will recieve the full BibTeX citation for the paper using the GScholar Method *All Done!* Now you have the citation for your paper in 3 different formats! Before Doing Anything, Import the Necessary Modules# Import these modules # Step 2 Bibcode import requests import json # Step 4 urllib import urllib.request from urllib.error import HTTPError #Step 5 html encoder import html # Step 6 bibtex import re import logging from bs4 import BeautifulSoup from html.entities import name2codepoint from urllib.request import quote, Request, urlopenStep 1. Enter your DOI and TokenYou can retrieve the DOI for your paper in many different ways.1. What is a DOI? - The DOI is a unique alphanumeric string assigned by the International DOI Foundation, to identify content and provide a persistent link to its location on the Internet. It is written in the general format of '10.1000/xyz123'2. Where Can I find a DOI? - The DOI should be written on the top left or top right corner of your paper, it is written as 'DOI:10.1000/xyz123' - The DOI should be listed in the details or citation section on the publishers website where you have found your paper - The DOI may also be written as a link, next to the papers information on the publishers website, the link is written as https://doi.org/10.1000/xyz123 or https://dx.doi.org/10.1000/xyz1233. How do I run this notebook? - In order to use this notebook please type in "ENTER HERE" spots the DOI in the '10.1000/xyz123' format, *NOT* as hyperlink - You only need to run each cell after entering your DOI, everything else will populate for you# Enter your token here: You need this for using step 2 and 3 # A token can only be used once you have an account on NASA/ADS https://ui.adsabs.harvard.edu/. # Once you have an account click on 'Account', then click on 'Customize Settings' on the dropdown menu. # In 'Customize Settings' there is a panel to the left of the screen, if you scroll down that panel you will see 'API Token'. # Click on 'API Token' and then click on 'generate a new key'. # If your using the django application you can enter your token in the settings file and Import ADS_TOKEN from settings.py token=" Your Token " doi = input("Enter DOI Here: ")Step 2. 
Retrive Bibcodedef get_citeproc_authors(cpd_author): if cpd_author is None: return None names = [] for author in cpd_author: try: family = author['family'].title() except KeyError: name = author['name'] names.append(name) continue try: given = author['given'] except KeyError: # This author has first name names.append(family) continue initials = given.split() initials[0] = '{}.'.format(initials[0][0]) initials = ' '.join(initials) names.append('{} {}'.format(initials, family)) return ', '.join(names) def parse_citeproc_json(citeproc_json): """Parse the provided JSON into a Ref object.""" cpd = json.loads(citeproc_json) try: if cpd['type'] != 'article-journal': return None except KeyError: return None authors = get_citeproc_authors(cpd.get('author', '')) title = cpd.get('title', '').replace('\n', '') journal = cpd.get('container-title', '') volume = cpd.get('volume', '') page_start, page_end = cpd.get('page', ''), '' if page_start and '-' in page_start: page_start, page_end = page_start.split('-') article_number = cpd.get('article-number', '') doi = cpd.get('DOI', '') url = cpd.get('URL', '') try: year = cpd['issued']['date-parts'][0][0] except (KeyError, IndexError): year = None try: bibcode = cpd.get('bibcode', '') except (KeyError, IndexError): bibcode = None # # ============================================================================= # # OUTPUT # # ============================================================================= ref = [authors, title, journal, volume, year, page_start, page_end, doi, url, article_number, citeproc_json] return ref def get_citeproc_json_from_doi(doi): base_url = 'http://dx.doi.org/' url = base_url + doi req = urllib.request.Request(url) req.add_header('Accept', 'application/citeproc+json') try: with urllib.request.urlopen(req) as f: citeproc_json = f.read().decode() except HTTPError as e: if e.code == 404: raise ValueError('DOI not found.') raise return citeproc_json def get_source_from_doi(doi): citeproc_json = get_citeproc_json_from_doi(doi) ref = parse_citeproc_json(citeproc_json) return ref doi_fetched = get_source_from_doi(doi) rdoi = doi rdoi_bs = rdoi.replace("\\", "%2F") # Remove backslash and replace with URL code for backslash rdoi_fs = rdoi_bs.replace("/", "%2F") # Remove forwardslash and replace with URL code for backslash rurl = requests.get("https://api.adsabs.harvard.edu/v1/search/query?q=doi:"+rdoi_fs,\ params={"q":"*:*", "fl": "*", "rows":2000}, headers={'Authorization': 'Bearer ' + token}) todos = json.loads(rurl.text) todos_response = todos.get('response', '') Bibcode = (todos_response['docs'][0]['bibcode']) print("Bibcode:",Bibcode)Step 3. ADS Method Note before using ADS Method 1. Exporting using bibcodes require two things. - A Bibcode Number (which you got from step 2) - A Token--You will need to know what a Token is. It must be used whenever you want to access the ADS database. A token can only be used once you have an account on NASA/ADS https://ui.adsabs.harvard.edu/. Once you have an account click on 'Account' in the top right hand corner, then click on 'Customize Settings' on the dropdown menu. In 'Customize Settings' there is a panel to the left of the screen, if you scroll down that panel you will see 'API Token'. Click on 'API Token' and then click on 'generate a new key'. - You are technically using ADS's API when you are using this method. So for any questions/concerns please refer to the NASA/ADS API Information tool on GitHub https://github.com/adsabs/adsabs-dev-apiaccess-settings 2. 
The benefits of this method are the endless choices to customize your citation output. - You can get more information such as... the abstract, copyright, citation count, author affiliation, keywords, publication category and arXiv e-print number, etc. - You can search more than 1 bibcode at a time - You have more output options such as... EndNote, ProCite, RIS (Refman), RefWorks, MEDLARS, AASTeX, Icarus, MNRAS, Solar Physics (SoPh), DC (Dublin Core) XML, REF-XML, REFABS-XML, VOTables and RSS - This notebook does not display examples of all of these output format options, if you are interested in any of these choices or extra features please refer to http://adsabs.github.io/help/actions/export 3. The first option is to retrieve a citation where the output is in HTML unique character (JSON) format 4. The second option is to retrieve a citation where the output is in BibTeX format 6. The third option is to retrieve a citation where the output is in HTML converted character format *Overall you need to make an account on ADS in order to use this method.**If you do not want to make an account then use the BibTeX citation from step 6 and if you want, use steps 4 & 5 to retrieve html and JSON citation formats, in steps 4 & 5 you only need to enter the DOI to retrieve citations (the DOI is set to populate for you automatically)**However there are many benefits to using the ADS method, your citation output is completely customizable! So if your willing and you have your Bibcode then its recommended to use this method!* After running the cell below, you will recieve an HTML reference with the characters &, , and “ included# HTML with the characters &, <, >, and “ included payload = {"bibcode": ["{}".format(Bibcode)], "sort": "first_author asc", "format": '''{"ref_json": {"authors": "%I", "title": "%T", "journal": "%J", "volume": "%V", "start-page": "%p", "end-page": "%P", "year": %Y, "doi": "%d", "bibcode": "%u"}}''' } r = requests.post("https://api.adsabs.harvard.edu/v1/export/custom", \ headers={"Authorization": "Bearer " + token, "Content-type": "application/json"}, \ data=json.dumps(payload)) response_json = r.json() ref_json = json.loads(response_json['export'])['ref_json'] print('authors:', ref_json['authors']) print('title:', ref_json['title']) print('journal:', ref_json['journal']) print('volume:', ref_json['volume']) print('start-page:', ref_json['start-page']) print('end-page:', ref_json['end-page']) print('year:', ref_json['year']) print('doi:', ref_json['doi']) print('bibcode:', ref_json['bibcode'])After running the cell, you will recieve a BibTeX reference# BibTeX Reference # authors used to be %I payload = {"bibcode": ["{}".format(Bibcode)], "sort": "first_author asc", "format": '''{"ref_json": {"encoder": "%ZEncoding:latex\\bibitem", "journal": "%J", "title": "%T", "volume": "%V", "start-page": "%p", "end-page": "%P", "pages":"%pp", "year": %Y, "author": "%A", "doi": "%d", "key_format":"%ZAuthorSep:" and "", "bibcode": "%u"}}''' } r = requests.post("https://api.adsabs.harvard.edu/v1/export/custom", \ headers={"Authorization": "Bearer " + token, "Content-type": "application/json"}, \ data=json.dumps(payload)) response_json = r.json() ref_json = json.loads(response_json['export'])['ref_json'] print(f'@ARTICLE{{{doi},') print('\tauthor=', "{{{0}}},".format(ref_json['author'])) print('\ttitle=', '"{{{0}}}",'.format(ref_json['title'])) print('\tjournal=', "{{{0}}},".format(ref_json['journal'])) print('\tvolume=', "{{{0}}},".format(ref_json['volume'])) print('\tpages=', 
"{{{0}}},".format(ref_json['pages'])) #print('\tstart-page=', '({}),'.format(ref_json['start-page'])) #print('\tend-page=', '({}),'.format(ref_json['end-page'])) print('\tyear=', "{{{0}}},".format(ref_json['year'])) print('\tdoi=', "{{{0}}},".format(ref_json['doi'])) print('\tbibcode=', "{{{0}}}".format(ref_json['bibcode'])) print("}") # Note if this gives you an error then please remove "encoder": "%ZEncoding:latex\\bibitem", and enter a \ before each J, T, V, # etc. therefore "journal": "%J", would be changed to "journal": "%\J", thereby encoding the journal name into BibTeX format # this error occurs when the bibtex encoder cannot encode a section of the citation.After running the cell below, you will recieve an HTML reference with the characters &, , and “ converted to & amp; & lt; & gt; and & quot; respectively.# HTML with the characters &, <, >, and “ are converted to &, <, >, and ", respectively. payload = {"bibcode": ["{}".format(Bibcode)], "sort": "first_author asc", "format": '''{"ref_json": {"encoder": "%ZEncoding:html

", "authors": "%I", "title": "%T", "journal": "%J", "volume": "%V", "start-page": "%p", "end-page": "%P", "year": %Y, "doi": "%d", "bibcode": "%u"}}''' } r = requests.post("https://api.adsabs.harvard.edu/v1/export/custom", \ headers={"Authorization": "Bearer " + token, "Content-type": "application/json"}, \ data=json.dumps(payload)) response_json = r.json() ref_json = json.loads(response_json['export'])['ref_json'] print('authors:', ref_json['authors']) print('title:', ref_json['title']) print('journal:', ref_json['journal']) print('volume:', ref_json['volume']) print('start-page:', ref_json['start-page']) print('end-page:', ref_json['end-page']) print('year:', ref_json['year']) print('doi:', ref_json['doi']) print('bibcode:', ref_json['bibcode'])Step 4. Urllib methodIf you did not have a bibcode or you want a plain text reference then use this method#doi = input("Enter doi Here: ") doi_fetched = get_source_from_doi('{}'.format(doi)) # Below are the parameters for searching your citation # if you would like to add or change anything then refer to the initial code in step 2 above to make your changes reference = (doi_fetched[0],doi_fetched[1],doi_fetched[2],doi_fetched[3],doi_fetched[4],doi_fetched[5],doi_fetched[6],doi_fetched[7],doi_fetched[8],doi_fetched[9]) print ('Authors:', doi_fetched[0], '') print ('Title:', doi_fetched[1], '') print ('Journal:', doi_fetched[2], '') print ('Volume:', doi_fetched[3], '') print ('Year:', doi_fetched[4], '') print ('Page Start:', doi_fetched[5], '') print ('Page End:', doi_fetched[6], '') print ('Article Number:', doi_fetched[9],'') print ('DOI:', doi_fetched[7], '') print ('URL:', doi_fetched[8], '')Step 5. Encoding JSON in HTML Reference is populated from the Urllib Method# Here is the populated output from the urllib method # This will replace ("""& < " '> """ ) with (& < " ' >) s = html.escape( """& < " '> """ ) html.escape(s) html.escape("{}".format(reference))Step 6. BibTeX citation"""Library to query Google Scholar. Call the method query with a string which contains the full search string. Query will return a list of citations. """ GOOGLE_SCHOLAR_URL = "https://scholar.google.com" HEADERS = {'User-Agent': 'Mozilla/5.0'} FORMAT_BIBTEX = 4 FORMAT_ENDNOTE = 3 FORMAT_REFMAN = 2 FORMAT_WENXIANWANG = 5 logger = logging.getLogger(__name__) # we are using query in our code def query(searchstr, outformat=FORMAT_BIBTEX, allresults=False): """Query google scholar. This method queries google scholar and returns a list of citations. Parameters ---------- searchstr : str the query outformat : int, optional the output format of the citations. Default is bibtex. allresults : bool, optional return all results or only the first (i.e. best one) Returns ------- result : list of strings the list with citations """ logger.debug("Query: {sstring}".format(sstring=searchstr)) searchstr = '/scholar?q='+quote(searchstr) url = GOOGLE_SCHOLAR_URL + searchstr header = HEADERS header['Cookie'] = "GSP=CF=%d" % outformat request = Request(url, headers=header) response = urlopen(request) html = response.read() html = html.decode('utf8') # grab the links tmp = get_links(html, outformat) # follow the bibtex links to get the bibtex entries result = list() if not allresults: tmp = tmp[:1] for link in tmp: url = GOOGLE_SCHOLAR_URL+link request = Request(url, headers=header) response = urlopen(request) bib = response.read() bib = bib.decode('utf8') result.append(bib) return result def get_links(html, outformat): """Return a list of reference links from the html. 
Parameters ---------- html : str outformat : int the output format of the citations Returns ------- List[str] the links to the references """ if outformat == FORMAT_BIBTEX: refre = re.compile(r'OverviewAuthored By: and Welcome to your Introduction to Python Workshop!Today will cover the basics in order to get you familiar with general coding concepts as well as Python syntax. Python is arguably one of the most user-friendly languages. The syntax is fairly general and easy to learn and a lot of the conditionals and loops that we write such as if statements read exactly as you would write them in english. Some beginner topics we will cover are:- Variables- Operations- If, Elif, and Else statements- For and While Loops- Writing a Function- Control FlowWith these concepts we will work through short examples so you can gain confidence with the topics. Towards the end of the workshop we will put everything we've learned together in order to write your own program! Let's start off with introducing you to the file type we are working with. Most files will end with the extension `.py` these signify that they are Python files and tell your computer what language to execute them in. With this extension it is more likely that the code will exist in the file and when you execute it all of the output will print to a separate terminal window.This particular file is a `.ipynb` it is a type of Python file where both the input and the output are visible in the same file. You can press the play button on the left side of the code blocks to run the code they contain. Alternatively, you can click on a code block and hit `shift + enter` as a shortcut. Coding Best PracticesEvery developer has their own sense of style when it comes to coding. That style can be whatever you like as long as it is clean and easy to read. The goal is to make your code organized in such a way that a new programmer could look at it and understand what is happening. A big portion of organizing is proper commenting. A comment is a part of your code, denoted by a `` that will not be run and shown in the output of your solution. Run the following block and you will see that only the second line will be visible in the output# This is formatted as a comment print("This is not a comment")This is not a commentAnother important aspect is consistency. This includes things such as:- using tabs for indentation so they all match - always put variables in camelCase (where the first letter of each word is capitalized) or put them in snake_case (where there is an underscore between words) Program Input and OutputInput is information that the user can provide to the program either by a prompt or with arguments to functions. Output is what is printed to the console for the user to be able to see.These concepts utilize Python's built in functions. This means that the function name and definition is saved within the python software and you don't need to define it yourself. You utilize a function by typing it's name followed by a set of round brackets that contain the appropriate arguments. 
Some examples of these functions that you will see used later on include:- print()- type()- input() Output: Print Statements The `print()` function allows us to type a message that we want to user to see when the program is executed.print("Hello World!")Hello World!We can format our print statements by adding tabs or new lines by using these special characters:- To add a tab, use `\t`- To add a new line, use `\n`print("Hello\tWord") print("Hello\nWorld")Hello Word Hello WorldInput: Prompt User We can allow users to interact with our program by using the `input()` function. You can write a prompt in between the brackets to provide to the user.You can also save the answer the user provides by assigning it to a variable. We do this in python using the equal sign `=`. This means that we can use that variable to refer back to the user's answer later in our program.# Prompting a user for input name = input("Enter your name: ") print("Hello", name)Enter your name: Emily Hello EmilyData Types Basic Data Types In Python there are variables which are items that are given a name so they can be stored in memory.All of these variables can have specific types that help define what they are and what operations they can undergo.``` A string is any number or characters including punctuation and spaces that are surrounding by quotation marks"Purple is my favourite Colour" An integer is any whole number 1238846372 A float is any number that includes decimals5.27992.00 A boolean are the True & False keywordsTrueFalse A list is items contained with [ ] and separated by commas["Purple", 238, 5.27, True] A dictionary is a collection of key:value pairs, we can use the key to find it's matching value.{1: "Purple", 2: "Yellow", 3: "Red"}```It's important to think about what type of variable you are working with so you can apply the proper types to it.# First guess what the type should be and write it as a comment. # Then run the next block to see if you were right! a = 5 b = 2.0 print(a, "is of type", type(a)) print(b, "is of type", type(b))5 is of type 2.0 is of type Type CastingIn the previous section we learned about all of the different data types within Python. We saw how we can use the `type()` function in order to discover what type a value corresponds to. Sometimes we want to change what the type of a variable is and we can do that with **type casting**. By wrapping the variable you want to update, similar to how you would use a function, you can change it's type. - To change a variable to an int: use int()- To change a variable to a string: use str()- To change a variable to a float: float()anInt = 890 print(type(anInt)) convertToString = str(anInt) print(type(convertToString)) ListsLists are iterable. This means that you can go through each element in the list and apply a function or logic so that all items have had the effect. Lists can also be indexed. That means that you can specify what item you want back by using it's position. It is possible to have duplicates within a list as well.Positive numbers mean that you are counting from the front of the list back and negative numbers mean you are counting from the back of the list forward.# List Indexing myList = [5, 6, 23, 94, 12] # What do you think the following indexes would output? print(myList[0]) print(myList[-1]) print(myList[3])5 12 94You can also add to a list after it has already been created. 
This is done by using the `.append()` function.This function is different than the ones we've seen in earlier examples because we must supply a parameter within the brackets as well as before the command. `listOne.append("Hello")`This line of code says that we are going to add the word `"Hello"` to the end of the list named `listOne`.# Appending to a list listA = [1, 2, 3, 4] print(listA) # This line says: # Add the number 5 at the end of listA listA.append(5) print(listA)[1, 2, 3, 4] [1, 2, 3, 4, 5]It's also possible to add one list to the end of another. We do this using `.extend()`.# Extending a list listA = [1, 2, 3, 4] listB = [5, 6, 7, 8] print(listA) listA.extend(listB) print(listA)[1, 2, 3, 4] [1, 2, 3, 4, 5, 6, 7, 8]DictionariesDictionaries are a collection of items that are matched in key-value pairs. They are unordered which means they can't be searched and indexed the way a list can be so instead we access information from them by knowing the key for the value we are looking for. We can search for a value using a key but we cannot search for a key using a value. Every key that exists within a dictionary must be unique, this means there can't be any duplicates.In python we use square brackets to define a list [] and for dictionaries we define them using curly brackets {}. Similarly we separate each of the individual items using a comma, where each item or key-value pair is entered in the following form, key:value.# Searching for a dictionary key myDictionary = {1: "value", "key": 2} # If I search for "key" it will return the value associated to that word print(myDictionary["key"])2Now let's try and example where you can guess the output before running the next cell.# Searching for multiple keys randomDictionary = {18: "age", 2021: "year", "apple": "red", "banana": "yellow", "abc": 123, "favouriteNumber": 972} print(randomDictionary["apple"]) print(randomDictionary["abc"]) print(randomDictionary[18])red 123 ageSimilar to a list you can also add to a dictionary after it has already been created. Since dictionaries cannot have duplicates if you were to assign a different value to a key that already exists it would overwrite the previous value.fruitDictionary = {"apple": "red", "banana": "yellow"} # Add a value fruitDictionary["grapefruit"] = "pink" print(fruitDictionary) # Overwrite a value fruitDictionary["apple"] = "green" print(fruitDictionary){'apple': 'red', 'banana': 'yellow', 'grapefruit': 'pink'} {'apple': 'green', 'banana': 'yellow', 'grapefruit': 'pink'}Operators**Operators** are special symbols that can be used to perform arithmetic and logical computations. We can use operators to evaluate mathematical expressions and make decision within our code. The symbols are called **operators**, and the values that the operator operates on are called **operands**.Let's check out some of the most common types of operators. Arithmetic Operators Arithmetic operators are used to perform basic math. 
You can use multiple arithmetic operators in one statement, and just like regular math, these mathematical statements follow the order of BEDMAS.In this section, the terms *numeric values* and *numbers* refer to operands that are integers or floats.# Overview of arithmetic operators num1 = 10 num2 = 5 print("num1:", num1) print("num2:", num2) # Addition num3 = num1 + num2 print("Addition: 10 + 5 =", num3) # Subtraction num4 = num1 - num2 print("Subtraction: 10 - 5 =", num4) # Multiplication num5 = num1 * num2 print("Multiplication: 10 * 5 =", num5) # Exponents num6 = num1 ** num2 print("Exponent: 10^5 =", num6) # Modulus num7 = num1 % num2 print("Modulus: 10 mod 5 =", num7) # Simple division num8 = num1 / num2 print("Division: 10 / 5 =", num8)num1: 10 num2: 5 Addition: 10 + 5 = 15 Subtraction: 10 - 5 = 5 Multiplication: 10 * 5 = 50 Exponent: 10^5 = 100000 Modulus: 10 mod 5 = 0 Division: 10 / 5 = 2.0**Tip**: If you want to use the result of a math operation inside the round brackets of a function, you do not need to assign it to a variable. Instead, you can simply provide the operation as the parameter.num1 = 1 num2 = 2 sum = 1 + 2 # What will the following lines print? print(sum) print(num1 + num2) print(1 + 2) print(num1 + 2)3 3 3 3We can use multiple operators in one statement, and the order of operations will follow BEDMAS just like in traditional mathematics.# Order of operations print(1 + 2 + 3) # What will the following lines print? print(2 + 5 * 2) print((2 + 5) * 2) # Hint: the operation inside the brackets will take precedence6 12 14Addition and concatenation: `+`We can use the `+` operator to add two numbers together or put two strings together. We can add integers and floats together, including negative numbers.# Numeric Addition # Adding integers int1 = 10 int2 = 20 print(int1 + int2) # This will result in an integer value # Adding floats float1 = 2.5 float2 = 3.66 print(float1 + float2) # This will result in a float value30 6.16We can also use the + operator to combine strings together. This is called called **concatenation**. Concatenation **appends** a string to another string. This means that the resulting value will be equal to the combined operand strings in the order that the operands are presented.# String concatenation hello_string = "Hello" world_string = "World" hello_word = hello_string + world_string print(hello_word) # If we don't need to store the concatenated string in a variable, we can: # Using multiple strings separated by a comma as a parameter to the print function # will automatically add spaces between the strings. print(hello_string, world_string) # Or use the + operator to add any characters we want print(hello_string + " " + world_string + "!")HelloWorld Hello World Hello World!Note that you cannot add two variables together if they are different data types, unless the variables are numeric such as floats and integers.# Invalid addition example: adding a string and numeric value str1 = "hello" num1 = 4 # What do you think will happen when you uncomment the following line and run this code? print(str1 + num1)We can use type casting to get around this problem.print("hello" + str(4)) str1 = "4" int1 = 12 print(int(str1) + int1)16Subtraction: `-`The `-` operator can be used to subtract numbers. 
Unlike the `+` operator, we cannot use `-` with strings or other non-numeric data types.# Subtraction # Subtracting integers will result in an integer int_diff = 10 - 5 print(int_diff) # Subtracting floats will result in a float float_diff = 10.5 - 2.75 print(float_diff) # Subtracting integers from floats and vice versa will result in a float int_float_diff = 10 - 2.0 print(int_float_diff) # We can get a negative value as an answer as well negative_diff = 2 - 10 print(negative_diff)5 7.75 8.0 -8We cannot use the `-` operator with non-numeric data types. The following examples will throw an error:# Invalid subtraction example: attempting to subtract strings # Uncomment lines 7 and 8 to view the error rainbow_string = "Rainbow" bow_string = "bow" string_diff = rainbow_string - bow_string print(string_diff) # Invalid subtraction example: attempting to subtract lists # Uncomment lines 7 and 8 to view the error my_list1 = ['dog', 'cat', 'fish'] my_list2 = ['dog', 'cat'] list_diff = my_list1 - my_list2 print(list_diff)Multiplication: `*`We can use the `*` operator to multiply numbers, multiply strings by an integer, and multiply lists by an integer.# Multiplication # Multiplying integers will result in an integer int_mult = 10 * 5 print(int_mult) # Multiplying floats will result in a float float_mult = 10.5 * 2.275 print(float_mult) # Multiplying integers from floats and vice versa will result in a float int_float_mult = 10 * 2.0 print(int_float_mult)50 23.8875 20.0Recall that concatenation is the process of appending values together.Multiplying a **string** value by a **positive integer** value *n* results in a concatenated string with *n* repetitions.# Multiplying a string by an integer will result in a string with repetitions hello_str = "Hello" repeating_str = hello_str * 5 print(repeating_str)HelloHelloHelloHelloHelloMultiplying a **list** by a **positive integer** value *n* results in a new list containing the original list's elements appended *n* times.Similarly to the string example above, you cannot use the `*` operator between a list and a non-integer value.# Multiplying a list by an integer will result in a list with repeating elements my_list = ["apple", "banana", "carrot"] repeated_list = my_list * 3 print(repeated_list)['apple', 'banana', 'carrot', 'apple', 'banana', 'carrot', 'apple', 'banana', 'carrot']You cannot multiply a non-numeric value by a non-integer value.# Invalid multiplication example: multiplying a string by a non-integer value # Uncomment line 5 to view the error hello_str = "Hello" print(hello_str * 2.5) # Invalid multiplication example: multiplying a list by a non-integer value # Uncomment line 5 to view the error my_list = ["apple", "banana", "carrot"] print(my_list * 2.5)The `*` operator cannot be used on dictionaries because dictionaries are not allowed to contain duplicate keys.# Invalid multiplication example: multiplying a dictionary by an integer value # Uncomment line 5 to view the error my_dict = {"a": "apple", "b": "banana", "c": "carrot"} print(my_dict * 2)Division: `/` and `//`**The `/` operator is used to divide numeric values.**Performing this operation with two positive or negative numeric operands will always result in a **float**, even if both operands are integers. 
**The `//` operator is used to divide numeric values and round the result down to the nearest integer.**If we want to ensure that the result of a division operation returns an **integer**, we can use the `//` operator to perform **floor division**.This operator performs regular division and then rounds the float value **down** to the nearest integer. Let's try dividing some numbers to see what happens.# Simple division int1 = 10 int2 = 4 simple_division1 = int1 / int2 simple_division2 = int2 / int1 # What do you think the following lines will print? print("Simple division:") print("10 / 4 =", simple_division1) print("4 / 10 =", simple_division2)Simple division: 10 / 4 = 2.5 4 / 10 = 0.4Now let's try performing floor division with the same numbers we used in the previous example.# Floor division int1 = 10 int2 = 4 floor_division1 = int1 // int2 floor_division2 = int2 // int1 # What do you think the following lines will print? print("Floor division:") print("10 // 4 =", floor_division1) print("4 // 10 =", floor_division2)Floor division: 10 // 4 = 2 4 // 10 = 0Other arithmetic operatorsNow we have covered all of the most commonly used arithmetic operators.The following operators are beyond the scope of this introductory Python workshop, but we will briefly take a look at them. **Exponent**: `**`The `**` operator raises the left operand to the power of the right operand. This operator can only be used with numeric values.For example, the expression *x* \*\* *y* can be read as "*x* to the power of *y*."# Exponentiation print(2 ** 3)8**Modulus**: `%`The `%` operator returns the **remainder of a division operation** between two numeric values. In mathematics, this is referred to as the *modulus* or *mod* function.The expression *x* % *y* can be read as "the remainder of *x* divided by *y*."# Modulus print(10 % 4)2Shorthand NotationWhen we store a value in a variable, sometimes it is useful to directly update that variable by performing a mathematical operation on the value it currently holds. **Summary of Compound Operators*** `x += 1` is equivalent to `x = x + 1` * `x -= 2` is equivalent to `x = x - 2` * `x *= 3` is equivalent to `x = x * 3` * `x /= 4` is equivalent to `x = x / 4` * `x //= 5` is equivalent to `x = x // 5` * `x **= 6` is equivalent to `x = x ** 6` * `x %= 7` is equivalent to `x = x % 7` A short demonstration of these compound operators is shown below.
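This example is an added illustration (not part of the original workshop) showing that each compound operator updates the variable in place, exactly as listed in the summary above.

# Compound assignment updates the variable in place
x = 10
x += 1    # same as x = x + 1  -> 11
x -= 2    # same as x = x - 2  -> 9
x *= 3    # same as x = x * 3  -> 27
x /= 4    # same as x = x / 4  -> 6.75 (division always produces a float)
x //= 5   # same as x = x // 5 -> 1.0
x %= 7    # same as x = x % 7  -> 1.0
print(x)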
If they are equal, it returns false.# Not equals # Numeric values print(3 != 100) print(2.5 != 2.5) # Strings print("Bob" != "Mary") # Lists list1 = ["a", "b"] list2 = ["c", "d"] print(list1 != list1) print(list1 != list2) # Dictionaries my_dict = {"a": "apple", "b": "banana", "c": "carrot"} my_dict1 = {"a": "apple", "b": "banana", "c1": "carrot"} print(my_dict != my_dict) print(my_dict != my_dict1) # Mathematical operations print(3 * 4 != 2 * 6)True False True False True False True False**Greater Than: `>`**Returns true if the left operand is greater than the right operand. If not, it returns false.**Greater Than or Equal To: `>=`**Returns true if the left operand is greater than or equal to the right operand. If not, it returns false.# Greater Than print(3 > 100) print(10 > 2.5) print(10 > 10) # Greater Than or Equal To print(3 >= 100) print(10 >= 2.5) print(10 >= 10)False True False False True True**Less Than: `<`**Returns true if the left operand is less than the right operand. If not, it returns false.**Less Than or Equal To: `<=`**Returns true if the left operand is less than or equal to the right operand. If not, it returns false.# Less Than print(3 < 100) print(10 < 2.5) print(10 < 10) # Less Than or Equal To print(3 <= 100) print(10 <= 2.5) print(10 <= 10)True False False True False TrueLogical Operators The logical operators `and`, `or`, and `not` are used to determine whether multiple conditions are met.# Logical Operators x = 2 > 3 # This is False y = 3 + 3 == 12 / 2 # This is True print(x and y) print(x or y) print(not x) print(not y)False True True FalseMembership OperatorsThe membership operators `in` and `not in` are used to check whether or not an object contains another object.# Membership Operators my_list = ["a", "b", "c"] user_input = input("Enter a letter: ") if (user_input in my_list): print("Your letter is in the list!") elif (user_input not in my_list): print("That letter is not in the list!")Now let's try combining logical and membership operators.# Example of logical and membership operators my_list = ["a", "b", "c"] if ("x" in my_list and "a" in my_list): print("x and a are both in the list!") elif ("x" in my_list or "a" in my_list): print("either x or a is in my_list, but not both!") else: print("x and a are not in my_list!") if ("x" not in my_list and "a" in my_list): print("x isn't in my_list, but a is in my_list!")either x or a is in my_list, but not both! x isn't in my_list, but a is in my_list!Flow ControlUp until now, every line of code in each of our examples was executed. Sometimes we want certain parts of our code to be executed only if certain conditions are satisfied. This concept is called **flow control**. There are three main types of statements that we can use to accomplish this:- If...else statements- For loops- While loops If...else Statements If we want to run certain parts of code based on whether a condition is true or false, we can use `if`...`else` statements. Everything written under the `if` statement (i.e. the body) will be executed only if the expression evaluates to true.
Otherwise, that code will be skipped.# If statement if (100 > 20): print("This statement will be printed") if (2 + 3 != 5): print("This statement will not be printed")This statement will be printedWe can use the `else` statement to handle the case where the `if` condition is not met.# If...else if (100 < 20): print("The statement was true!") else: print("The statement was false!")The statement was false!We can be even more specific by using an `elif` statement, which stands for "else if". Multiple `elif` statements can be used one after another.# If...elif...else # Challenge: Change the values of num1 and num2 to execute different parts of this code! num1 = 14 num2 = 1 if (num1 + num2 < 10): print("The if statement was executed") elif (num1 + num2 == 15): print("The first elif statement was executed") elif (num1 + num2 == 20): print("The second elif statement was executed") else: print("The else statement was executed because none of the above conditions were met")The first elif statement was executedWe can also put `if` statements inside other `if` statements. These are called **nested `if` statements**. Pay close attention to your indentation when using nested statements!# Nested if statements # Challenge: Change the value of num to execute different parts of this code! num = 4 if num != 0: if num > 0: print("The number is positive") else: print("The number is negative") else: print("The number is 0")The number is positiveFor loops The **for loop** is used to iterate over a sequence of values and perform some actions on each of them. You can use it with data types such as **strings**, **lists**, and **dictionaries**. *Iterating* over an object means that each individual item inside the object is operated on sequentially. For lists, each iteration looks at one element at a time. For strings, each iteration looks at one character at a time.# Iterating over a list pet_list = ["dog", "cat", "fish", "hamster"] for pet in pet_list: print("I have a " + pet) # Iterating over a string hello_world = "Hello World!" for character in hello_world: print(character)H e l l o W o r l d !When iterating over a dictionary, the variable after the "for" refers to the *key* of each item inside the dictionary. We can then reference the values corresponding to those keys with regular dictionary notation. Let's see how we can use this:# Iterating over a dict pet_dict = {"dog": "a", "cat": "b", "fish": "c", "hamster": "d"} for pet in pet_dict.keys(): print("I have a " + pet + " named " + pet_dict[pet])I have a dog named a I have a cat named b I have a fish named c I have a hamster named dSometimes we want to perform some operation a specific number of times. In order to use the *for* loop with integer values, we use the function `range(number_of_repetitions)`. This function produces the sequence of numbers from 0 up to, but not including, `number_of_repetitions`, because the count starts at 0 rather than 1. Let's take a look at this function's behaviour:# For loop number = 10 for number in range(number): print(number)0 1 2 3 4 5 6 7 8 9While LoopsThe **while loop** is used to perform some actions only while a specified condition is `True`. The condition can be built from any of the comparison, logical, or membership operators we saw earlier. If the condition given in the `while` statement is `True`, the body of the while loop will be executed. After the code is completed, the `while` condition is evaluated again.
This process is repeated until the `while` condition evaluates to `False`.Because of this, it is extremely important to ensure that the body of your while loop contains an **exit condition**: something inside the loop must eventually make the `while` condition evaluate to `False`, otherwise the loop will run forever.# While loop num1 = 4 num2 = 12 while (num1 < num2): print(num1) num1 = num1 + 1 # An infinite loop - be careful to avoid this! while True: print(".")Combining user input with flow controlNow that we've covered the basics, let's try to combine what you learned!first_name = input("Enter your first name: ") last_name = input("Enter your last name: ") full_name = first_name + " " + last_name print("Hello", full_name)Enter your first name: 10 Enter your last name: 6 Hello 10 6By default, the data type of a user's input is a **string**, even if they enter numeric values (such as integers or floats) or boolean values (`True` or `False`). We can **cast** the input to a different data type so we can perform operations on it. We'll see how this works in the following example. Let's see how comparison operators can be used with user input:# Running this code will cause an error age = input("Enter your age: ") # Let's see what happens when we try to use arithmetic operations on this user input: if(age < 16): # Uncomment this line! print("Sorry, looks like you can't drive yet!") else: print("Time to hit the road!") # Now let's try casting the input to an integer so we can perform operations on it age = int(input("Enter your age: ")) if(age < 16): print("Sorry, looks like you can't drive yet!") else: print("Time to hit the road!") # This code will accept an integer as input. magic_number = 7 guess = int(input("Try to guess the magic number! ")) if (guess == magic_number): print("Congratulations You got it!") elif (guess < magic_number): print("Your number is too low") else: print("Your number is too high!") # Notice that we didn't need to perform the "greater than" comparison here. # This is because we already covered all other possibilities # (if the number was equal or too small)Try to guess the magic number! 10 Your number is too high!Now let's see how we can use `for` and `while` loops with user input:# For loop user_number = int(input("Enter a number: ")) for number in range(user_number): print(number) # While loop user_number = int(input("Enter a number: ")) counter = 0 while (counter < user_number): print(counter) counter = counter + 1Enter a number: 4 0 1 2 3Writing FunctionsA function is a way to group specific lines of code together so that they perform a specific task when the function is called. Functions make our programs more organized because they let us reuse code by simply calling the function's name rather than copying the same lines in multiple places. Functions can accept parameters, which are variables used within the function's code, and they can also return a value so that it can be used in other parts of the program. Function StructureEvery function has a specific name. That name is what we will use to call it later on. Calling a function means using its name (and any required parameters) so that its code is executed and the task it was built for is performed.```def function_name(parameters): lines of code return True```- The keyword `def` marks the start of a new function definition. - The function name is whatever unique identifier you choose.
Most of the time it is a short description of the task the function performs.- Parameters are supplied in brackets but are optional.- The return statement can give back a value from the function so it can be used in other areas.It is important to remember that for a code line to be included in the function it has to be indented so that it shows up "underneath" or "inside" of the function definition header. This is normally done with a single tab.# Example def age(birth_year): """ This function takes the user's birth year and returns their age """ current_year = 2021 user_age = current_year - birth_year print("You are " + str(user_age) + " years old!") # Now we call it so that we can see the output age(1999)You are 22 years old!Indentation is very important! Anything you want included in your function has to be indented underneath otherwise the function won't know it belongs and issues will occur! This is the same concept as if statements and loops.# Error related to indentation level def age(birth_year): """ This function takes the user's birth year and returns their age """ current_year = 2021 user_age = current_year - birth_year print("You are " + str(user_age) + " years old!") # Error related to unknown variable since it isn't defined outside the function def age(birth_year): """ This function takes the user's birth year and returns their age """ current_year = 2021 user_age = current_year - birth_year print("You are " + str(user_age) + " years old!")Function ParametersParameters are values that are passed into the function so that it can be used within the code of the function. It is necessary to call the function with all the required parameters otherwise it will fail. This means if you define your function to have 2 variables but you only specify one it will not work.# Purposely Broken Example def favouriteThings(colour, food): print("Your favourite colour is: ", colour) print("Your favourite food is: ", food) # If you only call the function with a colour it will fail # Try running this block you will see an example error favouriteThings("blue") # Working Example def favouriteThings(colour, food): print("Your favourite colour is: ", colour) print("Your favourite food is: ", food) favouriteThings("blue", "pizza")Your favourite colour is: blue Your favourite food is: pizzaIf you run into a scenario where the value that is passed in as a parameter is usually a specific value you can choose to use a default value.A default value is when you set the variable equal to something so that the value is automatically selected and provided to the function. If you suddenly want to supply a different value that is still possible! If you supply the function definition with the value you would like it will override the default. 
Default arguments should always be at the end of the parameter list.# Here you can see that the default of pizza will be printed def favouriteThings(colour, food="pizza"): print("Your favourite colour is: ", colour) print("Your favourite food is: ", food) print("Example #1") favouriteThings("blue") # Here I will override that with the variable I want to be used instead def favouriteThings(colour, food="pizza"): print("Your favourite colour is: ", colour) print("Your favourite food is: ", food) print("Example #2") favouriteThings("blue", "pasta")Example #1 Your favourite colour is: blue Your favourite food is: pizza Example #2 Your favourite colour is: blue Your favourite food is: pastaReturn ValuesThe above example uses a `print` statement so that the user sees the answer whenever the function is called but we could instead use a `return` statement. A return statement would hold the value in memory instead of printing to the screen. This is valuable when you are doing intermediate calculations that the user doesn't need since they only want the end result.# Example def age(birth_year): """ This function takes the user's birth year and returns their age """ current_year = 2021 user_age = current_year - birth_year return "You are " + str(user_age) + " years old!" # Now we call it we see on the first line there isn't an output # For the second when the print statement is added it works age(1999) print(age(1999)) # You can also set the function equal to a variable and the data will be stored in that variable! your_age = age(1999) print(your_age)You are 22 years old! You are 22 years old!Pass StatementWhen planning your program you may want to name all of the functions you will be using without filling each of them out right away.That is where a pass statement is important. It allows you to tell the program that it should skip this portion of the code.def working_function(): pass def broken_function():Predicting next lab results 배경- 환자가 받은 다양한 치료로 근미래의 검사 결과를 맞출 수 있을까? 
문제 종류- binary classification (검사 결과가 정상범위인지 아닌지) 실험- Sepsis 환자를 대상으로 다음 lab 검사 결과의 정상범위 여부를 예측 - Variables used in SOFA: - GCS, MAP, FiO2, Ventilation status (sourced from CHARTEVENTS) - Creatinine, Bilirubin, FiO2, PaO2, Platelets (sourced from LABEVENTS) - Dobutamine, Epinephrine, Norepinephrine (sourced from INPUTEVENTS_MV and INPUTEVENTS_CV) - Urine output (sourced from OUTPUTEVENTS) 교차검증 평가 예측값의 활용- 수련의 교육용 첫번째 검사 결과 예측 대상 : Fi02- https://github.com/MIT-LCP/mimic-code/blob/master/notebooks/aline/aline_sofa.sqlimport sys sys.path.insert(0, './db') import db_con import sqlalchemy import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # Fi02 itemid 찾기 engine = db_con.get_engine() pd.read_sql_query(""" SELECT * FROM d_items WHERE label LIKE '%%FiO2%%'; """, engine)의학 의견- Item선정 기준# sepsis diagnosis 환자들의 FiO2 기록(입원 시점으로 분단위) pat = pd.read_sql(""" SELECT chi.icustay_id, chi.mins, chi.itemid, chi.valuenum, d_items.label FROM ( SELECT ch.icustay_id, EXTRACT(MINS FROM ch.charttime - ic.intime) AS mins, ch.itemid, ch.value, ch.valuenum FROM (SELECT * FROM chartevents LEFT JOIN diagnoses_icd di ON chartevents.subject_id = di.subject_id WHERE di.icd9_code IN ('77181', '99591', '99592', '67020', '67022', '67024') AND chartevents.itemid IN (3420, 190, 223835, 3422)) AS ch LEFT JOIN icustays ic ON ch.icustay_id = ic.icustay_id ) AS chi LEFT JOIN d_items ON chi.itemid = d_items.itemid """, engine) pat.shape pat.head() pat.describe() pat = pat[~pat.icustay_id.isna()] pat = pat[pat.mins>=0] pat.shape전처리pre-process the FiO2s to ensure they are between 21-100%pat = pat[(pat.valuenum>21) | ((pat.valuenum<1) & (pat.valuenum>0))] pat.shape # 제거 되었는지 확인 pat[(pat.valuenum<21) & (pat.valuenum<0.2)] # 백분위로 변환 def convert_to_percent(num): return num * 100 pat.loc[pat.valuenum<1, 'valuenum'] = pat[pat.valuenum<1].valuenum.map(lambda x: convert_to_percent(x)) pat[pat.valuenum<1] pat.valuenum.describe()한 환자의 FiO2값 예측하기# 가장 긴 관찰 값을 가지고 있는 환자 찾기 find_pat = pat.groupby(['mins', 'icustay_id']).mean().reset_index().pivot("icustay_id", 'mins', 'valuenum') find_pat.isnull().sum(axis=1).sort_values(ascending=True) target_pat = find_pat.iloc[find_pat.index==295669].values[0] target_pat train, test = target_pat[0:len(target_pat)-10], target_pat[len(target_pat)-10:] train testTime series 데이터를 기반으로 다음 값을 예측하기- Model : Autoregression(자기회귀) - output values based on a linear combination of input values - 앞선 관측값이 다음 값을 예측하는데 효과적이라는 가정을 기반으로 한다. - 만약 input과 output이 같은 방향(같이 증가하거나 같이 감소하거나)으로 변화한다면 positive correlation이라고 지칭하고, 반대 방향으로 변화한다면 negative correlation이라고 한다. - y = b0 + b1*x1 -> x(t+1) = b0 + b1*x(t-1) + b2*x(t-2)....from statsmodels.tsa.ar_model import AR model = AR(train) model_fit = model.fit() print('Lag : %s' % model_fit.k_ar) print('Coefficients : %s' % model_fit.params) predictions = model_fit.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False) for i in range(len(predictions)): print('Predicted= %f, expected=%f' % (predictions[i], test[i])) from sklearn.metrics import mean_squared_error error = mean_squared_error(test, predictions) print('Test MSE: %.3f' % error) plt.plot(test) plt.plot(predictions, color='red') plt.show() # 전체 시간을 5분 단위로 변경 def convert_to_min_range(min_num): return int(min_num/5)*5 pat.mins = pat.mins.map(lambda x: convert_to_min_range(x)) pat.head() # 같은 시간 내 관찰값이 여러번 추가되었다면, 평균으로 정리한다 -- 통계적, 의학적 타당성(?) 
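# (Added note) The Korean comment above says: if several observations were recorded within the same
# time bin, collapse them to their mean -- statistical/medical validity still to be confirmed.
# The groupby below averages duplicate FiO2 values per (5-minute bin, icustay_id) pair.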
mean_pat = pat.groupby(['mins', 'icustay_id']).mean().reset_index() mean_pat.head() ## Pivot table: 데이터 컬럼 중에 두 개의 컬럼을 각각 행의 인덱스, 열의 인덱스로 사용하여 테이블을 구성하는 것 # pandas에서는 피봇데이블을 만들기 위해 pivot(행 인덱스로 사용할 컬럼이름, 열 인덱스로 활용할 컬럼이름, 데이터로 사용할 컬럼이름) 을 입력한다 # - https://datascienceschool.net/view-notebook/76dcd63bba2c4959af15bec41b197e7c/ mean_pivot = mean_pat.pivot("icustay_id", 'mins', 'valuenum') mean_pivotmissing value- If any individual vital signs were missing for model estimation then the previous value was pulled forward. If no prior values were available then a median value was imputed, similar to prior work in this area.- Churpek, ., , and . "The value of vital sign trends for detecting clinical deterioration on the wards." Resuscitation 102 (2016): 1-5.# missing data visualization with heatmap import seaborn as sns plt.figure(figsize=(20, 10)) sns.heatmap(mean_pivot.isnull(), yticklabels=False, cbar=False, cmap='viridis') # 결측값 여부 확인 : isnull(), notnull() # 열별 결측값 갯수 : df.isnull().sum() # 행별 결측값 갯수 : df.isnull().sum(axis=1) mean_pivot.isnull().sum(axis=1).sort_values(ascending=True) mean_pivot.loc[mean_pivot.isnull().sum(axis=1).sort_values(ascending=True).index] mean_pivot.columns len(mean_pivot.columns) # missing value가 전체 데이터 갯수의 절반 이상인 경우 제외 mean_pivot = mean_pivot.loc[mean_pivot.isnull().sum(axis=1)<(len(mean_pivot.columns)/2)] mean_pivot결측값 처리하기- 특정 값으로 채우기 : df.fillna(0)- 결측값을 앞 방향(forward) 혹은 뒷 방향(backward)으로 채우기 : 앞 df.fillna(method='ffill') or df.fillna(method='pad') 뒤 df.fillna(method='bfill') - 앞/뒤 방향으로 결측값 채우는 횟수를 제한하기 : df.fillna(method='ffill', limit=number)- 결측값을 변수별 평균으로 대체하기 : df.fillna(df.mean()), df.where(pd.notnull(df), df.mean(), axis='columns')- 결측값을 다른 변수의 값으로 대체하기시계열 데이터에서 결측값 보간 하기https://rfriend.tistory.com/264?category=675917# 나머지 missing value 채우기 mean_pivot = mean_pivot.fillna(method='ffill', limit=1, axis=1) mean_pivot mean_pivot[mean_pivot.index==200282.0] mean_pivot.median(axis=1) # mean_pivot.fillna(mean_pivot.median(axis=1)) mean_pivot = mean_pivot.where(pd.notnull(mean_pivot), mean_pivot.median(axis=1), axis=0) mean_pivot # 확인 mean_pivot[mean_pivot.index==200282.0] # 시각화해보기 - 전체 데이터는 차트가 너무 지저분해서 일부만을 시각화함(해당 코드의 필요성 고민) fig = plt.figure(figsize=(20, 10)) index = 0 for m_ind in mean_pivot.index: if index % 30 == 0: row = mean_pivot.xs(m_ind) # print(row.get_values()) plt.plot(mean_pivot.columns, row.get_values(), linestyle='-') index += 1 plt.title("Change of FiO2 from admission") plt.xlabel("Minutes after admission") plt.ylabel("% of FiO2")Vanilla LSTMfrom keras.models import Sequential from keras.layers import LSTM, Dense mean_pivot.shape n_steps = mean_pivot.shape[1]-1 n_features = 1 mean_n = mean_pivot.iloc[:, :-1].values Y = mean_pivot.iloc[:, -1].values X = mean_n.reshape((mean_pivot.shape[0], n_steps, n_features)) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=10, shuffle=True) X_train.shape X_test.shape model = Sequential() model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') model.fit(X_train, y_train, epochs=200, verbose=0) y_predict = model.predict(X_test, verbose=0)This notebook shows the implementation of 3 different regression models: Random Forest, SVR, AdaBoost. 
With the first 2 there's a grid search CV and for loops to get the best result, with the last one just a for loop.#Function to round the probabilities to the nearest .25 def round_prob(x): if 0.0<= x <0.125: x=0.0 elif 0.125<= x <0.375: x=0.25 elif 0.375<= x <0.625: x=0.5 elif 0.625<= x <0.875: x=0.75 elif 0.875<= x <1.125: x=1.0 elif 1.125<= x <1.375: x=1.25 elif 1.375<= x <1.625: x=1.5 elif 1.625<= x <1.875: x=1.75 elif 1.875<= x <2.125: x=2.0 elif 2.125<= x <2.375: x=2.25 elif 2.375<= x <2.625: x=2.5 elif 2.625<= x <2.875: x=2.75 elif 2.875<= x <3.125: x=3.0 elif 3.125<= x <3.375: x=3.25 elif 3.375<= x <3.625: x=3.5 elif 3.625<= x <3.875: x=3.75 elif 3.875<= x <4.125: x=4.0 else: x=x return x #Applying the round function of above. df['Tot prob 0']=df['Tot prob 0'].apply(round_prob) df['Tot prob 1']=df['Tot prob 1'].apply(round_prob) df['Tot prob 2']=df['Tot prob 2'].apply(round_prob) df['Tot prob 3']=df['Tot prob 3'].apply(round_prob) df['Tot prob 4']=df['Tot prob 4'].apply(round_prob) df['Tot prob 5']=df['Tot prob 5'].apply(round_prob) df['Tot prob 6']=df['Tot prob 6'].apply(round_prob) df['Tot prob 7']=df['Tot prob 7'].apply(round_prob) df['NRating']=np.around(df['NRating'],2) train_columns=['Tot prob 0','Tot prob 1','Tot prob 2','Tot prob 3','Tot prob 4','Tot prob 5','Tot prob 6', 'Tot prob 7'] #Train columns for the prediction target_columns=['NRating'] #Target column for the prediction X= df[train_columns].to_numpy() Y= df[target_columns].to_numpy() scaler = StandardScaler() #Y = scaler.fit_transform(Y) length = math.floor(len(X)/1.5) #Spliting value for training and testing data train_X = X[:length] #X_train data train_Y = Y[:length] #Y_train data train_Y= train_Y.reshape(train_Y.shape[0], ) test_X = X[length:] #X_test data test_Y = Y[length:] #Y_test data test_Y= test_Y.reshape(test_Y.shape[0], )Grid Search implementation with the train datagsc = GridSearchCV( estimator=RandomForestRegressor(), #Estimator used for grid search param_grid={ 'n_estimators': (100,200,500,1000,1500), 'max_depth': (2,3,4,5,6,7,8,None), 'min_samples_split': (2,3,5,6,8,10), 'max_features': ('auto','sqrt','log2',5,6,2) }, #Parameters of that estimator cv=5, scoring='neg_mean_squared_error', verbose=0,n_jobs=-1) #Splitting value and metric for evaluation grid_result = gsc.fit(train_X, train_Y) best_params = grid_result.best_params_ #Best parameters after running grid search rfr = RandomForestRegressor(n_estimators=best_params['n_estimators'],max_depth=best_params['max_depth'], min_samples_split=best_params['min_samples_split'],max_features=best_params['max_features'] ,random_state=1,n_jobs=-1) #Best parameters usage for random forest rfr.fit(train_X,train_Y) pred_t=rfr.predict(train_X) y_pred_r=rfr.predict(test_X) print(mean_squared_error(pred_t,train_Y)) #Mean Squared Error between train data and its prediction. print(mean_squared_error(y_pred_r,test_Y)) #Mean Squared Error between test data and its prediction. print(best_params) #Graph of both predictions; test and train data vs the real data with random forest. f, (ax1, ax2) = plt.subplots(1, 2) ax1.plot(test_Y,test_Y,color='r') ax1.scatter(test_Y,y_pred_r) ax1.set_title('Predictions vs. 
real \n(test_data) (Random Forest)') ax1.set_xlabel('Y Test Values') ax1.set_ylabel('Y Predicted Values') plt.sca(ax1) for spine in plt.gca().spines.values(): spine.set_visible(False) ax2.plot(train_Y,train_Y,color='r') ax2.scatter(train_Y,pred_t) ax2.set_title('Predictions vs real \n(train_data) (Random Forest)') ax2.set_xlabel('Y Train Values') ax2.set_ylabel('Y Predicted Values') ax2.yaxis.set_label_position('right') plt.sca(ax2) for spine in plt.gca().spines.values(): spine.set_visible(False) gsc_s = GridSearchCV( estimator=SVR(), #Estimator used for grid search param_grid={ 'kernel': ('rbf','poly'), 'C': (0.001,0.01,0.1,1,10), 'epsilon': (0.1,0.5,1,2,5), 'gamma': (0.001,0.01,0.1,1) }, #Parameters of that estimator cv=5, scoring='neg_mean_squared_error', verbose=0,n_jobs=-1) #Splitting value and metric for evaluation grid_result_s = gsc_s.fit(test_X, test_Y) best_params_s = grid_result_s.best_params_ #Best parameters after running grid search rfr_s = SVR(kernel=best_params_s['kernel'],C=best_params_s['C'],epsilon=best_params_s['epsilon'], gamma=best_params_s['gamma'],verbose=0) #Best parameters usage for SVR rfr_s.fit(train_X,train_Y) pred_ts=rfr_s.predict(train_X) y_pred_rs=rfr_s.predict(test_X) print(mean_squared_error(pred_ts,train_Y)) #Mean Squared Error between train data and its prediction. print(mean_squared_error(y_pred_rs,test_Y)) #Mean Squared Error between test data and its prediction. print(best_params_s) #Graph of both predictions; test and train data vs the real data with SVR f, (ax3, ax4) = plt.subplots(1, 2) ax3.plot(test_Y,test_Y,color='r') ax3.scatter(test_Y,y_pred_rs) ax3.set_title('Predictions vs. real \n(test_data) (SVR)') ax3.set_xlabel('Y Test Values') ax3.set_ylabel('Y Predicted Values') plt.sca(ax3) for spine in plt.gca().spines.values(): spine.set_visible(False) ax4.plot(train_Y,train_Y,color='r') ax4.scatter(train_Y,pred_ts) ax4.set_title('Predictions vs real \n(train_data) (SVR)') ax4.set_xlabel('Y Train Values') ax4.set_ylabel('Y Predicted Values') ax4.yaxis.set_label_position('right') plt.sca(ax4) for spine in plt.gca().spines.values(): spine.set_visible(False)The implementation using for loops with the same paramaters as in Grid Search CV and calculation of the MSE in test and train data.#Random Forest parameters n_estimators = [10,20,50,100,200,500,1000,1500] max_depth = [2,3,4,5,6,7,8,None] min_samples_split = [2,3,5,6,8,10] max_features = ['auto','sqrt','log2',5,6,2,3] #Empty lists n_esti=[] max_d=[] min_s=[] max_f=[] mse_train=[] mse_test=[] #For loop to check every combination for i in n_estimators: for j in max_depth: for k in min_samples_split: for l in max_features: rfr = RandomForestRegressor(n_estimators = i,max_depth = j,min_samples_split = k, max_features = l,random_state = 1,n_jobs=-1) rfr.fit(train_X,train_Y) pred_t=rfr.predict(train_X) y_pred_r=rfr.predict(test_X) n_esti.append(i) max_d.append(j) min_s.append(k) max_f.append(l) mse_train.append(mean_squared_error(train_Y,pred_t)) mse_test.append(mean_squared_error(test_Y,y_pred_r)) #New DF with all combinations df_random=pd.DataFrame() df_random['trees'] = n_esti df_random['max_depth'] = max_d df_random['min_split'] = min_s df_random['max_characteristics'] = max_f df_random['mse train'] = mse_train df_random['mse test'] = mse_test df_random.sort_values(by='mse train',ascending=True).head(10) #Using one of the best combinations based on the MSE in the training data. 
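# (Added note) The parameters below are taken from the top of df_random when sorted by 'mse train'.
# Ranking by training MSE favours settings that effectively memorise the training data
# (max_depth=None, min_samples_split=2); the comparison further below, ranked by 'mse test',
# shows how different the picture looks when test error is used instead.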
rfr = RandomForestRegressor(n_estimators = 500,max_depth = None,min_samples_split = 2, max_features = 3,random_state = 1,n_jobs=-1) rfr.fit(train_X,train_Y) pred_t=rfr.predict(train_X) y_pred_r=rfr.predict(test_X) f, (ax5, ax6) = plt.subplots(1, 2) ax5.plot(test_Y,test_Y,color='r') ax5.scatter(test_Y,y_pred_r) ax5.set_title('Predictions vs. real \n(test_data) (Random Forest)(For Loop)') ax5.set_xlabel('Y Test Values') ax5.set_ylabel('Y Predicted Values') plt.sca(ax5) for spine in plt.gca().spines.values(): spine.set_visible(False) ax6.plot(train_Y,train_Y,color='r') ax6.scatter(train_Y,pred_t) ax6.set_title('Predictions vs real \n(train_data) (Random Forest)(For Loop)') ax6.set_xlabel('Y Train Data') ax6.set_ylabel('Y Predicted Values') ax6.yaxis.set_label_position('right') plt.sca(ax6) for spine in plt.gca().spines.values(): spine.set_visible(False) #SVR parameters kernel=['rbf','poly'] C=[0.001,0.01,0.1,1.0,10.0] epsilon=[0.1,0.5,1.0,2.0,5.0] gamma=[0.001,0.01,0.1,1.0] #Empty lists kernel_list=[] c_list=[] eps_list=[] gamma_list=[] mse_train_svr=[] mse_test_svr=[] #For loop to check every combination for i in kernel: for j in C: for k in epsilon: for l in gamma: rfr_s = SVR(kernel = i,C = j,epsilon = k,gamma = l,verbose=0) rfr_s.fit(train_X,train_Y) pred_ts=rfr_s.predict(train_X) y_pred_rs=rfr_s.predict(test_X) kernel_list.append(i) c_list.append(j) eps_list.append(k) gamma_list.append(l) mse_train_svr.append(mean_squared_error(train_Y,pred_ts)) mse_test_svr.append(mean_squared_error(test_Y,y_pred_rs)) #New DF with all combinations df_svr=pd.DataFrame() df_svr['Kernel'] = kernel_list df_svr['C'] = c_list df_svr['Epsilon'] = eps_list df_svr['Gamma'] = gamma_list df_svr['mse train'] = mse_train_svr df_svr['mse test'] = mse_test_svr df_svr.sort_values(by='mse train',ascending=True).head(10) #Using one of the best combinations based on the MSE in the training data. rfr_s = SVR(kernel = 'rbf',C = 10,epsilon = 2,gamma = 1,verbose=0) rfr_s.fit(train_X,train_Y) pred_ts=rfr_s.predict(train_X) y_pred_rs=rfr_s.predict(test_X) f, (ax7, ax8) = plt.subplots(1, 2) ax7.plot(test_Y,test_Y,color='r') ax7.scatter(test_Y,y_pred_rs) ax7.set_title('Predictions vs. real \n(test_data) (SVR)(For Loop)') ax7.set_xlabel('Y Test Values') ax7.set_ylabel('Y Predicted Values') plt.sca(ax7) for spine in plt.gca().spines.values(): spine.set_visible(False) ax8.plot(train_Y,train_Y,color='r') ax8.scatter(train_Y,pred_ts) ax8.set_title('Predictions vs. 
real \n(train_data) (SVR)(For Loop)') ax8.set_xlabel('Y Train Values') ax8.set_ylabel('Y Predicted Values') ax8.yaxis.set_label_position('right') plt.sca(ax8) for spine in plt.gca().spines.values(): spine.set_visible(False) #AdaBoost Regressor parameters base_estimator=[RandomForestRegressor(n_estimators=500),SVR(kernel='poly',degree=5,C=0.1,epsilon=1), SVR(kernel = 'rbf',C = 10,epsilon = 2,gamma = 1,verbose=0), RandomForestRegressor(n_estimators=500,min_samples_split=2,max_features=3,random_state=1 ,n_jobs=-1)] n_estiada=[200,500,1000] learning_rate=[0.1,1,0.5,] loss=['linear','square'] #Empty lists base_list=[] n_estiada_list=[] lr_list=[] loss_list=[] mse_train_ada=[] mse_test_ada=[] #For loop to check every combination for i in base_estimator: for j in n_estiada: for k in learning_rate: for l in loss: rfr_a = AdaBoostRegressor(base_estimator = i,n_estimators = j,learning_rate = k, loss = l) rfr_a.fit(train_X,train_Y) pred_ta=rfr_a.predict(train_X) y_pred_ra=rfr_a.predict(test_X) base_list.append(str(i)) n_estiada_list.append(j) lr_list.append(k) loss_list.append(l) mse_train_ada.append(mean_squared_error(train_Y,pred_ta)) mse_test_ada.append(mean_squared_error(test_Y,y_pred_ra)) #New DF with all combinations df_ada=pd.DataFrame() df_ada['Base Estimator'] = base_list df_ada['# of Estimators'] = n_estiada_list df_ada['Learning Rae'] = lr_list df_ada['Loss'] = loss_list df_ada['mse train'] = mse_train_ada df_ada['mse test'] = mse_test_ada df_ada.sort_values(by = 'mse train',ascending=True).head(10) #Using one of the best combinations based on the MSE in the training data. rfr_a = AdaBoostRegressor(RandomForestRegressor(n_estimators=500,min_samples_split=2,max_features=3, random_state=1,n_jobs=-1),loss='linear',learning_rate=0.1, n_estimators=200) rfr_a.fit(train_X,train_Y) pred_ta=rfr_a.predict(train_X) y_pred_ra=rfr_a.predict(test_X) f, (ax9, ax10) = plt.subplots(1, 2) ax9.plot(test_Y,test_Y,color='r') ax9.scatter(test_Y,y_pred_ra) ax9.set_title('Predictions vs. real \n(test_data) (AdaBoost)(For Loop)') ax9.set_xlabel('Y Test Values') ax9.set_ylabel('Y Predicted Values') plt.sca(ax9) for spine in plt.gca().spines.values(): spine.set_visible(False) ax10.plot(train_Y,train_Y,color='r') ax10.scatter(train_Y,pred_ta) ax10.set_title('Predictions vs. real \n(train_data) (AdaBoost)(For Loop)') ax10.set_xlabel('Y Train Values') ax10.set_ylabel('Y Predicted Values') ax10.yaxis.set_label_position('right') plt.sca(ax10) for spine in plt.gca().spines.values(): spine.set_visible(False)The best performance based on the mean squared error of the train data was obtained with the Random Forest Regressor. If the MSE of the test data was the metric to evaluate the performance, the data doesn't fit at all, neither in training or test data as shown below.df_random.sort_values(by='mse test',ascending=True).head(10) rfr_test = RandomForestRegressor(n_estimators = 20,max_depth = 3,min_samples_split = 5, max_features = 'log2',random_state = 1,n_jobs=-1) rfr_test.fit(train_X,train_Y) pred_t_test=rfr_test.predict(train_X) y_pred_r_test=rfr_test.predict(test_X) f, (ax11, ax12) = plt.subplots(1, 2) ax11.plot(test_Y,test_Y,color='r') ax11.scatter(test_Y,y_pred_r_test) ax11.set_title('Predictions vs. 
real \n(test_data) (Random Forest)(MSE test)') ax11.set_xlabel('Y Test Values') ax11.set_ylabel('Y Predicted Values') plt.sca(ax11) for spine in plt.gca().spines.values(): spine.set_visible(False) ax12.plot(train_Y,train_Y,color='r') ax12.scatter(train_Y,pred_t_test) ax12.set_title('Predictions vs real \n(train_data) (Random Forest)(MSE test)') ax12.set_xlabel('Y Train Data') ax12.set_ylabel('Y Predicted Values') ax12.yaxis.set_label_position('right') plt.sca(ax12) for spine in plt.gca().spines.values(): spine.set_visible(False)PPI network analysis#%matplotlib notebook %matplotlib inline import sys import csv import numpy as np import networkx as nx import matplotlib.pyplot as plt import scipy.sparse as ss import scipy.stats as st from signet.cluster import Cluster import signet.utils as ut import matplotlib from mpl_toolkits.axes_grid1 import AxesGrid def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'): ''' Function to offset the "center" of a colormap. Useful for data with a negative min and positive max and you want the middle of the colormap's dynamic range to be at zero. Input ----- cmap : The matplotlib colormap to be altered start : Offset from lowest point in the colormap's range. Defaults to 0.0 (no lower offset). Should be between 0.0 and `midpoint`. midpoint : The new center of the colormap. Defaults to 0.5 (no shift). Should be between 0.0 and 1.0. In general, this should be 1 - vmax / (vmax + abs(vmin)) For example if your data range from -15.0 to +5.0 and you want the center of the colormap at 0.0, `midpoint` should be set to 1 - 5/(5 + 15)) or 0.75 stop : Offset from highest point in the colormap's range. Defaults to 1.0 (no upper offset). Should be between `midpoint` and 1.0. ''' cdict = { 'red': [], 'green': [], 'blue': [], 'alpha': [] } # regular index to compute the colors reg_index = np.linspace(start, stop, 257) # shifted index to match the data shift_index = np.hstack([ np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True) ]) for ri, si in zip(reg_index, shift_index): r, g, b, a = cmap(ri) cdict['red'].append((si, r, r)) cdict['green'].append((si, g, g)) cdict['blue'].append((si, b, b)) cdict['alpha'].append((si, a, a)) newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict) plt.register_cmap(cmap=newcmap) return newcmap # shifted cmap (useful to plot correlation matrices) s_cmap = shiftedColorMap(plt.cm.seismic, start= -4., midpoint=0., stop=2., name='shiftedcmap') #s_cmap = shiftedColorMap(plt.cm.seismic, midpoint=0., name='shiftedcmap')Import dataimport pandas # numpy arrays, days and prices data = pandas.read_csv('PPI.csv') dir(data) edges = data['Sign_Score'] data_ID1 = data['ID1'] data_ID2 = data['ID2'] #data['Sign_Score'] G = nx.Graph() for i in np.arange(edges.shape[0]): node1, node2, weight = data_ID1[i], data_ID2[i], edges[i] G.add_edges_from([(node1, node2, {'weight': weight})]) G=G.subgraph(max(nx.connected_components(G), key=len)) A = nx.adj_matrix(G, weight = 'weight').tocsc() Abar = abs(A) A_p = (A + Abar)/2 A_n = -(A - Abar)/2 A_p.eliminate_zeros() A_n.eliminate_zeros() # ss.save_npz('adjacency_plus.npz', A_p) # ss.save_npz('adjacency_minus.npz', A_n) Ad = A.todense()Analyse Adjacencyplt.hist(edges, 20); edges[edges>0].shape[0]*2 edges[edges<0].shape A_p.data.shape s_cmap = shiftedColorMap(plt.cm.seismic, start= -2.5, midpoint=0.2335, stop=2.5, name='shiftedcmap') plt.figure(); #corrs[np.diag_indices(corrs.shape[0])]=0 plt.matshow(Ad, cmap = s_cmap);# plt.colorbar(); # 
plt.savefig('A.png', dpi = 250)An example how to apply a strategy per trading session.import pandas as pd import numpy as np import vectorbt as vbt from datetime import timedelta # Generate sample price price_idx = pd.date_range('2018-01-01 12:00:00', periods=48, freq='H') np.random.seed(42) price = pd.Series(np.random.uniform(size=price_idx.shape), index=price_idx) print(price) # Sessions must be equal - fill missing dates # Fill on first date before 12:00 and on last date after 11:00 first_date = price.index[0].date() last_date = price.index[-1].date()+timedelta(days=1) filled_idx = pd.date_range(first_date, last_date, freq='H') filled_price = price.reindex(filled_idx) print(filled_price) # Remove dates that are outside of trading sessions session_price_idx = filled_price.between_time('9:00', '17:00', include_end=False).index session_price = filled_price.loc[session_price_idx] print(session_price) # Select first and last ticks of each trading session and split price into ranges between those ticks start_idxs = session_price.index[session_price.index.hour == 9] end_idxs = session_price.index[session_price.index.hour == 16] price_per_session = session_price.vbt(freq='1H').split_into_ranges(start_idxs=start_idxs, end_idxs=end_idxs) print(price_per_session) # Run your strategy (here using random signals) entries, exits = pd.DataFrame.vbt.signals.generate_random_both(price_per_session.shape, n=2, seed=42) portfolio = vbt.Portfolio.from_signals(price_per_session, entries, exits, freq='1H') print(portfolio.total_return())range_start range_end 2018-01-01 09:00:00 2018-01-01 16:00:00 -0.786858 2018-01-02 09:00:00 2018-01-02 16:00:00 1.466807 2018-01-03 09:00:00 2018-01-03 16:00:00 -0.529509 Name: total_return, dtype: float64Sverdrup Balance in observationsimport numpy as np import xarray as xr import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt %matplotlib inlineIn this assignment, we are going to compute the Sverdrup streamfunction from the wind stress. 
As a reminder, the Sverdrup balance is\begin{equation} \beta V = \frac{\vec{\nabla} \times \vec{\tau}}{\rho}\end{equation} Download the zonal and meridional GODAS momentum flux (=wind stress in [N/m$^2$]) files for one year from the GODAS website at https://psl.noaa.gov/data/gridded/data.godas.html (via 'List of *.nc files'>'See list'>'Save Link as') and use their time average from here on.# loading and inspecting data = '/Users/Andre/Downloads' uflx = xr.open_dataset(f'{data}/uflx.2020.nc') vflx = xr.open_dataset(f'{data}/vflx.2020.nc') taux = uflx.uflx.mean('time') tauv = vflx.vflx.mean('time') uflxFine-tuning a BERT modelhttps://www.tensorflow.org/official_models/fine_tuning_bert Setupimport os import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_hub as hub import tensorflow_datasets as tfds tfds.disable_progress_bar() from official.modeling import tf_utils from official import nlp from official.nlp import bert # Load the required submodules import official.nlp.optimization import official.nlp.bert.bert_models import official.nlp.bert.configs import official.nlp.bert.run_classifier import official.nlp.bert.tokenization import official.nlp.data.classifier_data_lib import official.nlp.modeling.losses import official.nlp.modeling.models import official.nlp.modeling.networks # Setup memory to fix critical issue physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) physical_devicesResourcesbert_model_name = 'bert_en_uncased_L-12_H-768_A-12' map_name_to_handle = { 'bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3', 'bert_en_uncased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3', 'bert_en_wwm_uncased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/3', 'bert_en_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3', 'bert_en_cased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/3', 'bert_en_wwm_cased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/3', 'bert_multi_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3', 'small_bert/bert_en_uncased_L-2_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1', 'small_bert/bert_en_uncased_L-2_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1', 'small_bert/bert_en_uncased_L-2_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1', 'small_bert/bert_en_uncased_L-2_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1', 'small_bert/bert_en_uncased_L-4_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1', 'small_bert/bert_en_uncased_L-4_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1', 'small_bert/bert_en_uncased_L-4_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1', 'small_bert/bert_en_uncased_L-4_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1', 'small_bert/bert_en_uncased_L-6_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1', 'small_bert/bert_en_uncased_L-6_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1', 
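# (Added note) In these TF-Hub handle names, L is the number of transformer layers,
# H is the hidden size, and A is the number of attention heads.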
'small_bert/bert_en_uncased_L-6_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1', 'small_bert/bert_en_uncased_L-6_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1', 'small_bert/bert_en_uncased_L-8_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1', 'small_bert/bert_en_uncased_L-8_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1', 'small_bert/bert_en_uncased_L-8_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1', 'small_bert/bert_en_uncased_L-8_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1', 'small_bert/bert_en_uncased_L-10_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1', 'small_bert/bert_en_uncased_L-10_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1', 'small_bert/bert_en_uncased_L-10_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1', 'small_bert/bert_en_uncased_L-10_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1', 'small_bert/bert_en_uncased_L-12_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1', 'small_bert/bert_en_uncased_L-12_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1', 'small_bert/bert_en_uncased_L-12_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1', 'small_bert/bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1', 'albert_en_base': 'https://tfhub.dev/tensorflow/albert_en_base/2', 'albert_en_large': 'https://tfhub.dev/tensorflow/albert_en_large/2', 'albert_en_xlarge': 'https://tfhub.dev/tensorflow/albert_en_xlarge/2', 'albert_en_xxlarge': 'https://tfhub.dev/tensorflow/albert_en_xxlarge/2', 'electra_small': 'https://tfhub.dev/google/electra_small/2', 'electra_base': 'https://tfhub.dev/google/electra_base/2', 'experts_pubmed': 'https://tfhub.dev/google/experts/bert/pubmed/2', 'experts_wiki_books': 'https://tfhub.dev/google/experts/bert/wiki_books/2', 'talking-heads_base': 'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1', 'talking-heads_large': 'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_large/1', } map_model_to_preprocess = { 'bert_en_uncased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_en_wwm_cased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3', 'bert_en_cased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3', 'bert_en_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3', 'bert_en_wwm_uncased_L-24_H-1024_A-16': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 
'small_bert/bert_en_uncased_L-4_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_multi_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3', 'albert_en_base': 'https://tfhub.dev/tensorflow/albert_en_preprocess/3', 'albert_en_large': 'https://tfhub.dev/tensorflow/albert_en_preprocess/3', 'albert_en_xlarge': 'https://tfhub.dev/tensorflow/albert_en_preprocess/3', 'albert_en_xxlarge': 'https://tfhub.dev/tensorflow/albert_en_preprocess/3', 'electra_small': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'electra_base': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'experts_pubmed': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'experts_wiki_books': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'talking-heads_base': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'talking-heads_large': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', } tfhub_handle_encoder = map_name_to_handle[bert_model_name] tfhub_handle_preprocess = map_model_to_preprocess[bert_model_name] print(f'BERT model selected : {tfhub_handle_encoder}') print(f'Preprocessing model auto-selected: {tfhub_handle_preprocess}') bert_preprocess = hub.load(tfhub_handle_preprocess) tok = bert_preprocess.tokenize(tf.constant(['Hello TensorFlow!'])) print(tok) text_preprocessed = bert_preprocess.bert_pack_inputs([tok, tok], tf.constant(20)) print('Shape Word Ids : ', text_preprocessed['input_word_ids'].shape) print('Word Ids : ', text_preprocessed['input_word_ids'][0, :16]) print('Shape Mask : ', text_preprocessed['input_mask'].shape) 
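# (Added note) 'input_word_ids' holds the packed token ids, 'input_mask' flags real tokens versus padding,
# and 'input_type_ids' marks which of the two packed segments each token belongs to.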
print('Input Mask : ', text_preprocessed['input_mask'][0, :16]) print('Shape Type Ids : ', text_preprocessed['input_type_ids'].shape) print('Type Ids : ', text_preprocessed['input_type_ids'][0, :16]) hub_url_bert = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3" gs_folder_bert = "gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12" tf.io.gfile.listdir(gs_folder_bert)Data Get the dataset from TensorFlow Datasetsglue, info = tfds.load('glue/mrpc', with_info=True, # It's small, load the whole dataset batch_size=-1) list(glue.keys()) glue['train'] info.features info.features['label'].names glue_train = glue['train'] for key, value in glue_train.items(): # print with f-string print(f"{key:9s}: {value}")The BERT tokenizer# Set up tokenizer to generate Tensorflow dataset tokenizer = bert.tokenization.FullTokenizer( vocab_file=os.path.join(gs_folder_bert, "vocab.txt"), do_lower_case=True) print("Vocab size:", len(tokenizer.vocab))The preprocessing modelbert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess) text_test = ['this is such an amazing movie!'] text_preprocessed = bert_preprocess_model(text_test) print(f'Keys : {list(text_preprocessed.keys())}') print(f'Shape : {text_preprocessed["input_word_ids"].shape}') print(f'Word Ids : {text_preprocessed["input_word_ids"][0, :12]}') print(f'Input Mask : {text_preprocessed["input_mask"][0, :12]}') print(f'Type Ids : {text_preprocessed["input_type_ids"][0, :12]}')Using the BERT modelbert_model = hub.KerasLayer(tfhub_handle_encoder) bert_results = bert_model(text_preprocessed) print(f'Loaded BERT: {tfhub_handle_encoder}') print(f'Pooled Outputs Shape:{bert_results["pooled_output"].shape}') print(f'Pooled Outputs Values:{bert_results["pooled_output"][0, :12]}') print(f'Sequence Outputs Shape:{bert_results["sequence_output"].shape}') print(f'Sequence Outputs Values:{bert_results["sequence_output"][0, :12]}')Define your modeldef build_classifier_model(): text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text') preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing') encoder_inputs = preprocessing_layer(text_input) encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder') outputs = encoder(encoder_inputs) net = outputs['pooled_output'] net = tf.keras.layers.Dropout(0.1)(net) net = tf.keras.layers.Dense(1, activation=None, name='classifier')(net) return tf.keras.Model(text_input, net) classifier_model = build_classifier_model() bert_raw_result = classifier_model(tf.constant(text_test)) print(tf.sigmoid(bert_raw_result)) tf.keras.utils.plot_model(classifier_model) for n in range(3): print("Original: ", example[n].numpy()) print("Round-trip: ", " ".join(vocab[encoded_example[n]])) print()Create the modelmodel = tf.keras.Sequential([ encoder, tf.keras.layers.Embedding( input_dim=len(encoder.get_vocabulary()), output_dim=64, # Use masking to handle the variable sequence lengths mask_zero=True), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1) ]) print([layer.supports_masking for layer in model.layers]) # predict on a sample text without padding. sample_text = ('The movie was cool. The animation and the graphics ' 'were out of this world. 
I would recommend this movie.') predictions = model.predict(np.array([sample_text])) print(predictions[0]) # predict on a sample text with padding padding = "the " * 2000 predictions = model.predict(np.array([sample_text, padding])) print(predictions[0]) model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(1e-4), metrics=['accuracy'])Train the modelhistory = model.fit(train_dataset, epochs=10, validation_data=test_dataset, validation_steps=30) test_loss, test_acc = model.evaluate(test_dataset) print('Test Loss: {}'.format(test_loss)) print('Test Accuracy: {}'.format(test_acc)) plt.figure(figsize=(16,8)) plt.subplot(1,2,1) plot_graphs(history, 'accuracy') plt.ylim(None,1) plt.subplot(1,2,2) plot_graphs(history, 'loss') plt.ylim(0,None) sample_text = ('The movie was cool. The animation and the graphics ' 'were out of this world. I would recommend this movie.') predictions = model.predict(np.array([sample_text]))Stack two or more LSTM layersmodel = tf.keras.Sequential([ encoder, tf.keras.layers.Embedding(len(encoder.get_vocabulary()), 64, mask_zero=True), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(1) ]) model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(1e-4), metrics=['accuracy']) history = model.fit(train_dataset, epochs=10, validation_data=test_dataset, validation_steps=30) test_loss, test_acc = model.evaluate(test_dataset) print('Test Loss: {}'.format(test_loss)) print('Test Accuracy: {}'.format(test_acc)) # predict on a sample text without padding. sample_text = ('The movie was not good. The animation and the graphics ' 'were terrible. I would not recommend this movie.') predictions = model.predict(np.array([sample_text])) print(predictions) plt.figure(figsize=(16,6)) plt.subplot(1,2,1) plot_graphs(history, 'accuracy') plt.subplot(1,2,2) plot_graphs(history, 'loss')Experian Marketing Services reported that the typical American spends a mean of 144 minutes (2.4 hours) per day accessing the Internet via a mobile device. (Source: The 2014 Digital Marketer, available at ex.pn/1kXJjfX.) In order to test the validity of this statement, you select a sample of 30 friends and family. The results for the time spent per day accessing the Internet via mobile device (in minutes) are stored in InternetMobileTime .a. Is there evidence that the population mean time spent per day accessing the Internet via mobile device is different from 144 minutes? Use the p-value approach and a level of significance of 0.05.b. 
What assumption about the population distribution is needed in order to conduct the t test in (a)?import numpy as np import pandas as pd mydata = pd.read_csv("InternetMobileTime .csv") mydata.head() mydata.describe() mydata.info() #Null Hypothesis - Mean=144 #Alternate Hypothesis - Mean != 144 xbar = mydata.mean() mu=144 s = mydata.std() n=30 tstat = (xbar - mu) / (s/np.sqrt(n)) tstat from scipy import stats p = stats.t.cdf(1.224674, df = 29) (1 - p)*2Standard process in data science!['CRISP_DM'](../reports/figures/CRISP_DM.png) Data Preparation* Data strcture must be clear and understandable* Visulize data into plots and graphs GitHub CSV data : First we will scrap data for confirmed cases country wise and will do it for limited number of countriesgit_repo = 'https://github.com/CSSEGISandData/COVID-19.git' git_clone = subprocess.Popen( "git clone " + git_repo , cwd = os.path.dirname( '../data/raw/' ), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE ) (out, error) = git_clone.communicate() print('out:', out) print('error:', error) # load data from csv file filepath = '../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' pd_raw_confirmed = pd.read_csv(filepath) pd_raw_confirmed.head()Filter raw datat_idx = pd_raw_confirmed.columns[4:] df_confirmed = pd.DataFrame({'date':t_idx}) df_confirmed.head() # get daily cases for one counrty e.g. Germany pd_raw_confirmed[pd_raw_confirmed['Country/Region']=='Germany'].iloc[:,4::].sum(axis=0)[-4:] # do same for multiple countries countries =['Italy', 'US', 'Spain', 'Germany', 'Russia' , 'India', 'Brazil'] for con in countries: df_confirmed[con]=np.array(pd_raw_confirmed[pd_raw_confirmed['Country/Region']==con].iloc[:,4::].sum(axis=0)) df_confirmed.tail() df_confirmed.set_index('date').plot() plt.xlabel('Date') plt.ylabel('Total cases') plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.0f'))Datatype of datedf_confirmed.tail() # convert to datetime df_confirmed t_idx = [datetime.strptime(date,"%m/%d/%y") for date in df_confirmed.date] # convert back to date ISO norm (str) t_str = [each.strftime('%Y-%m-%d') for each in t_idx] # set back to DataFrame df_confirmed['date'] = t_idx # cross check type(df_confirmed['date'][0]) df_confirmed.to_csv('../data/processed/COVID_small_flat_table.csv',sep=';',index=False)Scrap recovered and currently infected cases and deathsdef store_JH_small_data(filepath, country_list): # load data from csv file df = pd.read_csv(filepath) t_idx = df.columns[4:] df_processed = pd.DataFrame({'date':t_idx}) for each in country_list: df_processed[each]=np.array(df[df['Country/Region']==each].iloc[:,4::].sum(axis=0)) t_idx = [datetime.strptime(date,"%m/%d/%y") for date in df_processed.date] df_processed['date'] = t_idx return df_processedRecoveredfilepath = '../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv' df_recovered = store_JH_small_data(filepath, countries) df_recovered.tail() df_recovered.to_csv('../data/processed/COVID_small_flat_table_recovered.csv',sep=';',index=False)Deathsfilepath = '../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv' df_deaths = store_JH_small_data(filepath, countries) df_deaths.tail() df_deaths.to_csv('../data/processed/COVID_small_flat_table_deaths.csv',sep=';',index=False)Infecteddf_infected = pd.DataFrame() df_infected['date'] = t_idx df_infected = pd.concat([df_infected, df_confirmed.iloc[:, 1::] - df_recovered.iloc[:, 
1::] - df_deaths.iloc[:, 1::]], axis=1) df_infected.to_csv('../data/processed/COVID_small_flat_table_infected.csv',sep=';',index=False)Relational data model - defining a primary keyA primary key’s main features are:* It must contain a unique value for each row of data.* It cannot contain NaN values.data_path = '../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' pd_raw = pd.read_csv(data_path) pd_raw.head() # adjust column name pd_data_base = pd_raw.rename(columns = {'Country/Region':'country', 'Province/State':'state'}) pd_data_base['state'] = pd_data_base['state'].fillna('no') # drop unnecessary columns pd_data_base = pd_data_base.drop(['Lat','Long'],axis=1) pd_data_base.head() pd_relational=pd_data_base.set_index(['state','country']).T.stack(level=[0,1]).reset_index().rename(columns={'level_0': 'date', 0:'confirmed' }) pd_relational.head() pd_relational.dtypes # chnage datatype of date pd_relational['date'] = pd_relational.date.astype('datetime64[ns]') pd_relational['confirmed'] = pd_relational.confirmed.astype(int) pd_relational.dtypes pd_relational[pd_relational['country']=='US'].tail() pd_relational.to_csv('../data/processed/COVID_relational_confirmed.csv',sep=';',index=False)Rational data model for US region from datasetdata_path='../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv' pd_raw_US=pd.read_csv(data_path) pd_raw_US.head() # remove unwated columns and chnage column names pd_raw_US=pd_raw_US.drop(['UID', 'iso2', 'iso3', 'code3', 'Country_Region','FIPS', 'Admin2', 'Lat', 'Long_', 'Combined_Key'],axis=1) pd_data_base_US=pd_raw_US.rename(columns={'Province_State':'state'}).copy() # stack data in rational form pd_relational_US=pd_data_base_US.set_index(['state']).T.stack().reset_index() \ .rename(columns={'level_0':'date', 0:'confirmed'}) # convert to datetime pd_relational_US['country']='US' pd_relational_US['date']=[datetime.strptime( each,"%m/%d/%y") for each in pd_relational_US.date] pd_relational_US.head() # merge US data into main rational DataFrame pd_relational_model_all=pd_relational[pd_relational['country']!='US'].reset_index(drop=True) pd_relational_model_all=pd.concat([pd_relational_model_all,pd_relational_US],ignore_index=True) pd_relational_model_all[pd_relational_model_all['country']=='US'].tail() # export data to csv pd_relational_model_all.to_csv('../data/processed/20200730_COVID_relational_confirmed.csv',sep=';',index=False)SuperResolution - Syft Duet - Data Owner 🎸Contributed by [@Koukyosyumei](https://github.com/Koukyosyumei)This example trains a SuperResolution network on the BSD300 dataset with Syft.This notebook is mainly based on the original pytorch [example](https://github.com/OpenMined/PySyft/tree/dev/examples/duet/super_resolution/original). PART 1: Launch a Duet Server and ConnectAs a Data Owner, you want to allow someone else to perform data science on data that you own and likely want to protect.In order to do this, we must load our data into a locally running server within this notebook. 
We call this server a "Duet".To begin, you must launch Duet and help your Duet "partner" (a Data Scientist) connect to this server.You do this by running the code below and sending the code snippet containing your unique Server ID to your partner and following the instructions it gives!import syft as sy duet = sy.launch_duet(loopback=True) sy.logger.add(sink="./syft_do.log") from os import listdir from os import makedirs, remove from os.path import exists, join, basename from six.moves import urllib import tarfile from PIL import Image import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as data from torch.utils.data import DataLoader from torchvision.transforms import Compose, CenterCrop, ToTensor, ResizeAdd handler# handler with no tags accepts everything. Better handlers coming soon. duet.requests.add_handler(action="accept")Set params and functionsTo train the model, you have to send the data to data scientists with duet. Thus, you have to convert the data to torch.array.config = {"upscale_factor": 2, "threads":4, "batchSize":1, "testBatchSize":10} def is_image_file(filename): return any(filename.lower().endswith(extension) for extension in [".png", ".jpg", ".jpeg"]) def load_img(filepath): img = Image.open(filepath).convert('YCbCr') y, _, _ = img.split() return y class Prepare_DataSet: def __init__(self, image_dir, input_transform=None, target_transform=None): self.image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)] self.input_transform = input_transform self.target_transform = target_transform def __getitem__(self): inputs = [] targets = [] for path in self.image_filenames: input = load_img(path) target = input.copy() if self.input_transform: input = self.input_transform(input) if self.target_transform: target = self.target_transform(target) inputs.append(input) targets.append(target) return inputs, targets def __len__(self): return len(self.image_filenames) def download_bsd300(dest="dataset"): output_image_dir = join(dest, "BSDS300/images") if not exists(output_image_dir): makedirs(dest) url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz" print("downloading url ", url) data = urllib.request.urlopen(url) file_path = join(dest, basename(url)) with open(file_path, 'wb') as f: f.write(data.read()) print("Extracting data") with tarfile.open(file_path) as tar: for item in tar: tar.extract(item, dest) remove(file_path) return output_image_dir def calculate_valid_crop_size(crop_size, upscale_factor): return crop_size - (crop_size % upscale_factor) def input_transform(crop_size, upscale_factor): return Compose([ CenterCrop(crop_size), Resize(crop_size // upscale_factor), ToTensor(), ]) def target_transform(crop_size): return Compose([ CenterCrop(crop_size), ToTensor(), ]) def get_training_set(upscale_factor): root_dir = download_bsd300() train_dir = join(root_dir, "train") crop_size = calculate_valid_crop_size(256, upscale_factor) return Prepare_DataSet(train_dir, input_transform=input_transform(crop_size, upscale_factor), target_transform=target_transform(crop_size)) def get_test_set(upscale_factor): root_dir = download_bsd300() test_dir = join(root_dir, "test") crop_size = calculate_valid_crop_size(256, upscale_factor) return Prepare_DataSet(test_dir, input_transform=input_transform(crop_size, upscale_factor), target_transform=target_transform(crop_size))Load Datatrain_set = get_training_set(config["upscale_factor"]) test_set = get_test_set(config["upscale_factor"]) X_train, y_train = 
train_set.__getitem__() X_train = torch.cat(X_train) y_train = torch.cat(y_train)Send Data and its sizeX_train.tag("X_train") X_train.send(duet, searchable=True) y_train.tag("y_train") y_train.send(duet, searchable=True) train_num = sy.lib.python.Int(X_train.shape[0]) train_num.tag("train_num") train_num.send(duet, searchable=True)Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.duet.store.pandas duet.requests.pandasStarted with chapter 10. Some references were there in chapter 14 **Perceptron**![perceptron](images/prashant/perceptron.png)import numpy as np from sklearn.datasets import load_iris from sklearn.linear_model import Perceptron iris = load_iris() # selected column 2 & 3 X = iris.data[:, (2, 3)] # petal length, petal width y = (iris.target == 0).astype(np.int) # Iris setosa? per_clf = Perceptron() per_clf.fit(X, y) y_pred = per_clf.predict([[2, 0.5]]) print(per_clf.predict([[4, 2.5]])) iris.target iris.data[:,0:3]The most common step function used in Perceptrons is the Heaviside step function. Sometimes sign function is used![Heaviside function](images/prashant/heaviside_sgn_function.png) It is based on a slightly different artificial neuron (see Figure 10-4) called a **threshold logic unit (TLU)**, or sometimes a linear threshold unit (LTU). The inputs and output are numbers (instead of binary on/off values), and each input connection is associated with a weight ![MLP XOR function](images/prashant/MLP_XOR.png) BackpropagationAutomatically computing gradients is called automatic differentiation, or autodiff. The one used by backpropagation is called reverse-mode autodiff. It is fast and precise, and is well suited when the function to differentiate has many variables (e.g., connection weights) and few outputs (e.g., one loss) Autodiff Automatically differentiating- Forward auto diff -> needs more passes per parameter- Reverse auto diff -> needs less passes if we have less output **Forward passes**:Computing actual derivative at each node**Reverse auto diff**: Computing derivative on each node; as output of currect node depends on previous node output; interpret derivative of current node in terms of previous node (use chain rule![Reverse Auto Diff](./images/prashant/reverse_auto_diff.png)from tensorflow import keras fashion_mnist = keras.datasets.fashion_mnist (X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data() print(X_train_full.shape) print(X_train_full.dtype) X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0 y_valid, y_train = y_train_full[:5000], y_train_full[5000:] class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"] class_names[y_train[0]] y_train[0]Create model using sequential APImodel = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) model.add(keras.layers.Dense(300, activation="relu")) model.add(keras.layers.Dense(100, activation="relu")) model.add(keras.layers.Dense(10, activation="softmax")) model.summary() model.layers hidden1 = model.layers[1] hidden1.name model.get_layer('dense') is hidden1 weights, biases = hidden1.get_weights() weights.shape biases biases.shape model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]) history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid)) import pandas as pd import matplotlib.pyplot as plt pd.DataFrame(history.history).plot(figsize=(8, 5)) plt.grid(True) plt.gca().set_ylim(0, 1) # 
set the vertical range to [0-1] plt.show() model.evaluate(X_test, y_test) X_new = X_test[:3] y_proba = model.predict(X_new) y_proba.round(2) y_pred = model.predict_classes(X_new) y_pred np.array(class_names)[y_pred] y_new = y_test[:3] y_new from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler housing = fetch_california_housing() X_train_full, X_test, y_train_full, y_test = train_test_split( housing.data, housing.target) X_train, X_valid, y_train, y_valid = train_test_split( X_train_full, y_train_full) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_valid = scaler.transform(X_valid) X_test = scaler.transform(X_test) model = keras.models.Sequential([ keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]), keras.layers.Dense(1) ]) model.compile(loss="mean_squared_error", optimizer="sgd") history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid)) mse_test = model.evaluate(X_test, y_test) X_new = X_test[:3] # pretend these are new instances y_pred = model.predict(X_new) input_ = keras.layers.Input(shape=X_train.shape[1:]) hidden1 = keras.layers.Dense(30, activation="relu")(input_) hidden2 = keras.layers.Dense(30, activation="relu")(hidden1) concat = keras.layers.Concatenate()([input_, hidden2]) output = keras.layers.Dense(1)(concat) model = keras.Model(inputs=[input_], outputs=[output])Prepared by Data Visualization With Plotly (Part - 1)import numpy as np import pandas as pd import plotly.graph_objects as go import plotly.offline as po from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import matplotlib.pyplot as plt import dash import plotly.express as px import random import plotly.figure_factory as ffLoading Datasetspokemon = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/pokemon_updated.csv") pokemon.head(10) stdperf = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/studentp.csv") stdperf.head(10) corona = pd.read_csv('C:/Users/DELL/Documents/GitHub/Public/COVID-19/covid/data/countries-aggregated.csv' , index_col='Date' , parse_dates=True) corona.head(10) spotify = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/spotify.csv" , index_col="Date") spotify.head(10) housing = pd.read_csv('C:/Users/DELL/Documents/GitHub/Data-Visualization/housing.csv') housing.tail() insurance = pd.read_csv('C:/Users/DELL/Documents/GitHub/Data-Visualization/insurance.csv') insurance.head(10) employment = pd.read_excel("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/unemployment.xlsx") employment.head(10) helpdesk = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/helpdesk.csv") helpdesk.head(10) fish= pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/Fish.csv") fish.head(10) exercise = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/exercise.csv") exercise.head(10) suicide = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/suicide.csv") suicide.head(10) canada = pd.read_csv("C:/Users/DELL/Documents/GitHub/Public/Data-Visualization/Plotly/Datasets/canada.csv") canada.head() canada.columns canada.drop(columns=['AREA' , 'DEV', 'DevName' , 'REG', 'Type', 'Coverage' , 'AreaName', 'RegName' ], inplace=True) canada.head() 
canada.rename(columns={'OdName':'Country'} , inplace=True) canada.set_index(canada.Country,inplace=True) canada.head() canada2 = canada.copy() canada2.head() canada.index.name=None canada.head() del canada['Country'] canada.head() canada = canada.transpose() canada.head()Sunburst Chartinsurance = pd.read_csv('C:/Users/DELL/Documents/GitHub/Data-Visualization/insurance.csv') insurance.head(10) # Simple Sunburst Chart fig = px.sunburst(insurance, path=['region', 'smoker' , 'sex'], values='charges') fig.update_layout (height = 800 , width = 800) fig.show() fig = px.sunburst(insurance, path=['region', 'smoker' , 'sex'], values='charges' , color= 'charges') fig.update_layout (height = 800 , width = 800) fig.show() # Using inbuilt color scales in Sunburst Chart fig = px.sunburst( insurance, path=['region', 'smoker' , 'sex'], values='charges' , color= 'charges' , color_continuous_scale=px.colors.sequential.Aggrnyl ) fig.update_layout (height = 800 , width = 800) fig.show() # Using color scales in Sunburst Chart fig = px.sunburst( insurance, path=['region', 'smoker' , 'sex'], values='charges' , color= 'charges' , color_continuous_scale=["#8BC34A", "#FF6F00"] #Explicitly Constructing a Color Sequence ) fig.update_layout (height = 800 , width = 800) fig.show() # Using color scales in Sunburst Chart fig = px.sunburst( insurance, path=['region', 'smoker' , 'sex'], values='charges' , color= 'charges' , color_continuous_scale=["#689F38","#F9A825", "#FF6F00"] #Explicitly Constructing a Color Sequence ) #Controlling text fontsize with uniformtext fig.update_layout (height = 800 , width = 800 , uniformtext=dict(minsize=14, mode='hide')) fig.show() suicide.head(10) fig = px.sunburst(suicide, path=['sex', 'age' , 'generation'], values='suicides_no') fig.update_layout (height = 900 , width = 900) fig.show() fig = px.sunburst( suicide, path=['sex', 'age' , 'generation'], values='suicides_no' , color= 'suicides_no' , color_continuous_scale=["#8BC34A","#FF6F00"] ) fig.update_layout (height = 900 , width = 900) fig.show()FEATURE EXTRACTIONimport pandas as pd from textblob import TextBlob from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer d = pd.read_csv("Processed_tweets.csv") d = d.drop(["Unnamed: 0"],axis=1) d.head(10) d.drop_duplicates(inplace=True) d.isna().sum() d.dropna(inplace=True) d.info() n = len(d) d["Sentiment"]=[None for i in range(n)] for i in range(n): s = TextBlob(d["Text"].iloc[i]).sentiment if(s[0]>=0): d["Sentiment"].iloc[i] = 0 else: d["Sentiment"].iloc[i] = 1 len(d[d["Sentiment"]==0]), len(d[d["Sentiment"]==1]) no = len(d[d["Sentiment"]==1]) t = d[d["Sentiment"]==0][:no] s = d[d["Sentiment"]==1] df = pd.concat([s,t],ignore_index="True") len(df)MODEL DEVELOPMENT AND EVALUATION Splitting into Train and Test Datafrom sklearn.model_selection import train_test_split x= df["Text"] y = df["Sentiment"].astype("int") x_train,x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=42) cv = CountVectorizer(ngram_range=(1,3)) tf = TfidfVectorizer(ngram_range=(1,3)) x1 = cv.fit_transform(x_train) x2 = tf.fit_transform(x_train) from sklearn import metrics accuracy = {} model = {} vectorizer = {"CountVectorizer":cv,"TfidfVectorizer":tf}Decision Tree Classifier CountVectorizerfrom sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(x1,y_train) y_pred1 = dtc.predict(cv.transform(x_test)) k = "Decision Tree Classifier with CountVectorizer" model[k]=dtc a1 = metrics.accuracy_score(y_test, 
y_pred1) recall = metrics.recall_score(y_test, y_pred1) accuracy[k] = a1 print(k) print("Accuracy: {0:.4f}".format(a1)) print("Recall : {0:.4f}".format(recall))Decision Tree Classifier with CountVectorizer Accuracy: 0.9008 Recall : 0.8942TfidfVectorizerdtc = DecisionTreeClassifier() dtc.fit(x2,y_train) y_pred2 = dtc.predict(tf.transform(x_test)) k = "Decision Tree Classifier with TfidfVectorizer" model[k]=dtc a2 = metrics.accuracy_score(y_test, y_pred2) recall = metrics.recall_score(y_test, y_pred2) accuracy[k] = a2 print(k) print("Accuracy: {0:.4f}".format(a2)) print("Recall : {0:.4f}".format(recall))Decision Tree Classifier with TfidfVectorizer Accuracy: 0.8963 Recall : 0.8963Random Forest Classifier CountVectorizerfrom sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc.fit(x1,y_train) y_pred3 = rfc.predict(cv.transform(x_test)) k = "Random Forest Classifier with CountVectorizer" model[k]=rfc a3 = metrics.accuracy_score(y_test, y_pred3) recall = metrics.recall_score(y_test, y_pred3) accuracy[k] = a3 print(k) print("Accuracy: {0:.4f}".format(a3)) print("Recall : {0:.4f}".format(recall))Random Forest Classifier with CountVectorizer Accuracy: 0.8579 Recall : 0.7754TfidfVectorizerrfc = RandomForestClassifier() rfc.fit(x2,y_train) y_pred4 = rfc.predict(tf.transform(x_test)) k = "Random Forest Classifier with TfidfVectorizer" model[k]=rfc a4 = metrics.accuracy_score(y_test, y_pred4) recall = metrics.recall_score(y_test, y_pred4) accuracy[k] = a4 print(k) print("Accuracy: {0:.4f}".format(a4)) print("Recall : {0:.4f}".format(recall)) sorted(accuracy) ad = pd.DataFrame({"Accuracy":accuracy}) adScorescore = regressor.score(X,y) scoreplotarimport matplotlib.pyplot as plt plt.scatter(X,y) plt.title('Redes Neurais') plt.xlabel('Idade') plt.ylabel('Custo plano') plt.plot(X,regressor.predict(X),color='red') print(scaler_y.inverse_transform(regressor.predict(X)))[ 247.55311319 378.11485371 497.26494608 702.74098707 963.65460287 1602.87772593 2851.57866443 4471.06453626 6086.01710924 7694.58675711]Working with Arrays 1. Run the following cells:import numpy as np array_1D = np.array([10,11,12,13, 14]) array_1D array_2D = np.array([[20,30,40,50,60], [43,54,65,76,87], [11,22,33,44,55]]) array_2D array_3D = np.array([[[1,2,3,4,5], [11,21,31,41,51]], [[11,12,13,14,15], [51,52,53,54,5]]]) array_3D2. Slice the first column of the 2-D array.array_2D[:,0]3. Slice the last column of the 2-D arrayarray_2D[:,-1]4. Slice the second row of the 2-D arrayarray_2D[1,:] #array_2D[1]5. Slice the last two columns of the 2-nd row of the 2-D arrayarray_2D[1,-2:]6. Slice the 2-nd row of the 2-D array excluding the last two columnsarray_2D[1,:-2]7. Slice everything excluding the first row and last column of the 2-D arrayarray_2D[1:,:-1]8. Slice the 1st, 3rd and 5th columns of the 2-D array (Hint: Take advantage of the [start:stop:step] suntax of indices)array_2D[:,0::2]9. Slice the first columns of both matrices in the 3-D array (Hint: The syntax for the indices of the 3-D array is the following: [subarray, row, column])array_3D[:,:,0]10. Slice every other column of both matrices in the 3-D arrayarray_3D[:,:,::2]11. Use conditional slicing to check if the individual elements of each array satisfy a given condition (e.g. greater than 10)array_1D > 10 array_2D > 10 array_3D > 1012. Use conditional slicing to display which individual elements of each array satisfy this conditionarray_1D[array_1D > 10] array_2D[array_2D > 10] array_3D[array_3D > 10]13. 
Add a second condition and disaply which individual elements satisfy both (e.g. greater than 10 and odd) (Hint: Odd numbers leave a remainder of 1, when dividing by 2. Hence, we can use "%" to express this condition.)array_1D[((array_1D > 10) & (array_1D % 2 == 1))] array_2D[((array_2D > 10) & (array_2D % 2 == 1))] array_3D[((array_3D > 10) & (array_3D % 2 == 1))]13. Loosen up the requirements, so that either condition works (Hint: We just need to go from condition_1 and condition_2 to condition_1 or condition_2)array_1D[((array_1D > 10) | (array_1D % 2 == 1))] array_2D[((array_2D > 10) | (array_2D % 2 == 1))] array_3D[((array_3D > 10) | (array_3D % 2 == 1))]14. Call the first row of the first 2-D array in the 3-D array in 3 different ways: A) Use precise indices for both dimensions. B) Use a precise index for one dimension and a slice for the second dimension. C) Uses slices (from the origin, up to the second value) for both dimensions.array_3D[0,0] array_3D[:1,0] array_3D[:1,:1]15. Check the shapes of the 3 different notations calling the same valuesarray_3D[0,0].shape array_3D[:1,0].shape array_3D[:1,:1].shape16. Since the last two outputs contain excess dimensions, use the squeeze function to reduce themnp.squeeze(array_3D[:1,0]) #array_3D[:1,0].squeeze() np.squeeze(array_3D[:1,:1]) #array_3D[:1,:1].squeeze()17. Now check the shapes of the reduced outputsnp.squeeze(array_3D[:1,0]).shape np.squeeze(array_3D[:1,:1]).shapeData Mining and Machine Learning February 2021 Mean/median/mode and k-nn imputation Datasets: Titanic, Breastw y Segmentimport pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoderEjemplo1. Leyendo los datos de Titanictitanic=pd.read_csv('https://raw.githubusercontent.com/eacunafer/DataAnalysiswithPython3/master/Datasets/titanic.csv',header=0,sep=',',na_values='') titanic.info() titanic.head(39) RangeIndex: 1309 entries, 0 to 1308 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 PassengerId 1309 non-null int64 1 Pclass 1309 non-null int64 2 Name 1309 non-null object 3 Sex 1309 non-null object 4 Age 1046 non-null float64 5 SibSp 1309 non-null int64 6 Parch 1309 non-null int64 7 Ticket 1309 non-null object 8 Fare 1308 non-null float64 9 Cabin 295 non-null object 10 Embarked 1307 non-null object 11 Survived 1309 non-null int64 dtypes: float64(2), int64(5), object(5) memory usage: 122.8+ KBDeleting columns: PassengerID, Name and cabin(it has a lot of missing values)titanic1=titanic.iloc[:,[1,3,4,5,6,8,10]] titanic1.info() RangeIndex: 1309 entries, 0 to 1308 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Pclass 1309 non-null int64 1 Sex 1309 non-null object 2 Age 1046 non-null float64 3 SibSp 1309 non-null int64 4 Parch 1309 non-null int64 5 Fare 1308 non-null float64 6 Embarked 1307 non-null object dtypes: float64(2), int64(3), object(2) memory usage: 71.7+ KBHay missings en las variables Age, Fare que son continuas y en Embarked que es categorica#computing the values to be used in the imputation t3=titanic1['Age'].mean() t6=titanic1['Fare'].mean() t7=titanic1['Embarked'].mode().iloc[0] #Performing the imputacion values = {'Age': t3, 'Fare': t6, 'Embarked': t7} titanic2=titanic1.fillna(value=values) titanic2.info() RangeIndex: 1309 entries, 0 to 1308 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Pclass 1309 non-null int64 1 Sex 1309 non-null object 2 Age 1309 non-null float64 3 SibSp 1309 non-null int64 4 
Parch 1309 non-null int64 5 Fare 1309 non-null float64 6 Embarked 1309 non-null object dtypes: float64(2), int64(3), object(2) memory usage: 71.7+ KBImputando los missing values por la media si el atributo es continuo y por la moda si el atributo es nominal#Imputando los missing values por la media si el atributo es continuo #y por la moda si el atributo es nominal from sklearn.impute import SimpleImputer from sklearn.base import TransformerMixin class DataFrameImputer(TransformerMixin): def fit(self, X, y=None): self.fill = pd.Series([X[c].value_counts().index[0] if X[c].dtype == np.dtype('O') else X[c].mean() for c in X], index=X.columns) return self def transform(self, X, y=None): return X.fillna(self.fill) X=titanic1 xt = DataFrameImputer().fit_transform(X) xt.info() #Imputando los missing values por la mediana si el atributo es continuo #y por la moda si el atributo es nominal class DataFrameImputer(TransformerMixin): def fit(self, X, y=None): self.fill = pd.Series([X[c].value_counts().index[0] if X[c].dtype == np.dtype('O') else X[c].median() for c in X], index=X.columns) return self def transform(self, X, y=None): return X.fillna(self.fill) X=titanic1 xt = DataFrameImputer().fit_transform(X) xt.info() RangeIndex: 1309 entries, 0 to 1308 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Pclass 1309 non-null int64 1 Sex 1309 non-null object 2 Age 1309 non-null float64 3 SibSp 1309 non-null int64 4 Parch 1309 non-null int64 5 Fare 1309 non-null float64 6 Embarked 1309 non-null object dtypes: float64(2), int64(3), object(2) memory usage: 71.7+ KBIMPUTACION k-nnimport numpy as np from sklearn.impute import KNNImputer X = [[4, 5, 6,np.nan], [5, 1, 1,4], [7, 9,2, 5], [8, 2,8, 5],[6,4,2,6]] #imputer = KNNImputer(n_neighbors=3) #imputer.fit_transform(X) KNNImputer(n_neighbors=3).fit_transform(X)Ejemplo 2. k-nn imputation applied to the Breast-wisconsin dataset (todas las features son continuas)#Reading the data from the UCI breastdf=pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data",header=None, sep=",",na_values=['?']) #breastdf=pd.read_csv("c://PW-PR/breast-cancer-wisconsin.data",header=None, sep=",",na_values=['?']) breastdf.columns=['idx','v1','v2','v3','v4','v5','v6','v7','v8','v9','class'] breastdf.info() breastdf.head() breast1=breastdf.iloc[:,1:11] breast1.head() b1=np.array(breast1) imputer = KNNImputer(n_neighbors=5) data_imp=imputer.fit_transform(b1) completo5=pd.DataFrame(data_imp) completo5.info() RangeIndex: 699 entries, 0 to 698 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 0 699 non-null float64 1 1 699 non-null float64 2 2 699 non-null float64 3 3 699 non-null float64 4 4 699 non-null float64 5 5 699 non-null float64 6 6 699 non-null float64 7 7 699 non-null float64 8 8 699 non-null float64 9 9 699 non-null float64 dtypes: float64(10) memory usage: 54.7 KBEjemplo 3. Imputacion k-nn a Titanic. Hay que convertir las variables categoticas a numericas porque knn-imputacion solo esta implementada para variables numericas Convirtiendo las variables nominales en numericas, debido a que la imputacion knn de python no trabaja con variables mezcladas Debe hacerse con cuidado porque el atributo categorico contiene missing value y el LabelEncoder puede asignarle un valor en lugar de ignorarlo. LabelEncoder codifica los valores categoricas en numero enteros desde 0 hasta el numero de categorias-1. 
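As a quick, hypothetical illustration of that caveat (a toy Series, not the Titanic columns): after astype(str), a missing value becomes the literal string 'nan', and LabelEncoder assigns it an integer code like any other category, which is why the missing positions are saved first and restored afterwards.

import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Toy categorical column with one missing value (illustration only)
s = pd.Series(['S', 'C', np.nan, 'Q', 'S'])

# astype(str) turns NaN into the string 'nan', so it gets its own code
codes = LabelEncoder().fit_transform(s.astype(str))
print(codes)  # [2 0 3 1 2] -- the missing entry received code 3 instead of staying NaN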
Similar resultado se puede conseguir con cat.codes de Pandas#Guardando las posiciones donde estan los missings titanic3=titanic1.copy() mask = titanic3.isnull() print(mask) #Convirtiendo a numero los atributos categoricos titanic1=titanic1.astype(str).apply(LabelEncoder().fit_transform) #Sustituyendo con NaN las posiciones donde los atributos categoricos tienen missings titanic4=titanic1.where(~mask, titanic3).copy() titanic4.tail(7) titanic4.info() #Convirtiendo en Float las variables categoricas donde ahi missings titanic4['Age'] = titanic4['Age'].astype('float64') titanic4['Fare'] = titanic4['Fare'].astype('float64') titanic4['Embarked'] = titanic4['Embarked'].astype('float64') b2=np.array(titanic4) imputer = KNNImputer(n_neighbors=5) titanic_knnimp=imputer.fit_transform(b2) completo=pd.DataFrame(titanic_knnimp) completo.info() RangeIndex: 1309 entries, 0 to 1308 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 0 1309 non-null float64 1 1 1309 non-null float64 2 2 1309 non-null float64 3 3 1309 non-null float64 4 4 1309 non-null float64 5 5 1309 non-null float64 6 6 1309 non-null float64 dtypes: float64(7) memory usage: 71.7 KBExample 4. Simulating missing values and imputing them. Dataset: segment from the UCIThis dataset is an image segmentation database similar to a database already present in the repository (Image segmentation database) but in a slightly different form. The instances were drawn randomly from a database of 7 outdoor images. The images were handsegmented to create a classification for every pixel. Each instance is a 3x3 region.seg=pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/segment/segment.dat',header=None, delim_whitespace=True) #seg=seg=pd.read_table('c://PW-PR/segment.dat',header=None, delim_whitespace=True) seg.columns=['v1','v2','v3','v4','v5','v6','v7','v8','v9','v10','v11','v12','v13','v14','v15','v16','v17','v18','v19','class'] seg.info() RangeIndex: 2310 entries, 0 to 2309 Data columns (total 20 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 v1 2310 non-null float64 1 v2 2310 non-null float64 2 v3 2310 non-null int64 3 v4 2310 non-null float64 4 v5 2310 non-null float64 5 v6 2310 non-null float64 6 v7 2310 non-null float64 7 v8 2310 non-null float64 8 v9 2310 non-null float64 9 v10 2310 non-null float64 10 v11 2310 non-null float64 11 v12 2310 non-null float64 12 v13 2310 non-null float64 13 v14 2310 non-null float64 14 v15 2310 non-null float64 15 v16 2310 non-null float64 16 v17 2310 non-null float64 17 v18 2310 non-null float64 18 v19 2310 non-null float64 19 class 2310 non-null int64 dtypes: float64(18), int64(2) memory usage: 361.1 KBTodos las variables de segment son cuantitativas.#simulando un 5% de celdas missing import random seg5=seg.copy() seg5= seg5.stack().sample(frac=0.95).unstack().reindex(index=seg5.index, columns=seg5.columns) seg5.head() seg5.info() #imputando por la media seg5.mean=seg5.apply(lambda x: x.fillna(x.mean()),axis=0) #Imputando por la mediana seg5.median=seg5.apply(lambda x: x.fillna(x.median()),axis=0) b1=np.array(seg5) imputer = KNNImputer(n_neighbors=5) data_nimp=imputer.fit_transform(b1) completoknn5=pd.DataFrame(data_nimp) completoknn5.info() from sklearn.metrics import mean_squared_error #Calculando el cuadrado medio del error para imputacion por la media completo5mean=pd.DataFrame(seg5.mean) mean_squared_error(seg,completo5mean) #Calculando el cuadrado medio del error para imputacion por la mediana 
completo5median=pd.DataFrame(seg5.median) mean_squared_error(seg,completo5median) #Computing the mean squared error for k-nn imputation mean_squared_error(seg,completoknn5)Notice that the best imputation method was k-nn#simulating 20% of cells as missing seg20=seg.copy() seg20= seg20.stack().sample(frac=0.80).unstack().reindex(index=seg20.index, columns=seg20.columns) seg20.info() #finding the number of rows that contain missings sum(seg20.apply(lambda x: sum(x.isnull().values), axis = 1)>0) #imputing with the mean at 20% missing seg20.mean=seg20.apply(lambda x: x.fillna(x.mean()),axis=0) completo20mean=pd.DataFrame(seg20.mean) mean_squared_error(seg,completo20mean) #imputing with the median at 20% missing seg20.median=seg20.apply(lambda x: x.fillna(x.median()),axis=0) completo20median=pd.DataFrame(seg20.median) mean_squared_error(seg,completo20median) b2=np.array(seg20) imputer = KNNImputer(n_neighbors=5) data_imp=imputer.fit_transform(b2) completoknn20=pd.DataFrame(data_imp) mean_squared_error(seg,completoknn20)What you will learn- what is a for loop- how to iterate through lists using for loops What is a for loopA for loop will iterate over a specific section of code a number of timesIt is best seen in action Let's use a for loop to count to the number 10for i in range(10): print(i)0 1 2 3 4 5 6 7 8 9Note- Unlike MATLAB, Python (and almost every other programming language) starts counting at 0 - This is because computers start addressing memory at address 0 (to start at 1 would waste space)- The last number in the range is exclusivefor i in range(10, 20): print(i)10 11 12 13 14 15 16 17 18 19Note- We can specify the range we want to loop over- In this case starting at 10 (inclusively) and ending at 20 (exclusively) Using range we can also increase by counts other than 1for i in range(0, 50, 10): print(i)0 10 20 30 40Looping through a listOne of the most common things we need to do is loop through a list. Right off the bat there is a good way to do this, and a bad way. - This is probably what you do in MATLABfruits = ["Apples", "Bananas", "Oranges", "Pineapples"] for i in range(len(fruits)): print(fruits[i])Apples Bananas Oranges PineapplesWhile it works, this is bad form in Python. Why? Because the Python language is not being used to its fullest extentfruits = ["Apples", "Bananas", "Oranges", "Pineapples"] for fruit in fruits: print(fruit)Apples Bananas Oranges PineapplesNote- This is easier to read - NEVER underestimate the importance of readable code. Think of how bad it sucks to read through a terribly written essay. Now imagine reading through a terribly written essay written in a language built for computers doing complex logic Common Things With Lists EnumerateSometimes we need an index number while still iterating over a list. We can use enumerate to do thisfruits = ["Apples", "Bananas", "Oranges", "Pineapples"] for i, fruit in enumerate(fruits): print(f"{fruit} is at the {i} index")Apples is at the 0 index Bananas is at the 1 index Oranges is at the 2 index Pineapples is at the 3 indexNote- we take in the variable i followed by a comma, then the variable fruit- If we reversed these, it would print "0 is at the Apples index" and so forth ZipWhat if we need to iterate over two lists at the same time? Well, we could use enumerate and then get the index of the other list ... BUT there is a better way. 
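For completeness, here is a minimal sketch of that clunkier index-based approach (with a hypothetical second list), before the cleaner zip version below:

fruits = ["Apples", "Bananas", "Oranges", "Pineapples"]
colors = ["red", "yellow", "orange", "brown"]  # hypothetical companion list
for i, fruit in enumerate(fruits):
    # manually look up the matching element by index
    print(f"{fruit} is {colors[i]}")

This only works safely when the indexed list is at least as long as the one being enumerated, which is exactly the bookkeeping zip handles for you.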
Zipfruits = ["Apples", "Bananas", "Oranges", "Pineapples"] cars = ["Truck", "Jeep", "Sports Car", "SUV", "Sudan", "Hatch-Back", "Cross-Over"] for fruit, car in zip(fruits, cars): print(f"{fruit} is a type of fruit") print(f"{car} is a type of car\n")Apples is a type of fruit Truck is a type of car Bananas is a type of fruit Jeep is a type of car Oranges is a type of fruit Sports Car is a type of car Pineapples is a type of fruit SUV is a type of carNote- There are more items in the list 'cars' then in the list 'fruits'- zip will stop iterating when all items have been iterated though in the smallest list- Zip can unzip any number of list- Notice that fruit, car is in the same order as fruits, cars Looping though a dictionaryThere are a few diffrent ways we can loop though a dictionary. We'll go though all of them .keys()We can create a list of all the keys in a dictionary.books = { 'H': 'JK Rolling', 'The Kingkiller Chronicles': '', 'Game of Thrones': '', 'The Lord of the Rings': ''} for key in books.keys(): print(key) The Kingkiller Chronicles Game of Thrones The Lord of the RingsSome backaround theorybooks.keys() is not actally a list. It is a 'dict_keys' listbooks = { '': 'JK Rolling', 'The Kingkiller Chronicles': '', 'Game of Thrones': '', 'The Lord of the Rings': ''} bookKeys = books.keys() print(type(bookKeys)) print(bookKeys) dict_keys(['', 'The Kingkiller Chronicles', 'Game of Thrones', 'The Lord of the Rings'])So how can we iterate over bookKeys? Underneath the hood of varables we can see some information about then using dir()books = { '': 'JK Rolling', 'The Kingkiller Chronicles': '', 'Game of Thrones': '', 'The Lord of the Rings': ''} bookKeys = books.keys() print(dir(bookKeys))['__and__', '__class__', '__contains__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__ne__', '__new__', '__or__', '__rand__', '__reduce__', '__reduce_ex__', '__repr__', '__ror__', '__rsub__', '__rxor__', '__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__xor__', 'isdisjoint']There is a lot of mumbo-jumbo in here you don't need to worry about. BUT notice that there is something called "_iter_",_iter_ tells python how to move to the next "thing". 
_iter_ has a function called __next__() that tells Python how to move to the next "thing"This is how the for loop knows how to move though the loopYou cannot loop though an iterable object backwards becuase there is no __back__(), only a __next__() call# define a list my_list = [4, 7, 0, 3] # get an iterator using iter() my_iter = iter(my_list) ## iterate through it using next() #prints 4 print(my_iter.__next__()) #prints 7 print(my_iter.__next__()) ## next(obj) is same as obj.__next__() #prints 0 print(my_iter.__next__()) #prints 3 print(my_iter.__next__()) ## This will raise error, no items left next(my_iter)4 7 0 3Note- can can dir() any varable in python- This can give you some insights on useageHere is an example on a listfruits = ["Apples", "Bananas", "Oranges", "Pineapples"] print(dir(fruits))['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort']Note- Notice how "append", "copy", "pop", etc are listed here? ... Back to looping Looping by valuesbooks = { '': '', 'The Kingkiller Chronicles': '', 'Game of Thrones': '', 'The Lord of the Rings': ''} for val in books.values(): print(val) Looping by itembooks = { '': '', 'The Kingkiller Chronicles': '', 'Game of Thrones': '', 'The Lord of the Rings': ''} for key, val in books.items(): print(f"{key} by {val}") by The Kingkiller Chronicles by Game of Thrones by The Lord of the Rings by Breaking out of a loopIf you are iterating though somehthing and what to stop before all items have been iteraged over you can use breaknumbers = [1,2,3,4,5,6,7,8,9,10] # will get to 5 and then break from the loop # so this will print 1,2,3, and 4 for num in numbers: if num == 5: break print(num)1 2 3 4What you need to doData samples for the temperature are collected one a day for 100 days. Each sample is tagged with a timestamp anda temperature value. These data samples are stored in the below variable "data".- Somewhere between days 24 - 67 (inclusively) there was an error reading the temperature. The sensor reported a valuefar too hot to be possible. Find this value and remove it from the list.- We need to know how many days the temperature was over 40. Print this to the screen (omitting the bad data point).- What is the the average temperature over the 99 days (omitting the bad). 
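One possible way to tackle this, sketched under the assumption that any reading above 100 is the sensor error (run it after the data cell below):

# Keep only plausible readings (100 is an assumed cutoff for the bad sensor value)
clean = [sample for sample in data if sample['Temp'] <= 100]

# Number of days with a temperature over 40
hot_days = sum(1 for sample in clean if sample['Temp'] > 40)
print(f"Days over 40: {hot_days}")

# Average temperature over the remaining days
average = sum(sample['Temp'] for sample in clean) / len(clean)
print(f"Average temperature: {average:.2f}")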
Time stamps- The timestamps are in time sence epoch form- This is the number of seconds that have passed from midnight on Jan 1, 1970- Below is a quick example of how these time stamps were generated - We will talk more on imports later - We will talk more on dealing with time laterimport time print(f"{time.time()} seconds ago was New Years 1970") data = [{'Time Stamp': 1570648123.120855, 'Temp': 20}, {'Time Stamp': 1570734523.120863, 'Temp': 47}, {'Time Stamp': 1570820923.120866, 'Temp': 43}, {'Time Stamp': 1570907323.1208682, 'Temp': 39}, {'Time Stamp': 1570993723.1208708, 'Temp': 45}, {'Time Stamp': 1571080123.120873, 'Temp': 11}, {'Time Stamp': 1571166523.1208751, 'Temp': 17}, {'Time Stamp': 1571252923.120877, 'Temp': 2}, {'Time Stamp': 1571339323.1208792, 'Temp': 49}, {'Time Stamp': 1571425723.1208808, 'Temp': 16}, {'Time Stamp': 1571512123.120883, 'Temp': 27}, {'Time Stamp': 1571598523.120885, 'Temp': 43}, {'Time Stamp': 1571684923.120887, 'Temp': 44}, {'Time Stamp': 1571771323.1208892, 'Temp': 9}, {'Time Stamp': 1571857723.1208909, 'Temp': 1}, {'Time Stamp': 1571944123.120893, 'Temp': 38}, {'Time Stamp': 1572030523.120895, 'Temp': 57}, {'Time Stamp': 1572116923.120897, 'Temp': 5}, {'Time Stamp': 1572203323.120899, 'Temp': 22}, {'Time Stamp': 1572289723.120901, 'Temp': 9}, {'Time Stamp': 1572376123.120903, 'Temp': 9}, {'Time Stamp': 1572462523.120905, 'Temp': 11}, {'Time Stamp': 1572548923.120907, 'Temp': 9}, {'Time Stamp': 1572635323.120909, 'Temp': 50}, {'Time Stamp': 1572721723.1209111, 'Temp': 35}, {'Time Stamp': 1572808123.1209128, 'Temp': 49}, {'Time Stamp': 1572894523.120917, 'Temp': 27}, {'Time Stamp': 1572980923.120919, 'Temp': 58}, {'Time Stamp': 1573067323.1209211, 'Temp': 9}, {'Time Stamp': 1573153723.120925, 'Temp': 35}, {'Time Stamp': 1573240123.1209269, 'Temp': 7}, {'Time Stamp': 1573326523.120928, 'Temp': 43}, {'Time Stamp': 1573412923.12093, 'Temp': 37}, {'Time Stamp': 1573499323.120932, 'Temp': 33}, {'Time Stamp': 1573585723.1209338, 'Temp': 47}, {'Time Stamp': 1573672123.120936, 'Temp': 11}, {'Time Stamp': 1573758523.120938, 'Temp': 39}, {'Time Stamp': 1573844923.12094, 'Temp': 17}, {'Time Stamp': 1573931323.120942, 'Temp': 333}, {'Time Stamp': 1574017723.120944, 'Temp': 37}, {'Time Stamp': 1574104123.120946, 'Temp': 39}, {'Time Stamp': 1574190523.120948, 'Temp': 48}, {'Time Stamp': 1574276923.12095, 'Temp': 58}, {'Time Stamp': 1574363323.1209521, 'Temp': 0}, {'Time Stamp': 1574449723.120954, 'Temp': 29}, {'Time Stamp': 1574536123.120956, 'Temp': 26}, {'Time Stamp': 1574622523.1209579, 'Temp': 8}, {'Time Stamp': 1574708923.12096, 'Temp': 16}, {'Time Stamp': 1574795323.1209621, 'Temp': 8}, {'Time Stamp': 1574881723.120965, 'Temp': 49}, {'Time Stamp': 1574968123.1209679, 'Temp': 17}, {'Time Stamp': 1575054523.12097, 'Temp': 22}, {'Time Stamp': 1575140923.120973, 'Temp': 0}, {'Time Stamp': 1575227323.120975, 'Temp': 29}, {'Time Stamp': 1575313723.120977, 'Temp': 14}, {'Time Stamp': 1575400123.1209788, 'Temp': 17}, {'Time Stamp': 1575486523.120981, 'Temp': 45}, {'Time Stamp': 1575572923.1209831, 'Temp': 55}, {'Time Stamp': 1575659323.120985, 'Temp': 3}, {'Time Stamp': 1575745723.120987, 'Temp': 26}, {'Time Stamp': 1575832123.1209888, 'Temp': 25}, {'Time Stamp': 1575918523.120991, 'Temp': 3}, {'Time Stamp': 1576004923.1209931, 'Temp': 44}, {'Time Stamp': 1576091323.120995, 'Temp': 10}, {'Time Stamp': 1576177723.1209972, 'Temp': 49}, {'Time Stamp': 1576264123.120998, 'Temp': 46}, {'Time Stamp': 1576350523.121, 'Temp': 3}, {'Time Stamp': 1576436923.121002, 'Temp': 19}, 
{'Time Stamp': 1576523323.121004, 'Temp': 26}, {'Time Stamp': 1576609723.121006, 'Temp': 6}, {'Time Stamp': 1576696123.1210082, 'Temp': 23}, {'Time Stamp': 1576782523.1210098, 'Temp': 8}, {'Time Stamp': 1576868923.121012, 'Temp': 20}, {'Time Stamp': 1576955323.121015, 'Temp': 10}, {'Time Stamp': 1577041723.121017, 'Temp': 36}, {'Time Stamp': 1577128123.121021, 'Temp': 58}, {'Time Stamp': 1577214523.121023, 'Temp': 15}, {'Time Stamp': 1577300923.121025, 'Temp': 59}, {'Time Stamp': 1577387323.121026, 'Temp': 8}, {'Time Stamp': 1577473723.1210282, 'Temp': 13}, {'Time Stamp': 1577560123.1210299, 'Temp': 11}, {'Time Stamp': 1577646523.121032, 'Temp': 14}, {'Time Stamp': 1577732923.121034, 'Temp': 14}, {'Time Stamp': 1577819323.121036, 'Temp': 34}, {'Time Stamp': 1577905723.1210382, 'Temp': 35}, {'Time Stamp': 1577992123.1210399, 'Temp': 34}, {'Time Stamp': 1578078523.121042, 'Temp': 30}, {'Time Stamp': 1578164923.121044, 'Temp': 23}, {'Time Stamp': 1578251323.121046, 'Temp': 5}, {'Time Stamp': 1578337723.121048, 'Temp': 13}, {'Time Stamp': 1578424123.1210501, 'Temp': 49}, {'Time Stamp': 1578510523.1210518, 'Temp': 36}, {'Time Stamp': 1578596923.121054, 'Temp': 38}, {'Time Stamp': 1578683323.121056, 'Temp': 17}, {'Time Stamp': 1578769723.121058, 'Temp': 37}, {'Time Stamp': 1578856123.1210601, 'Temp': 23}, {'Time Stamp': 1578942523.1210618, 'Temp': 54}, {'Time Stamp': 1579028923.121063, 'Temp': 13}, {'Time Stamp': 1579115323.121068, 'Temp': 36}, {'Time Stamp': 1579201723.121071, 'Temp': 17}]Music GAN Colab AppA Colab hosted Streamlit app around the Lucid Dreams package. --- Pull Down the RepoFetch the repository from GitHub, and install Python requirements.!git clone https://github.com/michaeltinsley/music-gan-jupyter-app.git !pip3 install -r ./music-gan-jupyter-app/requirements.txt--- NgrokFirst we download an unzip [Ngrok](https://ngrok.com/).Ngrok lets us tunnel a port over the internet.We need this to access Streamlit.!wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip !unzip ngrok-stable-linux-amd64.zipRun the Ngrok command to tunnel port 8501.Use IPython `system_raw` to run the service in a separate process.get_ipython().system_raw('./ngrok http 8501 &')Ping the local Ngrok API to find out where our tunnelled port hasappeared on the internet.!curl -s http://localhost:4040/api/tunnels | python3 -c \ 'import sys, json; print("App available at: " +json.load(sys.stdin)["tunnels"][0]["public_url"])'--- Run the Streamlit AppRun `main.py` with Streamlit ensuring the default port is used.!streamlit run ./music-gan-jupyter-app/main.py --server.port=8501In the previous notebook, we set up a framework for doing gradient-based minimization of differentiable functions (via the `GradientDescent` typeclass) and implemented simple gradient descent for univariate functions. Next, let's try to extend this framework to a faster method such as nonlinear Conjugate Gradient, and see what modifications we'll need to make in order to accomodate it.$\newcommand\vector[1]{\langle 1 \rangle}\newcommand\p[2]{\frac{\partial 1}{\partial 2}}\newcommand\R{\mathbb{R}}$ Conjugate Gradient===Before diving in to Haskell, let's go over exactly what the conjugate gradient method is and why it works. The "normal" conjugate gradient method is a method for solving systems of linear equations. However, this extends to a method for minimizing quadratic functions, which we can subsequently generalize to minimizing arbitrary functions $f\!:\!\R^n \to \R$. 
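To make that connection concrete, here is a tiny NumPy check (with an arbitrary small $A$ and $b$, not taken from the text): the minimizer of the quadratic is exactly the point where the gradient $Ax + b$ vanishes, i.e. the solution of a linear system.

import numpy as np

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])   # symmetric positive-definite, chosen arbitrarily
b = np.array([-1.0, 4.0])

f = lambda x: 0.5 * x @ A @ x + b @ x   # the constant c does not affect the minimizer
x_star = np.linalg.solve(A, -b)         # solve A x = -b
print(x_star, np.allclose(A @ x_star + b, 0.0))  # gradient vanishes at the minimum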
We will start by going over the conjugate gradient method of minimizing quadratic functions, and later generalize.Suppose we have some quadratic function$$f(x) = \frac{1}{2}x^T A x + b^T x + c$$for $x \in \R^n$ with $A \in \R^{n \times n}$ and $b, c \in \R^n$.We can write any quadratic function in this form, as this generates all the coefficients $x_ix_j$ as well as linear and constant terms. In addition, we can assume that $A = A^T$ ($A$ is symmetric). (If it were not, we could just rewrite this with a symmetric $A$, since we could take the term for $x_i x_j$ and the term for $x_j x_i$, sum them, and then have $A_{ij} = A_{ji}$ both be half of this sum.)Taking the gradient of $f$, we obtain$$\nabla f(x) = A x + b,$$which you can verify by writing out the terms in summation notation.If we evaluate $-\nabla f$ at any given location, it will give us a vector pointing towards the direction of steepest descent. This gives us a natural way to start our algorithm - pick some initial guess $x_0$, compute the gradient $-\nabla f(x_0)$, and move in that direction by some step size $\alpha$. Unlike normal gradient descent, however, we do not have a fixed step size $\alpha$ - instead, we perform a line search in order to find the *best* $\alpha$. This $\alpha$ is the value of $\alpha$ which brings us to the minimum of $f$ if we are constrainted to move in the direction given by $d_0 = -\nabla f(x_0)$.Note that computing $\alpha$ is equivalent to minimizing the function$$\begin{align*}g(\alpha) &= f(x_0 + \alpha d_0) \\&= \frac{1}{2}(x_0 + \alpha d_0)^T A (x_0 + \alpha d_0) + b^T (x_0 + \alpha d_0) + c\\&= \frac{1}{2}\alpha^2 {d_0}^T A d_0 + {d_0}^T (A x_0 + b) \alpha + (\frac{1}{2} {x_0}^T A x_0 + {x_0}^T d_0 + c)\end{align*}$$Since this is a quadratic function in $\alpha$, it has a unique global minimum or maximum. Since we assume we are not at the minimum and not at a saddle point of $f$, we assume that it has a minimum. The minimum of this function occurs when $g'(\alpha) = 0$, that is, when$$g'(\alpha) = ({d_i}^T A {d_i})\alpha + {d_i}^T(A x_i + b) = 0.$$Solving this for $\alpha$, we find that the minimum is at$$\alpha = -\frac{{d_i}^T (A x_i + b)}{{d_i}^T A d_i}.$$Note that since the directon is the negative of the gradient, a.k.a. the direction of steepest descent, $\alpha$ will be non-negative. These first steps give us our second point in our iterative algorithm:$$x_1 = x_0 - \alpha \nabla f(x_0)$$If this were simple gradient descent, we would iterate this procedure, computing the gradient at each next point and moving in that direction. However, this has a problem - by moving $\alpha_0$ in direction $d_0$ (to find the minimum in direction $d_0$) and then moving $\alpha_1$ in direction $d_1$, we may *ruin* our work from the previous iteration, so that we are no longer at a minimum in direction $d_0$. In order to rectify this, we require that our directions be *conjugate* to one another.We define two vectors $x$ and $y$ to be conjugate with respect to some semi-definite matrix $A$ if $x^T A y = 0$. (Semi-definite matrices are ones where $x^T A x \ge 0$ for all $x$, and are what we require for conjugate gradient.)Since we have already moved in the $d_0 = -\nabla f(x_0)$ direction, we must find a new direction $d_1$ to move in that is conjugate to $d_0$. How do we do this? 
Well, let's compute $d_1$ by starting with the gradient at $x_1$ and then subtracting off anything that would counter-act the previous direction:$$d_1 = -\nabla f(x_1) + \beta_0 d_0.$$This leaves us with the obvious question - what is $\beta_0$? We can derive that from our definition of conjugacy. Since $d_0$ and $d_1$ must be conjugate, we know that ${d_1}^T A d_0 = 0$. Expanding $d_1$ by using its definition, we get that ${d_1}^T A d_0 = -\nabla f(x_1)^TAd_0 + \beta_0 {d_0}^TA d_0 = 0$. Therefore, we must choose $\beta_0$ such that$$\beta_0 = \frac{\nabla f(x_1)^T A d_0}{{d_0}^T A d_0}.$$Choosing this $\beta$ gives us a direction conjugate to all previous directions. Interestingly enough, iterating this will *keep* giving us conjugate directions. After generating each direction, we find the best $\alpha$ for that direction and update the current estimate of position.Thus, the full Conjugate Gradient algorithm for quadratic functions:> Let $f$ be a quadratic function $f(x) = \frac{1}{2}x^T A x + b^T x + c$which we wish to minimize.> 1. **Initialize:** Let $i = 0$ and $x_i = x_0$ be our initial guess, and compute $d_i = d_0 = -\nabla f(x_0)$.> > 2. **Find best step size:**Compute $\alpha$ to minimize the function $f(x_i + \alpha d_i)$ via the equation$$\alpha = -\frac{{d_i}^T (A x_i + b)}{{d_i}^T A d_i}.$$> > 3. **Update the current guess:**Let $x_{i+1} = x_i + \alpha d_i$.>> 4. **Update the direction:**Let $d_{i+1} = -\nabla f(x_{i+1}) + \beta_i d_i$ where $\beta_i$ is given by$$\beta_i = \frac{\nabla f(x_{i+1})^T A d_i}{{d_i}^T A d_i}.$$>> 5. **Iterate:** Repeat steps 2-4 until we have looked in $n$ directions, where $n$ is the size of your vector space (the dimension of $x$). Nonlinear Conjugate Gradient---So, now that we've derived this for quadratic functions, how are we going to use this for general nonlinear optimization of differentiable functions? To do this, we're going to reformulate the above algorithm in *slightly* more general terms.First of all, we will revise step two. Instead of > **Find best step size:**Compute $\alpha$ to minimize the function $f(x_i + \alpha d_i)$ via the equation$$\alpha = -\frac{{d_i}^T (A x_i + b)}{{d_i}^T A d_i}.$$we will simply use a line search:> **Find best step size:**Compute $\alpha$ to minimize the function $f(x_i + \alpha d_i)$ via a line search in the direction $d_i$.In addition, we must reformulate the computation of $\beta_i$. There are several ways to do this, all of which are the same in the quadratic case but are different in the general nonlinear case. We reformulate this computation by generalizing. Note that the difference between $x_{k+1}$ and $x_k$ is entirely in the direction $d_k$, so that for some constant $c$, $x_{k+1} - x_k = c d_k$. Since $\nabla f(x) = A x + b$, $$ \nabla f(x_{k+1}) - \nabla f(x_k) = (A x_{k+1} + b) - (A x_k + b) = A(x_{k+1}-x_k) = cA d_k.$$Therefore, $A d_k = c^{-1} (\nabla f(x_{k+1}) - \nabla f(x_k))$. We can now plug this in to the equation for $\beta_i$ and obtain$$\beta_k = \frac{\nabla f(x_{k+1})^T (\nabla f(x_{k+1}) - \nabla f(x_k))}{{d_k}^T (\nabla f(x_{k+1}) - \nabla f(x_k))}.$$Conveniently enough, the value of $c$ cancels, as it is both in the numerator and denominator. 
This gives us the new update rule:> **Update the direction:**Let $d_{k+1} = -\nabla f(x_{k+1}) + \beta_k d_k$ where $\beta_k$ is given by$$\beta_k = \frac{\nabla f(x_{k+1})^T (\nabla f(x_{k+1}) - \nabla f(x_k))}{{d_k}^T (\nabla f(x_{k+1}) - \nabla f(x_k))}.$$We can now apply this algorithm to any nonlinear and differentiable function! This reformulation of $\beta$ is known as the Polak-Ribiere method; know that there are others, similar in form and also in use. Line Search---The one remaining bit of this process that we haven't covered is step two: the line search. As you can see above, we are given a point $x$, some vector $v$, and a multivariate function $f\!:\!\R^n \to \R$, and we wish to find the $\alpha$ which minimizes $f(x + \alpha v)$. Note that a line search can be viewed simply as root finding, since we know that $v \cdot \nabla f(x + \alpha v)$ should be zero at the minimum. (Since if it were non-zero, we could move from that minimum to a better location.)There are many ways to do this line search, and they can range from relatively simple linear methods (like the [secant method](http://en.wikipedia.org/wiki/Secant_method)) to more complex (using quadratic or cubic polynomial approximations). One simple method for a line search is known as the **bisection method**. The bisection method is simply a binary search. To minimize a univariate function $g(x)$, it begins with two points, $a$ and $b$, such that $g(a)$ and $g(b)$ have opposite signs. By the intermediate value theorem, $g(x)$ must have a root in $[a, b]$. (Note that in our case, $g(\alpha) = v \cdot \nabla f(x + \alpha v)$.) It then computes their midpoint, $c = \frac{a + b}{2}$, and evaluates the function $g$ to compute $g(c)$. If $g(a)$ and $g(c)$ have opposite signs, the root must be in $[a, c]$; if $g(c)$ and $g(b)$ have opposite signs, then $[c, b]$ must have the root. At this point, the method recurses, continuing its search until it has gotten close enough to the true $\alpha$.Another simple method is known as the **secant method**. Like the bisection method, the secant method requires two initial points $a$ and $b$ such that $g(a)$ and $g(b)$ have opposite signs. However, instead of doing a simple binary search, it does linear interpolation. It finds the line between $(a, g(a))$ and $(b, g(b))$:$$g(x) \approx \frac{g(b) - g(a)}{b - a}(x - a) + g(a)$$It then finds the root of this linear approximation, setting $g(x) = 0$ and finding that the root is at$$\frac{g(b) - g(a)}{b - a}(x - a) + g(a) = 0 \implies x = a -\frac{b - a}{g(b) - g(a)}g(a).$$ It then evaluates $g$ at this location $x$. As with the bisection method, if $g(x)$ and $g(a)$ have opposite signs, then the root is in $[a, x]$, and if $g(x)$ and $g(b)$ have opposite signs, the root must be in $[x, b]$. As before, root finding continues via iteration, until some stopping condition is reached.There are more line search methods, but the last one we will examine is one known as **Brent's method**. Brent's method is a combination of the secand method and the bisection method. Unlike the previous two methods, Brent's method keeps track of three points:- $a_k$: the current "contrapoint"- $b_k$: the current guess for the root- $b_{k-1}$: the previous guess for the rootBrent's method then computes the two possible next values: $m$ (by using the bisection method) and $s$ (by using the secant method with $b_k$ and $b_{k-1}$). (On the very first iteration, $b_{k-1} = a_k$ and it uses the bisection method.) 
If the secant method result $s$ lies between $b_k$ and $m$, then let $b_{k+1} = s$; otherwise, let $b_{k+1} = m$.After $b_{k+1}$ is chosen, it is checked for convergence. If the method has converged, iteration is stopped. If not, the method continues. A new contrapoint $a_{k+1}$ is chosen such that $g(a_{k+1})$ and $g(b_{k+1})$ have opposite signs. The two choices for $a_{k+1}$ are either for it to remain unchanged (stay $a_k$) or for it to become $b_k$ - the choice depends on the signs of the function values involved. Before repeating, the values of $g(a_{k+1})$ and $g(b_{k+1})$ are examined, and $b_{k+1}$ is swapped with $a_{k+1}$ if it has the larger absolute function value, so that $b_{k+1}$ always remains the better guess for the root. Finally, the method repeats with the new values of $a_k$, $b_k$, and $b_{k-1}$.Brent's method is effectively a heuristic method, but it works well in practice; it has the reliability of the bisection method and gains a boost of speed from its use of the secant method. Implementation---Now that we've reviewed the conjugate gradient method, let's revise our previous gradient descent framework so that we can implement conjugate gradient (using a line search).Recall that in the previous notebook, we defined a class that allowed us to do gradient descent on arbitrary function-like data types:-- Extensions and imports we'll need later. :set -XTypeFamilies -XFlexibleContexts -XMultiParamTypeClasses -XDoAndIfThenElse -XFlexibleInstances import Control.Monad.Writer import Text.Printf class Monad m => GradientDescent m a where -- Type to represent the parameter space. data Params a :: * -- Compute the gradient at a location in parameter space. grad :: a -> Params a -> m (Params a) -- Move in parameter space. paramMove :: Double -- Scaling factor. -> Params a -- Direction vector. -> Params a -- Original location. -> m (Params a) -- New location.This same class isn't going to work quite as nicely in this case, because we must be able to compute$$\beta_k = \frac{\nabla f(x_{k+1})^T (\nabla f(x_{k+1}) - \nabla f(x_k))}{{d_k}^T (\nabla f(x_{k+1}) - \nabla f(x_k))}.$$Since both the gradients and the search directions are represented as vectors in the parameter space (`Params a`), we must be able to take the dot product of any two such vectors. We already have the capability to add and subtract them via `paramMove`, though.One option is to add something like `paramDot` to `GradientDescent`, and call it a day. On one hand, that is simple; on the other hand, it seems to conflate two independent notions - the ability to do gradient descent and the ability to use `Params a` as a vector space. Instead of doing that, we can require that the parameters form an inner product space:-- We will call this a vector space, though the definition actually -- requires an inner product, since it requires an implementation of `dot`. class VectorSpace v where -- Add two vectors in this inner product space. add :: v -> v -> v -- Scale a vector. scale :: Double -> v -> v -- Take the inner product of two vectors. dot :: v -> v -> Double -- For convenience. minus :: v -> v -> v minus a b = add a (scale (-1) b)Now, instead of requiring `GradientDescent` instances to provide `paramMove`, we'll just require that the parameters form a vector space:class (Monad m, VectorSpace (Params a)) => GradientDescent m a where -- Type to represent the parameter space. data Params a :: * -- Compute the gradient at a location in parameter space. grad :: a -> Params a -> m (Params a)Great! Now we start implementing these methods.
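Before turning to the Haskell implementation, here is a compact Python/NumPy sketch of the whole nonlinear algorithm (Polak-Ribiere $\beta$ plus a bisection line search). It is purely illustrative and independent of the typeclass machinery; the names `bisect_root` and `conjugate_gradient` are ours, not part of any library:

import numpy as np

def bisect_root(g, a, b, tol=1e-6, max_iter=100):
    # Bisection root finder for g on [a, b]; assumes g(a) and g(b) bracket a root.
    ga = g(a)
    for _ in range(max_iter):
        m = 0.5 * (a + b)
        if abs(b - a) < tol:
            break
        gm = g(m)
        if np.sign(gm) == np.sign(ga):
            a, ga = m, gm
        else:
            b = m
    return 0.5 * (a + b)

def conjugate_gradient(grad, x0, tol=1e-8, max_iter=50, bracket=1.0):
    # Nonlinear CG with the Polak-Ribiere beta and a bisection line search.
    x = np.asarray(x0, dtype=float)
    g = grad(x)
    d = -g
    for _ in range(max_iter):
        # Line search: find alpha where the directional derivative vanishes.
        phi = lambda a: grad(x + a * d) @ d
        alpha = bisect_root(phi, 0.0, bracket)
        x_new = x + alpha * d
        g_new = grad(x_new)
        if np.linalg.norm(g_new) < tol:
            return x_new
        beta = max(0.0, (g_new @ (g_new - g)) / (d @ (g_new - g)))  # clipped at zero
        d = -g_new + beta * d
        x, g = x_new, g_new
    return x

# Example: minimize f(x, y) = x^2 + y^2 + 3, whose gradient is (2x, 2y).
print(conjugate_gradient(lambda p: 2 * p, np.array([3.0, 2.0])))   # prints roughly [0, 0]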
In order to avoid spending too much time on line searches, let's just go with a simple bisection search for the time being.The implementation is pretty simple:-- A point consisting of a value and the function at that value. -- The stopping condition is implemented as a function -- Point -> Point -> Bool -- That way, the stopping condition can decide based on convergence -- of the x-coordinate or of the function values. newtype Point = Point {unPt :: (Double, Double)} bisectionSearch :: Monad m => (Double -> m Double) -- What function f to find the root of -> Double -- Starting point -> Double -- Second starting point -> (Point -> Point -> Bool) -- Whether to stop -> m Double -- Approximate root location. bisectionSearch f a b stop = do let midpoint = (a + b) / 2 aValue <- f a bValue <- f b -- Check if we're done with these two values. if stop (Point (a, aValue)) (Point (b, bValue)) then -- If we are, return their midpoint. return midpoint else do -- If we're not done, change one of the values to the midpoint. -- Keep the two values having opposite signs, though. midvalue <- f midpoint if signum midvalue /= signum aValue then bisectionSearch f midpoint a stop else bisectionSearch f midpoint b stopNow that we have our line search implemented, we can go ahead and implement the actual conjugate gradient algorithm.newtype StopCondition m a = StopWhen (Params a -> Params a -> m Bool) conjugateGradient :: GradientDescent m a => a -- What to optimize. -> StopCondition m a -- When to stop. -> Params a -- Initial point (x0). -> m (Params a) -- Return: Location of minimum. conjugateGradient f (StopWhen stop) x0 = go x0 Nothing where go x prevDir = do -- Compute the search direction gradVec <- grad f x let dir = case prevDir of -- If we have no previous direction, just use the gradient Nothing -> scale (-1) gradVec -- If we have a previous direction, compute Beta and -- then the conjugate direction in which to search. Just (prevX, prevGrad, prevDir) -> let diff = gradVec `minus` prevGrad numerator = gradVec `dot` diff denominator = prevDir `dot` diff beta = max 0 $ numerator / denominator in scale beta prevDir `minus` gradVec -- To minimize f(x + \alpha d_k), we find the zero of -- the dot product of the gradient and the direction let lineVal alpha = do let loc = x `add` scale alpha dir gradient <- grad f loc return $ gradient `dot` dir -- Stop when alpha is close enough let stopLineSearch p1 p2 = let val1 = fst $ unPt p1 val2 = fst $ unPt p2 in abs (val1 - val2) < 0.1 -- Find the best alpha value alpha <- bisectionSearch lineVal 0 0.5 stopLineSearch -- Compute the new location, and check if we want to continue iterating. let xNew = x `add` scale alpha dir shouldStop <- stop x xNew if shouldStop then return xNew else go xNew $ Just (x, gradVec, dir)Let's try this out on a two-variable function. Since we do a line search, doing a single-dimensional conjugate gradient would be pointless.-- We need FlexibleInstances for declarations like these! -- We must declare these instances together, because they have recursive dependencies on each other. instance VectorSpace (Params (Double -> Double -> Double)) where add (Arg a b) (Arg x y) = Arg (a + x) (b + y) dot (Arg a b) (Arg x y) = a * x + b * y scale s (Arg a b) = Arg (s * a) (s * b) -- In addition to our usual definition, let's log the number of function -- gradient evaluations using a Writer monad. instance GradientDescent (Writer [String]) (Double -> Double -> Double) where -- The parameter for a function is just its argument. 
data Params (Double -> Double -> Double) = Arg { x :: Double, y :: Double } -- Use numeric differentiation for taking the gradient. grad f (Arg x y) = do let dx = f x y - f (x - epsilon) y dy = f x y - f x (y - epsilon) gradient = (dx / epsilon, dy / epsilon) tell [ "Gradient at\t" ++ show' (x, y) ++ "\tis\t" ++ show' gradient ] return $ uncurry Arg gradient where epsilon = 0.0001 show' (x, y) = printf "%.5f, \t%.5f " x yWe can define a function $f = x^2 + y^2 + 3$, which looks like this:![Surface plot of $f(x, y) = x^2 + y^2 + 3$]() This function has an obvious minimum at $f(0, 0) = 3$.Let's minimize this function using our conjugate gradient, and output the minimum and the gradient evaluation logs:-- Create a stop condition that respects a given error tolerance. stopCondition f tolerance = StopWhen stop where stop (Arg a b) (Arg x y) = do Arg dx dy <- grad f (Arg x y) return $ abs dx < tolerance && abs dy < tolerance -- A demo function with minimum at (0, 0) function x y = x^2 + y^2 + 3 -- Note that we don't need to set an alpha! let tolerance = 1e-2 let initValue = Arg 3 2 let writer = conjugateGradient function (stopCondition function tolerance) initValue (minLoc, messages) = runWriter writer :: (Params (Double -> Double -> Double), [String]) printf "Min at x = %.5f, y = %.5f\n" (x minLoc) (y minLoc) mapM_ putStrLn (take 10 messages) printf "... and so on ... 
(%d evaluations)\n" $ length messagesGroup Members: , & UCD Bootcamp Visualization Project Data Source: https://data.world/awram/us-mass-shootings (E) - Data Extraction* Import Needed Libraries* Load Excel File* Read Excel File & Store in Dataframe##### *** Set Up *** ##### ### Import Dependencies import pandas as pd import numpy as np import datetime as dt ### Python SQL Toolkit & Object Relational Mapper (ORM) import psycopg2 from sqlalchemy.orm import Session from sqlalchemy import create_engine, func ### Display Options - Set Dataframe to Show All Columns in Output Window pd.set_option("display.max_columns", None) ### File to Load US_Shooting_data = "Resources/US_Mass_Shootings_Data_1982_2019.xlsx" ### Read Excel Files & Store in Pandas Dataframes US_Shooting_df = pd.read_excel(US_Shooting_data, encoding='unicode_escape') #### Check Dataframe Datatypes # US_Shooting.dtypes #### Display Dataframe US_Shooting_df.head() #### Display Statistical Summary US_Shooting_df.describe()(T) - Data Transformation* Drop Columns that are Not Needed* Create Columns that are Needed* Reorder Columns * Recode Field Values for Consistency & Analysis#### Drop Columns that Aren't Needed (we are not doing text analysis, so dropping notes fields/string fields) US_Shooting_df = US_Shooting_df.drop(columns=["mental_health_details", "summary", "where_obtained", "weapon_details", "mental_health_sources", "sources_additional_age", "type", "weapon_type", "prior_signs_mental_health_issues", "weapons_obtained_legally"], axis=1) #### Create Additional Date Field US_Shooting_df["month"] = US_Shooting_df["date"].dt.month #### Rename One of the "Location" Fields (dataset came with two location fields) US_Shooting_df = US_Shooting_df.rename(columns={"location": "city_state", "location.1": "site"}) #### Reorder Dataframe US_Shooting_df = US_Shooting_df[["case", "city_state", "site", "date", "month", "year", "injured", "fatalities","total_victims", "age_of_shooter", "race", "gender", "longitude", "latitude"]] #### Display Dataframe US_Shooting_df.head() ##### *** Recode Fields for Consistency & Analysis *** ##### ### Recode "age_of_shooter" US_Shooting_df["age_of_shooter"].replace({"-": 0}, inplace=True) ### Recode "race" US_Shooting_df["race"].replace({"-": "Other/Unknown", "unclear": "Other/Unknown", "Other": "Other/Unknown", "White ": "White", "white": "White", "black": "Black"}, inplace=True) ### Recode "gender" US_Shooting_df["gender"].replace({"-": "Other/Unknown", "Male & Female": "Other/Unknown", "F": "Female", "M": "Male"}, inplace=True) ### Recode "site" ## This isn't recoding correctly, go back and revisit US_Shooting_df["site"].replace({"Other\n ": "Other", "\nWorkplace ": "Workplace"}, inplace=True) #### Display Dataframe US_Shooting_df.head()(L) - Data Load* Create Engine & Connection Layer to Database* Use Pandas to Load CSV Converted Dataframe to PostgreSQL DB* Create a Session (link) from Python to PostgreSQL DB Prior to Executing the Following Code, Set Up PostgreSQL Database* Using pgAdim, create a new database named "Mass_Shootings" (right-click on Databases in left menu list, create "database")* Click on the new database and navigate under "Schemas" > "Public" > "Tables" and create a new table named "Mass_Shootings" (right-click, create "table")* Navigate to "Columns", right-click and create "column". 
A new pop-up window will display and you need to fill out the following elements: 1) "General Tab" > "Name" - name as it appears in the list below 2) "Definition Tab" > "Data Type" - type is shown in the list below 3) Repeat the addition of columns for the entire list (14 columns total)from IPython.display import Image Image("images/Mass_Shootings_Columns.png") ##### *** Create Engine & Connection Layer *** ##### connection_string = "postgres:&MF28wsac@localhost:5432/Mass_Shootings" engine = create_engine(f'postgresql://{connection_string}') connection = engine.connect() ##### *** Use Pandas to Load CSV Converted Dataframe into Database *** ##### US_Shooting_df.to_sql(name="Mass_Shootings", con=engine, if_exists="replace", index=False) ### Check Table Loaded to Database engine.table_names() ##### *** Create Our Session (link) from Python to the Database *** ##### session = Session(engine) ### Confirm data has been added by querying the database pd.read_sql_table("Mass_Shootings", con=engine).head()**Mounting my Google Drive to this notebook**from google.colab import drive drive.mount('/content/drive') import pandas as pdTo get the anime.csv and rating.csv data, click on this link: [Anime Recommendation Dataset](https://www.kaggle.com/CooperUnion/anime-recommendations-database)anime_data=pd.read_csv('/content/drive/MyDrive/anime.csv') rating_data=pd.read_csv('/content/drive/MyDrive/rating.csv')**Checking Data**anime_data.shape anime_data.head() rating_data.shape rating_data.tail() anime_data.info() rating_data.info() rating_data.describe() anime_data.describe() anime_fulldata=pd.merge(anime_data,rating_data,on='anime_id',suffixes= ['', '_user']) anime_fulldata = anime_fulldata.rename(columns={'name': 'anime_title', 'rating_user': 'user_rating'}) anime_fulldata.head() anime_fulldata.shape anime_fulldata.describe()**Visualising data to understand the data better** Dropping NA valuescombine_anime_rating = anime_fulldata.dropna(axis = 0, subset = ['anime_title']) combine_anime_rating.head() anime_ratingCount = (combine_anime_rating. groupby(by = ['anime_title'])['user_rating']. count(). 
reset_index().rename(columns = {'rating': 'totalRatingCount'}) [['anime_title', 'user_rating']] ) import seaborn as sns**Seeing top 10 anime based on user rating**top10_animerating=anime_ratingCount[['anime_title', 'user_rating']].sort_values(by = 'user_rating',ascending = False).head(10) ax=sns.barplot(x="anime_title", y="user_rating", data=top10_animerating, palette="Dark2") ax.set_xticklabels(ax.get_xticklabels(), fontsize=11, rotation=40, ha="right") ax.set_title('Top 10 Anime based on rating counts',fontsize = 22) ax.set_xlabel('Anime',fontsize = 20) ax.set_ylabel('User Rating count', fontsize = 20)**Top anime accoridng to member count**top10_animerating=anime_data[['name', 'members']].sort_values(by = 'members',ascending = False).head(10) ax=sns.barplot(x="name", y="members", data=top10_animerating, palette="Dark2") ax.set_xticklabels(ax.get_xticklabels(), fontsize=11, rotation=40, ha="right") ax.set_title('Top 10 Anime based on members counts',fontsize = 22) ax.set_xlabel('Anime',fontsize = 20) ax.set_ylabel('Members', fontsize = 20) import matplotlib.pyplot as plt plt.figure(figsize = (15, 7)) plt.subplot(1,2,1) anime_fulldata['rating'].hist(bins=70) plt.title("Rating of websites") plt.subplot(1,2,2) anime_fulldata['user_rating'].hist(bins=70) plt.title("Rating of users")Plotting a pie graph to see From where these anime cameimport plotly.graph_objects as go labels = anime_fulldata['type'].value_counts().index print(labels) values = anime_fulldata['type'].value_counts().values colors = ['gold', 'mediumturquoise', 'darkorange', 'lightgreen'] fig = go.Figure(data=[go.Pie(labels=labels, values=values)]) fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=20, marker=dict(colors=colors, line=dict(color='#000000', width=2))) fig.update_layout( title={ 'text': "Medium of Streaming", 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top'}) fig.show()Index(['TV', 'Movie', 'OVA', 'Special', 'ONA', 'Music'], dtype='object')**Word cloud Graph more the size more the occurence**nonull_anime=anime_fulldata.copy() nonull_anime.dropna(inplace=True) from collections import defaultdict all_genres = defaultdict(int) for genres in nonull_anime['genre']: for genre in genres.split(','): all_genres[genre.strip()] += 1 from wordcloud import WordCloud genres_cloud = WordCloud(width=800, height=400, background_color='white', colormap='gnuplot').generate_from_frequencies(all_genres) plt.imshow(genres_cloud, interpolation='bilinear') plt.axis('off')**Assigning the data to another variable so that our data won't be lost**anime_feature=anime_fulldata.copy()**as the user who haven't given any rating is defined as -1 so replacing it with a Nan value and then dropping that as it doesn't add anything to our model**import numpy as np anime_feature["user_rating"].replace({-1: np.nan}, inplace=True) anime_feature.head()how= takes two parameter (any and all) * By defualt it's any which means if any value of row or column contain Nan then drop that column and row * ‘all’ : If all values are NA, drop that row or column.anime_feature = anime_feature.dropna(axis = 0, how ='any') anime_feature.isnull().sum() anime_feature['user_id'].value_counts() counts = anime_feature['user_id'].value_counts() anime_feature = anime_feature[anime_feature['user_id'].isin(counts[counts >= 200].index)]**Creating a pivod table which will help us to easily calcualte cosine similarity**anime_pivot=anime_feature.pivot_table(index='anime_title',columns='user_id',values='user_rating').fillna(0) anime_pivot.head() 
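The pivot table above is exactly what item-based collaborative filtering needs: each row is one title's vector of user ratings. As a minimal sketch of how it can be used (assuming the `anime_pivot` built above and scikit-learn/scipy installed; the helper name `similar_titles` and the example title are ours, not part of the dataset code), a k-nearest-neighbours model with cosine distance finds the titles whose rating patterns are closest:

import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors

# Fit a brute-force cosine-distance kNN model on the (title x user) rating matrix.
anime_matrix = csr_matrix(anime_pivot.values)
knn = NearestNeighbors(metric='cosine', algorithm='brute')
knn.fit(anime_matrix)

def similar_titles(title, n=10):
    # Return the n titles whose rating vectors are closest (by cosine distance) to `title`.
    idx = anime_pivot.index.get_loc(title)
    distances, indices = knn.kneighbors(
        anime_pivot.iloc[idx, :].values.reshape(1, -1), n_neighbors=n + 1)
    # Skip the first neighbour, which is the title itself (distance 0).
    return pd.Series(distances.flatten()[1:], index=anime_pivot.index[indices.flatten()[1:]])

similar_titles('Death Note')   # hypothetical example; pass any title present in anime_pivot.index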
anime_fulldata.columns from sklearn.feature_extraction.text import TfidfVectorizer genres_str = anime_data['genre'].str.split(',').astype(str) tfidf = TfidfVectorizer(analyzer='word', ngram_range=(1, 4), min_df=0) tfidf_matrix = tfidf.fit_transform(genres_str) tfidf_matrix.shape from sklearn.metrics.pairwise import linear_kernel cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix) indices = pd.Series(anime_data.index, index=anime_data['name']) def genre_recommendations(title, highest_rating=False, similarity=False): if highest_rating == False: if similarity == False: idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:11] anime_indices = [i[0] for i in sim_scores] return pd.DataFrame({'Anime name': anime_data['name'].iloc[anime_indices].values, 'Type': anime_data['type'].iloc[anime_indices].values}) elif similarity == True: idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:11] anime_indices = [i[0] for i in sim_scores] similarity_ = [i[1] for i in sim_scores] return pd.DataFrame({'Anime name': anime_data['name'].iloc[anime_indices].values, 'Similarity': similarity_, 'Type': anime_data['type'].iloc[anime_indices].values}) elif highest_rating == True: if similarity == False: idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:11] anime_indices = [i[0] for i in sim_scores] result_df = pd.DataFrame({'Anime name': anime_data['name'].iloc[anime_indices].values, 'Type': anime_data['type'].iloc[anime_indices].values, 'Rating': anime_data['rating'].iloc[anime_indices].values}) return result_df.sort_values('Rating', ascending=False) elif similarity == True: idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:11] anime_indices = [i[0] for i in sim_scores] similarity_ = [i[1] for i in sim_scores] result_df = pd.DataFrame({'Anime name': anime_data['name'].iloc[anime_indices].values, 'Similarity': similarity_, 'Type': anime_data['type'].iloc[anime_indices].values, 'Rating': anime_data['rating'].iloc[anime_indices].values}) return result_df.sort_values('Rating', ascending=False)This model it not deployed yet but you can name the angenre_recommendations('One Piece', highest_rating=True, similarity=False)Triple Barrier MethodThis notebook will cover partial exercise answers:* Exercise 3.5As we go along, there will be some explanations.More importantly, this method can be applied not just within mean-reversion strategy but also other strategies as well. 
Most of the functions below can be found under research/Labels.Contact: import numpy as np import pandas as pd import research as rs import matplotlib.pyplot as plt %matplotlib inline p = print #pls take note of version #numpy 1.17.3 #pandas 1.0.3 #sklearn 0.21.3 dollar = pd.read_csv('./research/Sample_data/dollar_bars.txt', sep=',', header=0, parse_dates = True, index_col=['date_time']) def bband(data: pd.DataFrame, window: int = 21, width: float = 0.001): avg = data['close'].ewm(span = window).mean() std = avg * width upper = avg + std lower = avg - std return avg, upper, lower, std dollar['ewm'], dollar['upper'], dollar['lower'], dollar['std'] = bband(dollar) # Check for normality, serial correlation, overall statistical properties, frequency count stability dollar['side'] = np.nan def side_pick(data: pd.DataFrame): for i in np.arange(data.index.shape[0]): if (data['close'].iloc[i] >= data['upper'].iloc[i]): data['side'].iat[i] = -1 elif (data['close'].iloc[i] <= data['lower'].iloc[i]): data['side'].iat[i] = 1 return data upper = dollar[dollar['upper'] < dollar['close']] # short signal lower = dollar[dollar['lower'] > dollar['close']] # long signal p("Num of times upper limit touched: {0}\nNum of times lower limit touched: {1}" .format(upper.count()[0], lower.count()[0])) # Recall white test as a benchmark and until this stage we filtered all those which did not meet min return dollar = side_pick(dollar) dollar.dropna(inplace= True) dollar['side'].value_counts() copy_dollar = dollar.copy() # make a back copy to be used in later exercise copy_dollar #up till this point the below dataframe should look like this, before tri_bar func. This is our primary model. d_vol = rs.vol(dollar['close'], span0 = 50) events = rs.cs_filter(dollar['close'], limit = d_vol.mean()) events vb = rs.vert_barrier(data = dollar['close'], events = events, period = 'days', freq = 1) vb # Show some example output tb = rs.tri_barrier(data = dollar['close'], events = events, trgt = d_vol, min_req = 0.002, num_threads = 3, ptSl = [0,2], #change ptSl into [0,2] t1 = vb, side = dollar['side']) tb # Show some example m_label = rs.meta_label(data = dollar['close'], events = tb, drop = False) m_label # Show some example m_label['bin'].value_counts(normalize=True) # Here is a quick look at our 'bin' values. # Slight imbalanced sample, but not much harm # 51.95% of the sample based on parameter touched vertical barrier first* Exercise 3.5bHere onwards we will be using sklearn modules to perform ML related task.from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split # A quick look at what we have till date using both primary and secondary model # as seen in previous example, only 48.04% was labeled 1. # Hence precision 1.0 = 0.48 (48% of the sample is relevant), while recall = 1 means fully correct (based on the 48% sample)The below function report_matrix is what we have till date using both primary (bband func) and secondary model (tri_bar func). Classification ReportAs seen in previous example, only 48.0455% was labeled 1. Hence precision 1.0 = 0.48 (48.0455% of the sample is relevant). It's basically ML's way of saying are these "features" relevant when tested.While recall = 1 means fully correct (based on the 48% sample). In the case where ML model is fitted, this result will mean the percentage of "correct" label was chosen. In short, is the ML model reliability in True positive identification based on given sample. 
Confusion Matrix8001 = False Positive (51.95%)7399 = True Positive (48.0455%) Accuracy ScoreIs a mere reflection of True Positive, which again is 48.0455%# this func can be found under Tools/stats_rpt forecast = rs.report_matrix(actual_data = m_label, prediction_data = None, ROC = None)Classification Report ======================================================= precision recall f1-score support 0.0 0.00 0.00 0.00 8001 1.0 0.48 1.00 0.65 7399 accuracy 0.48 15400 macro avg 0.24 0.50 0.32 15400 weighted avg 0.23 0.48 0.31 15400 Confusion Matrix ======================================================= [[TN, FP] [FN, TP]] [[ 0 8001] [ 0 7399]] Accuracy Score ======================================================= 0.48045454545454547Built a list of features.1. Volatility2. Autocorrelation3. Moving average4. log-price return (optional)5. Stationary series based on cumulative sum log-price return (optional)The last 2 items will be explained at AFML chapter 5, fractional differentiated features.# Data that was copied earlier before tri_bar func, this is our primary model only copy_dollar # Show example # drop redundant columns and keep crossing moving avaerages pri_dollar = copy_dollar.drop(['open', 'high', 'low', 'cum_vol', 'cum_dollar', 'cum_ticks'], axis = 1) #include volatility, autocorrelation pri_dollar #include original volatility pri_dollar['volatility'] = rs.vol(pri_dollar.close, span0 = 50) # Optional: getting stationarity feature pri_dollar['log_price'] = pri_dollar.close.apply(np.log) pri_dollar['log_return'] = pri_dollar.log_price.diff() cs_log = pri_dollar.log_price.diff().dropna().to_frame() pri_dollar['stationary'] = rs.fracDiff_FFD(data = cs_log, d = 1.99999889 , thres = 1e-5) rs.unit_root(pri_dollar['stationary'].dropna()) #check for stationarity pri_dollar.dropna(inplace = True) # autocorrelation residual feature, we will add AR features up to 2 lags from statsmodels.tsa.arima_model import ARMA pri_dollar['ar_0'] = ARMA(pri_dollar['stationary'], order=(0,0)).fit().resid pri_dollar['ar_1'] = ARMA(pri_dollar['stationary'], order=(1,0)).fit().resid pri_dollar['ar_2'] = ARMA(pri_dollar['stationary'], order=(2,0)).fit().resid #final dataset secondary_dollar = pri_dollar.copy()**Note* Good to include volume based or volume-weighted indicator as a predictive feature i.e. OBV, VWAP **Note**May try to add other types of trend related features as part of experimental Mathematics. (aka Trial & error)* Good to include volume based or volume-weighted indicator as a predictive feature i.e. OBV, VWAP* If not, try to add price based as predictive feature i.e. MOM, RSI# Now we run all the steps to complete labels, to train random forest. # we will use both primary & secondary model events0 = rs.cs_filter(secondary_dollar['close'], limit = secondary_dollar['volatility'].mean()) vb0 = rs.vert_barrier(data = secondary_dollar['close'], events = events0, period = 'days', freq = 1) tb0 = rs.tri_barrier(data = secondary_dollar['close'], events = events0, trgt = secondary_dollar['volatility'], min_req = 0.002, num_threads = 3, ptSl = [0,2], #change ptSl into [0,2] t1 = vb0, side = secondary_dollar['side']) m_label0 = rs.meta_label(data = secondary_dollar['close'], events = tb0, drop = 0.05) m_label0 m_label0['bin'].value_counts() # we still get back the same count. This is correct. # Tri_bar func is to calculate if vert_bar was triggered and consolidates the target. 
# while label will check which are the ones that hitted vertical barriers or non-profitable will be label 0 # At this stage you may wish to run Grid search CV, but I'm skipping that. n_estimators, max_depth, c_random_state = 500, 7, 42 # Random Forest Model rf = RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators, criterion='entropy', class_weight = None, #This will be cover in next few chapters random_state=c_random_state) X = secondary_dollar.reindex(m_label0.index) # this dataframe only contain all our features y = m_label0['bin'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=False) rf.fit(X_train, y_train.values.ravel()) # Performance Metrics y_prob = rf.predict_proba(X_train)[:, 1] #here we are only interested in True positive y_pred = rf.predict(X_train) p('Matrix training report for primary model & secondary model\n') rs.report_matrix(actual_data = y_train, # we need to use our train data from train_test_split prediction_data = y_pred, ROC = y_prob) # Meta-label # Performance Metrics y_prob = rf.predict_proba(X_test)[:, 1] #here we are only interested in True positive y_pred = rf.predict(X_test) p('Matrix test report for primary model & secondary model\n') rs.report_matrix(actual_data = y_test, prediction_data = y_pred, ROC = y_prob) rs.feat_imp(rf, X) **Now we start to create only primary model** events1 = rs.cs_filter(pri_dollar['close'], limit = pri_dollar['volatility'].mean()) vb1 = rs.vert_barrier(data = pri_dollar['close'], events = events1, period = 'days', freq = 1) tb1 = rs.tri_barrier(data = pri_dollar['close'], events = events1, trgt = pri_dollar['volatility'], min_req = 0.002, num_threads = 3, ptSl = [0,2], #change ptSl into [0,2] t1 = vb1, side = None) m_label1 = rs.meta_label(data = pri_dollar['close'], events = tb1, drop = 0.05) # take note we do not have a side hence we need to drop something # Random Forest Model rf = RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators, criterion='entropy', class_weight = None, #This will be cover in next few chapters random_state=c_random_state) X = pri_dollar.reindex(m_label1.index) # this dataframe only contain all our features y = m_label1['bin'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=False) rf.fit(X_train, y_train.values.ravel()) # Performance Metrics y_prob = rf.predict_proba(X_train)[:, 1] #here we are only interested in True positive y_pred = rf.predict(X_train) p('Matrix training report for primary model only\n') rs.report_matrix(actual_data = y_train, # we need to use our train data from train_test_split prediction_data = y_pred, ROC = y_prob) # Meta-label # Performance Metrics y_prob = rf.predict_proba(X_test)[:, 1] #here we are only interested in True positive y_pred = rf.predict(X_test) p('Matrix test report for primary model only\n') rs.report_matrix(actual_data = y_test, prediction_data = y_pred, ROC = y_prob) rs.feat_imp(rf, X)Matrix test report for primary model only Classification Report ======================================================= precision recall f1-score support -1.0 0.46 0.73 0.56 2027 1.0 0.60 0.31 0.41 2565 accuracy 0.50 4592 macro avg 0.53 0.52 0.49 4592 weighted avg 0.54 0.50 0.48 4592 Confusion Matrix ======================================================= [[TN, FP] [FN, TP]] [[1484 543] [1762 803]] Accuracy Score ======================================================= 0.4980400696864111# For more ram a = [] while(True): a.append('1') !nvidia-smi from google.colab import drive 
drive.mount('/content/drive') # Importing libraries import os import pickle import numpy as np import pandas as pd from keras import applications from keras.preprocessing.image import ImageDataGenerator from keras import optimizers from keras.optimizers import Adam from keras.models import Sequential, Model from keras.layers import Dense, GlobalAveragePooling2D,AvgPool2D from keras.layers import Activation, Dropout, Flatten, Dense from keras import backend as K import tensorflow as tf from keras.layers import Conv2D,MaxPooling2D,Dropout,Flatten,Dense,BatchNormalization,LeakyReLU,MaxPool2D from keras.models import Sequential from keras.layers import Conv2DTranspose,Reshape PATH = './drive/My Drive/TIP/Dataset/' PATH_TO_MAIN = './drive/My Drive/TIP/' YT_LINK = 'www.youtube.com/watch?v=' with open(PATH_TO_MAIN + 'Pickles/encoder_trainX_81.pkl','rb') as f: x_enc_train = pickle.load(f) with open(PATH_TO_MAIN + 'Pickles/encoder_trainY_81.pkl','rb') as f: y_train_encoder= pickle.load(f) x_enc_train.shape from sklearn.model_selection import train_test_split X_train,X_val,y_train,y_val = train_test_split(x_enc_train,y_train_encoder,test_size = 0.1) X_train.shape # from keras import backend as K # def root_mean_squared_error(y_true, y_pred): # return K.sqrt(K.mean(K.square(y_pred - y_true))) # def custom_loss(y_pred, y_true): # y_pred= tf.cast(y_pred, tf.float64) # y_true= tf.cast(y_true, tf.float64) # y_pred=tf.nn.relu(y_pred) # return tf.sqrt(tf.reduce_mean(tf.squared_difference(tf.log1p(y_pred), tf.log1p(y_true)))) model_encoder = Sequential() model_encoder.add(Conv2D(64, kernel_size=(4,4),strides=(1,1), input_shape=[598,257,2],use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(Conv2D(64, kernel_size=(4,4),strides=(1,1), use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(Conv2D(128, kernel_size=(4,4),strides=(1,1), use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(MaxPool2D(pool_size=(2,1))) model_encoder.add(Conv2D(128, kernel_size=(4,4),strides=(1,1), use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(MaxPool2D(pool_size=(2,1))) model_encoder.add(Conv2D(256, kernel_size=(4,4),strides=(1,1), use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(MaxPool2D(pool_size=(2,1))) model_encoder.add(Conv2D(512, kernel_size=(4,4),strides=(1,1), use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(Conv2D(512, kernel_size=(4,4),strides=(2,2), use_bias=False)) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(Conv2D(512, kernel_size=(4,4),strides=(2,2), use_bias=False)) model_encoder.add(AvgPool2D(pool_size=(15,1),strides = (1,1))) model_encoder.add(LeakyReLU(alpha=0.1)) model_encoder.add(BatchNormalization()) model_encoder.add(Flatten()) model_encoder.add(Dense(4096,activation='relu')) model_encoder.add(Dropout(0.2)) model_encoder.add(Dense(2048,activation='relu')) model_encoder.summary() model_encoder.compile(loss='mse',optimizer=Adam(lr=1e-3),metrics=['accuracy']) checkpoint_path = "./cp.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) callbacks = [ 
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5), tf.keras.callbacks.EarlyStopping(patience=7, monitor='val_loss'), tf.keras.callbacks.TensorBoard(log_dir='./logs'), cp_callback ] # model_encoder.fit(X_train, y_train, batch_size=2,validation_data=(X_val, y_val), epochs=10 ,verbose=1) model_encoder.fit(x_enc_train,y_train_encoder, batch_size=2,validation_data=(X_val, y_val), epochs=10 ,verbose=1) y_pred_encoder = model_encoder.predict(x_enc_train) with open(PATH_TO_MAIN + 'Pickles/y_pred_encoder_81.pkl','wb') as f: pickle.dump(y_pred_encoder,f) model_encoder.save(PATH_TO_MAIN + 'Models/Encoder1_81.h5')A Step by Step Guide to Transfer Learning with Pytorch: Achieving High Accuracy on the CIFAR10 Dataset Prerequisites* Python Programming at an intermediate level, especially good familiarity with Numpy and Pandas* Some software development experience would be helpful but not strictly required. If you are a student with reasonable programming background, you will do just fine.* Should have Anaconda for Python 3.x with Jupyter Notebook installed* Preferably a commodity Nvidia GPU (GTX 1050 Ti, or a flavor of 1060,1070 or 1080 would do just fine. Of course if you have anything better it would only improve things) either locally or rented in Cloud. Warning: If you work with CPU only, it may take significantly more time (in order of days) to achieve the same results* Previous knowledge of Pytorch or transfer learning is not required, but the reader should be familiar with basic concepts of Deep Learning and fundamentals of a Neural Network, as well as some basic terminology of Convolutional Neural Networks. Examples of such as fundamentals include Linear functions, Non-Linear transformations such as Rectified Linear Unit (ReLU) etc. back-propagation, gradients and gradient descent algorithm, loss-functions and how the weights of a Neural Network are updated. In terms of CNN, it would help to know how basic convolutions and pooling operations work.We will cover the basics of Pytorch Tensors and Tensor operations and introduce all concepts related to Transfer Learning and Pytorch as we move along with the tutorial. Introduction to PytorchPytorch is a relatively new Deep Learning framework from Facebook that is quickly gaining popularity in the research as well as the developer community. Its primary merits are flexibility and more control over the run-time behavior (sometimes called dynamic behavior) of a Neural Network. Another key advantage is its use of commonly used Python programming patterns and practices, unlike for example, Tensorflow which defines its own special kind of syntax and programming style on top of Python that makes it somewhat harder to learn for newcomers.Pytorch 1.0 version has just been released as beta, at the time of this writing which is a major upgrade in terms of model deployment in the real-world. It introduces a Just-In-Time (JIT) graph compiler through a mechanism called Torch Script that makes it more efficient to deploy a model for prediction. However, in this tutorial we shall be using version 0.4.1 which is the last one before this major upgrade. Introduction to Transfer LearningWhen we take a model created and trained elsewhere on a similar problem that we are trying to solve, and reuse its architecture and (possibly) its weights in our setting, we are applying Transfer Learning. It means that somebody trained a Neural Network model, on most likely a very large dataset, and put that pre-trained model in a model repository. 
We take that model and modify it a little bit to adapt it to our use case, thus transferring the learning achieved by that model previously to our application, without having to retrain it from scratch. This not only saves time but also transfers the "knowledge" of the model to our case, which usually results in achieving very high accuracy. Essentially, we are building on the work of other people who make it available for the greater good. It's a great step towards the democratization of deep learning and Artificial Intelligence in general. Transfer learning is a highly effective technique used throughout the world by Deep Learning practitioners today. Transfer Learning is most effective when the use case is well-understood and the data is sort of "fixed", so to speak, for example, image classification and object detection which are based on just pixels, or Natural Language Processing (NLP) text corpora which are words out of a large vocabulary. It may not be that effective for structured or tabular data used in business settings, e.g. data collected from databases and files, because one company's data may be quite different in structure and semantics from another's. However, even that is changing now with the recent trend in the use of categorical embeddings, just like the word embeddings used in NLP. Such embeddings allow us to transfer the learning achieved through data of one organization for a specific domain (e.g. predicting retail sales) to similar problems of others in the same domain. Our Problem DefinitionIn this tutorial, we provide a step-by-step guide to applying Transfer Learning in Pytorch to an image classification problem. The problem is to automatically classify objects present in images into categories, e.g. bird, plane, dog, cat etc. Image Classification Use CasesImage Classification is the basis and a core building block of several complex applications such as object detection, image captioning, face recognition and image segmentation, to name a few. Features extracted from images during classification can be effectively used in several use cases and applications related to Computer Vision. The DatasetWe will be using the Cifar10 dataset. It is a dataset consisting of 60000 images categorized into 10 classes. Each image is of size 32x32. The images being small and somewhat blurry (low resolution) makes it one of the more difficult datasets for classification. Some of the available benchmarks for this dataset are given at: There was a Kaggle competition on Cifar10 in 2014, whose results are also available at ObjectivesAt the end of this tutorial, the readers should be able to:* Create an API (set of classes and utility functions) with Pytorch to preprocess and prepare any image dataset for training, evaluation and prediction* Construct and use an API to effectively apply Transfer Learning in Pytorch on an image dataset for classification* Acquire some tips and tricks to achieve very high accuracy on Cifar10 using three different, freely available pre-trained models by combining them effectively to achieve higher accuracy than the individual models. * Know how to create their own classes for Deep Learning tasks with Pytorch and use them as components in other applications State of the art resultsWhile preparing this tutorial, my accuracy (94.7%) ended up in third place on both the benchmark site and the Kaggle leaderboard (of course, through a late submission). This was achieved in less than 2 hours of training altogether (all three models combined) on a commodity Nvidia GTX 1070 GPU. 
I will show you some simple tips and tricks to increase accuracy of your models with transfer learning and also how to ensemble different models together to achieve even higher accuracy in most applications. Outline of the TutorialThis tutorial proceeds through the following steps: 1. Create Pytorch Dataset for Cifar10 2. Pre-process the Dataset and prepare it for training 3. Create a Base Class for building a basic Neural Network 4. Create a Fully Connected Class derived from the Base Class 5. Create a Transfer Learning Class derived from the Base Class 6. Train two different pretrained, transferred models on Cifar10 dataset 7. Evaluate and predict on test set with individual models and Ensemble 8. Predict on Kaggle's given much larger Test set Code CommentsCode blocks and snippets have been explained using multiline comments on top of each block where I thought was necessary to explain something. Please pay attention to the code comments in "red". Previously explained pieces of code have been replaced by Ellipses (...) in subsequent code blocks for brevity. Link to codeThe complete code is available on Git-Hub at:import torch ## for pytorch import torchvision ## for transfer learnhing models and many other vision related classes from torch import nn ## Core Neural Network Model classes in Pytorch from torch import optim ## Contains several Pytorch optimizer classes import torch.nn.functional as F ## Contains several utilily functions provided by Pytorch from torchvision import datasets, transforms, models ## Many Computer Vision related classes ## for datasets and transformations etc. from torch.utils.data import * ## Contains several utilily functions for dataset manipulation from PIL import Image import numpy as np ## The following impports contain classes and functions that we develop throughout this tutorial. They have ## been explained throughout this tutorial. from mylib.utils import * from mylib.model import * from mylib.cv_model import * from mylib.fc import * from mylib.chkpoint import * from mylib.cv_data import * ## The following two lines are for reloading any imported files if they are modified while ## our Jupyter Notebook is running %load_ext autoreload %autoreload 2Step 1: Create a Pytorch Dataset for CIFAR 10* We download the train and test datasets using CIFAR10 constructor available in datasets module in torchvision. * We pass train=True flag first to indicate that we want the training set. Then we pass it as False to download the test set* We pass download=True since it is the first time we are constructing this dataset. Therefore, it will download first from a prespecified URL within the CIFAR10 class. * After running this cell first time and successfully downloading the datasets, you should change it to False to avoid downaloading every time.* The result of the following operations would be two dataset objects representing the CIFAR10 training set and test sets respectively.train_dataset = datasets.CIFAR10('Cifar10', train=True, download=True) test_dataset = datasets.CIFAR10('Cifar10', train=False, download=True)This gives us two dataset objects which are of torchvision.datasets.cifar.CIFAR10 type. This is a sub-class of Pytorch' Dataset class which is the main class to generically represent any dataset. This particular class represents CIFAR10 data stored in its internal data structure. 
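As a quick aside (a minimal illustration, not part of the tutorial's own code), a Dataset object can be indexed like a sequence; since we have not attached any transform yet, each element comes back as a (PIL image, integer label) pair:

# Index the dataset directly; with no transform, an element is (PIL.Image, label).
img, label = train_dataset[0]
print(type(img), img.size, label)   # e.g. <class 'PIL.Image.Image'> (32, 32) 6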
Later these objects shall be passed to a Pytorch Dataloader objects (explained later) for processing the images.We can veify the lengths (number of images) of both datasetslen(train_dataset),len(test_dataset)As you can see above, we have 50000 and 10000 images in training and test sets respectively. A quick refresher of TensorsTensors are just a way of representing n-dimensional data objects of a single type (integers or float etc) in a generic way. for example:* A single value (integer or float) is a 0-dimensional tensor* An array with N elements is a one-dimentional tensor* A matrix with M rows and N columns is a 2-dimensional tensor (MxN)* An MxN image with three RGB (Red, Green Blue) color channels represented by three matrices is a three dimenional tensor (3 x M x N)The image tensors are contained in the field train_data within the dataset object. Let's look at the shape of one of the tensors representing an imagetrain_dataset.train_data[0].shapeThis tells us that our images are of 32 x 32 in size with 3 color channels.Let's look at some of the images using matplotlib.plyplot module%matplotlib inline import matplotlib.pyplot as plt plt.imshow(train_dataset.train_data[100])This looks like a ship. As you can observe, the images are rather blurry and quite low resolution (32 x 32). Step 2: Pre-process the Dataset and prepare it for training* Understand the concept of Data loader and the Pytorch Data loader API* Split the images into train, validation and test sets* Create Pytorch Dataloaders to feed images while training, validation and prediction* Use Pytorch API to define Transforms for preprocessing the Dataset for more effective training* Use Pytorch API to convert all images to Pytorch Tensors* Normalize the dataset using mean and standard deviation of images Data LoadersPytorch Dataloaders are objects that act as Python generators. They supply data in chunks or batches while training and validation. We can instantiate Dataloader objects and pass our datasets to them. Dataloaders store the dataset objects internally. When the application asks for the next batch of data, a dataloader uses its stored dataset as a Python iterator to get the next element (row or image in our case) of data. Then it aggregates a batch worth of data and returns it to the application.Following is an example of calling the Dataloader constructor:num_train = len(train_dataset) indices = list(range(num_train)) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=50,sampler=SubsetRandomSampler(indices), num_workers=0) len(train_loader)* [4] Here we are creating a Dataloader object for our training dataset with a batch size of 50. * The sampler parameter specifies the strategy with which we want to sample data while constructing batches.* We have different samplers available in torch.utils.data.sampler. The explanation is straightforward. You can read about them in the Pytorch Documentation at https://pytorch.org/docs/stable/data.htmltorch.utils.data.Sampler.* The num_workers argument specifies how many processes (or cores) we want to use while loading our data. This provides parallelism while loading large datasets. Default is 0 which means load all data in main process.Dataloader reports its length in number of batches. 
Since we created this Dataloader with a batch size of 50 and we had 50000 images in our train dataset, we have the length of dataloader = 1000 batches Splitting DataNow let's write a function to split our datasets into train, validation and test sets and create their corresponding dataloaders''' This function takes the train and test data sets as arguments. Test Data can be None in which case it splits train data into three sets,train, test and validation. If test_data is not none it just splits train set into train and validation and creates a separate dataloader from test set ''' def split_image_data(train_data, test_data=None, batch_size=20, num_workers=0, valid_size=0.2, sampler=SubsetRandomSampler): num_train = len(train_data) ''' It creates a list of indices from the train set using Python range function on its length ''' indices = list(range(num_train)) np.random.shuffle(indices) ''' It splits the indices list according to the given validation set size (valid_size argument) whose default is 0.2 (20% of train data set aside for validation) ''' split = int(np.floor(valid_size * num_train)) train_idx, valid_idx = indices[split:], indices[:split] ''' It uses RandomSubsetSampler constructor to shuffle the train and validation set indices ''' train_sampler = sampler(train_idx) valid_sampler = sampler(valid_idx) ''' If a separate test set is given, it simply creates a Dataloader from that set. If no test set is given, it further splits the train indices (which were obtained by splitting the original train_set into train and validation indices earlier), into a set of train and test indices. Note that the test indices size is equal to the validation set. This results in a new set of indices and a sampler for test set from the train set. ''' if test_data is not None: test_loader = DataLoader(test_data, batch_size=batch_size, num_workers=num_workers) else: train_idx, test_idx = train_idx[split:],train_idx[:split] train_sampler = sampler(train_idx) test_sampler = sampler(test_idx) test_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=test_sampler, num_workers=num_workers) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers) return train_loader,valid_loader,test_loaderLet's call this function to obtain our Dataloaderstrainloader,validloader,testloader = split_image_data(train_dataset,test_dataset,batch_size=50) len(trainloader),len(testloader),len(validloader)And we have a nice split with 800 batches in our train set and 200 each in our validation and test sets respectively. Convolutional Neural Networks (CNN)Although, we assume that you have a basic understanding of CNNs, if you want to refresh the core concepts, folowing are some great tutorials:[Convolutional Neural Networks CS231n Stanford](http://cs231n.github.io/convolutional-networks/)[CNN Tutorial: AnalyticsVidhya](https://www.analyticsvidhya.com/blog/2018/12/guide-convolutional-neural-network-cnn/)[A Very Comprehensive Tutorial on ANN and CNN by Kaggle](https://www.kaggle.com/shivamb/a-very-comprehensive-tutorial-nn-cnn) Preprocessing and Transforming the DatasetBefore we move on to defining our Network and start training, we need to preprocess our datasets. 
Specifically, we need to perform the following steps:* Resize the images to an appropriate size for our models* Perform some basic and most common data augmentation* Convert the image dat to Pytorch Tensors * Normalize the image data Why do we want to Resize Images?Most of our Transfer Learning models require data to be of at least 224x224 size. The reason for this limitation is that these models are designed with a large number of Convolution and pooling layers, finally followed by a fully connected (Linear) layer at the end to generate the classification output. By the time the input image reaches the final layer, it has been reduced drastically in size due to the way convolutions and pooling are defined. If the input image was already too small (like 32x32 CIFAR10 images in our case), it would be too small for the network to produce any significant output. Therefore, these models sort of restrict us to input an image >=224x224.Please note that we wouldn't have needed resizing if our images were already > 224x224, like in case of ImageNet, or if we were to use our own CNN architecture which did not reduce the image size too much while passing it through layers. Resizing smaller images to larger ones (as in our case) creates artifacts that we don't (ideally) want our model to learn. Since our images are really small in case of CIFAR10 and the transfer learning models we are using have this requirement, we are obliged to resize.In case of datasets with larger images, our GPU or CPU memory constraints may become a factor. Therefore, we combine downsizing with increased batch sizes (till we hit the batch size limit) to optimize the model performance and balance the effects of down-sizing. Data AugmentationData Augmentation is a common technique with Deep learning where we modify images on the fly while training to make the neural network see additional images flipped or rotated at different axes and angles. This usually results in better training performance since the Network sees multiple views of the same image and has a better chance of identifying its class when minimizing the loss function. Note that the augmented images are not added to the dataset, they are just created while generating batches, so the actual images seen during training would increase but you won't see the number of images in the datasets increasing. The length and other functions that count the number of images would still give the same answer.We use two commonly used augmentations below: * RandomHorizontaFlip that flips some of the images around the vertical axis with a probability p that defaults to 0.5 meaning that 50% of the images shall be flipped* RadomRotation at a specific degree (10 in our case below) that rotates some of them randomly at an angle of 10 degree again with a probability of p which defaults to 0.5from torchvision import transforms train_transform = transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), ]) train_dataset = datasets.CIFAR10('Cifar10',download=False,transform=train_transform)Data NormalizationIn data normalization we statistically normalize the pixel values in our images. This mostly results in better training performance and faster convergence. 
A common way to perform normalization is to subract the mean of pixel values of the whole dataset from each pixel, and then divide by the standard deviation of the pixels of whole dataset.* The most common way in Transfer learning is to use the mean and std values of the dataset the original Transfer Learning model was trained on. However, it may be a good strategy for cases where we don't want to retrain any part of the original model. * If our dataset is large and we want to retrain whole or part of the original model, then we would be **better off normalizing with the mean and standard deviation of the dataset in question (CIFAR10 in our case)**. However, in most transfer learning tutorials you'll find, the mean and std values for ImageNet are used.Below, I give you two functions to calculate the mean and std of a dataset:First one, "calculate_img_stats_avg" is based on Dataloader and calculates means and stds of each batch of data as it is retrieved from the dataset object, and finally takes the average of the accumulated means and std values. Although, this gives us an approximation of the actual values, it is reasonable to use for large datasets that won't fit into memory at the same time. This code has been adapted from [Pytorch forum](https://discuss.pytorch.org/t/about-normalization-using-pre-trained-vgg16-networks/23560/6?u=ptrblck)The second function, "calculate_img_stats_full" calculates the actual mean and std of the whole dataset by working on it at once. This would give more accurate values, although, would most likely run out of memory for large datasets. For CIFAR10, this function requires 28GB of RAM. My machine has 32GB but it falls short and I am unable to run this function. This code has been adapted from the book "Deep Learning with Pytorch" by and , Manning Publications. You can try to run the second function on your specific dataset and if you run into memory issues, then fall-back to the first one for a good approximation. In case of CIFAR10 however, many people have calculated the mean and std of the dataset and the values are well known, like ImageNet. We are using those values in the code that follows. I did not try with the approximate values given by the first function but you are welcome to try with those.from torchvision import transforms transform = transforms.Compose([transforms.ToTensor()]) dataset = datasets.CIFAR10('Cifar10',download=False,transform=transform) loader = torch.utils.data.DataLoader(dataset, batch_size=50,num_workers=0)* We first create a dataset from full data and then a dataloader to feed the data in batches of size 50 to our loop.* Note that for Dataloader to work, the images have to be converted to a Tensor, so that is the only transform we are using.* The function below is straight forward implementation that calculates mean and std of each batch and add them to their cumulative sums, dividing in the end by the total number of batches to get the averagesdef calculate_img_stats_avg(loader): mean = 0. std = 0. nb_samples = 0. 
    for imgs,_ in loader:
        batch_samples = imgs.size(0)
        imgs = imgs.view(batch_samples, imgs.size(1), -1)
        mean += imgs.mean(2).sum(0)
        std += imgs.std(2).sum(0)
        nb_samples += batch_samples
    mean /= nb_samples
    std /= nb_samples
    return mean,std

calculate_img_stats_avg(loader)

def calculate_img_stats_full(dataset):
    imgs_ = torch.stack([img for img,_ in dataset],dim=3)
    imgs_ = imgs_.view(3,-1)
    imgs_mean = imgs_.mean(dim=1)
    imgs_std = imgs_.std(dim=1)
    return imgs_mean,imgs_std

calculate_img_stats_full(dataset)

* The torch.stack function above stacks the data along the given dimension (3 in our case). The view operation then views the Tensor as 3 x (product of all other dimensions), which basically flattens everything while keeping the first dimension equal to 3.
* The best way to visualize what is going on in an obscure function like this one is to isolate the statements and feed them some dummy tensors to see what's going on. I leave that for you as an exercise.
The values below have been taken from the same book referred to above (from which the code has been adapted):

cifar10_mean = [0.4915, 0.4823, 0.4468]
cifar10_std = [0.2470, 0.2435, 0.2616]

Now we can create our Datasets again from scratch with all the transformations, augmentations and normalization applied, splitting them into train and test and obtaining the final Dataloaders. **Note that we also define our batch size = 50**

batch_size = 50
'''
ToTensor() converts a numpy array (all our images are constructed as numpy arrays by the Dataset class when read from disk).
Normalize() is another transform that normalizes according to the passed values of Means and STD of each channel as separate lists or tuples.
'''
train_transform = transforms.Compose([transforms.Resize((224,224)),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.RandomRotation(10),
                                      transforms.ToTensor(),
                                      transforms.Normalize(cifar10_mean, cifar10_std)
                                     ])
test_transform = transforms.Compose([transforms.Resize((224,224)),
                                     transforms.ToTensor(),
                                     transforms.Normalize(cifar10_mean, cifar10_std)
                                    ])
train_data = datasets.CIFAR10('Cifar10', train=True, download=False, transform=train_transform)
test_data = datasets.CIFAR10('Cifar10', train=False, download=False, transform=test_transform)
trainloader,validloader,testloader = split_image_data(train_data,test_data,batch_size=batch_size)
len(trainloader),len(testloader),len(validloader)

Data Augmentation is (mostly) applied to the Train Set only
Note that we usually don't apply data augmentation to the test set because we want the test data to remain as close to real data as possible, otherwise there's a chance that we may over-estimate performance. For example, our model may have mis-classified a test image but be correct for its flipped and rotated versions. This would increase the overall accuracy, which would be misleading.
Having said that, there is a technique called [Test Time Augmentation (TTA)](https://towardsdatascience.com/test-time-augmentation-tta-and-how-to-perform-it-with-keras-4ac19b67fb4d) where we augment the test data and average out the predictions after showing the trained model all the (augmented) variations of an image along with the original one while testing. This may sometimes result in better accuracy. We are not going to use it in this tutorial but you can find out more in [this tutorial](https://www.kaggle.com/andrewkh/test-time-augmentation-tta-worth-it)

Step 3: Create a Base Class for building a basic Neural Network
Now that we have our Dataloaders all prepared, we are ready to define our Neural Network and train it.
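Before doing that, it can be worth sanity-checking the loaders we just built. The sketch below (the expected shapes assume the batch size of 50 and the 224x224 resize defined above) simply pulls one batch and inspects it:

imgs, labels = next(iter(trainloader))
print(imgs.shape)    # expected: torch.Size([50, 3, 224, 224])
print(labels.shape)  # expected: torch.Size([50])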
In order to define a Neural Network, the best way is to define classes that isolate and abstract out functionality common to all types of Networks like training loops, validation, evaluation, prediction, setting different hyper-parameters etc. We also need to define classes that implement specific type of Networks e.g. specialized for Transfer Learning, or tailor-made for Fully Connected operation etc. Keeping this in mind, we will create three main classes:* A Base Class representing a Neural Network derived from Pytorch's core nn.Module class which is the foundation of any Neural Network in Pytorch* A class derived from our base class that implements functionality specific to Transfer Learning* A class derived from our base class that implements functionality specific to Fully Connected Networks **Let's build our base class called Network step by step**''' We inherit our class from nn.Module which is the core Pytorch class for creating Neural Networks ''' class Network(nn.Module): def __init__(self,device=None): ''' We call the parent's constructor as we do in any Python class derived from a parent class ''' super().__init__() ''' We set the device attribute to 'cuda' if it is available otherwise we set it to 'cpu'. This will help us avoid putting if else checks everywhere in the code regarding CUDA availability. We can just move the tensors to whatever device is set on our object. ''' if device is not None: self.device = device else: self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ''' We create a dummy forward method. Forward method is the core method in Pytorch that executes the Network graph and passes the inputs through the network transforming them and getting the output at the other end. In Pytorch, we write our own forward method that executes the modules defined in the __init__ method at run-time. Since we will be writing the forward methods in derived classes, it is empty in the base class. ''' def forward(self,x): pass* **Note that forward method is called by nn.Module's "__ call __" method. So the object of our calss can become a "callable" and when it is called, the forward method shall be automatically invoked**. Please refer to any good Python tutorial if you want to know more about callables. Train MethodNext we add the train method. For training any Neural Network, there are a few common tasks that need to be performed in each iteration of the training loop. The following outline of the training loop is the logic of the inner part of the loop that performs actual training in each epoch. This part of the code goes through each batch. It basically defines a **single epoch** (single pass through the whole dataset):* Get the next batch of data* Move the Tensors of the batch to the device (GPU or CPU)* Zero out the gradients of all weights * Call the forward function to send the inputs through the Network* Pass the outputs obtained to the criterion (loss function) to compare them against the labels (targets) and calculate the loss* Calculate the gradients* update all the weights according to the gradients and the learning rate* update the overall loss within this epoch.If you are familiar with the basics of Neural Network, you must have recognized these steps since they are common to all frameworks and Neural Network types. Following code in train_ method performs these steps. Although, the code is pretty self-explanatory, a quick summary of Pytorch specific functions follows the codeclass Network(nn.Module): ... 
    '''
    print_every indicates after how many batches we want to print the information about the loss
    '''
    def train_(self,trainloader,criterion,optimizer,print_every):
        '''
        The train method below (self.train()) is a built-in Pytorch method in the base class (nn.Module)
        that sets a flag on the model object indicating that training is in progress. This flag is used by
        several Pytorch modules that behave differently during training and validation/testing e.g.
        Dropout, batch normalization etc.
        '''
        self.train()
        t0 = time.time()
        batches = 0
        running_loss = 0
        '''
        inputs and labels are one batch of images and their corresponding labels from the trainloader
        '''
        for inputs, labels in trainloader:
            batches += 1
            #t1 = time.time()
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            optimizer.zero_grad()
            outputs = self.forward(inputs)
            '''
            The criterion is basically the loss function that calculates the difference between the output
            of the network and the actual labels.
            '''
            loss = criterion(outputs, labels)
            '''
            loss.backward() performs backpropagation, calculating the gradients throughout the network
            following the complete graph of connected Tensors
            '''
            loss.backward()
            '''
            optimizer.step() performs one step of the optimizer algorithm after the loss function has
            executed and the new gradients are available.
            '''
            optimizer.step()
            '''
            The item() method gives a scalar value. It is used for Tensors that contain a single value
            (the loss is a floating point numerical value in this case)
            '''
            loss = loss.item()
            #print('training this batch took {:.3f} seconds'.format(time.time() - t1))
            '''
            We keep a total running loss for this epoch
            '''
            running_loss += loss
            '''
            Print the loss information if the number of batches has reached print_every since the last print
            '''
            if batches % print_every == 0:
                print(f"{time.asctime()}.."
                      f"Time Elapsed = {time.time()-t0:.3f}.."
                      f"Batch {batches+1}/{len(trainloader)}.. "
                      f"Average Training loss: {running_loss/(batches):.3f}.. "
                      f"Batch Training loss: {loss:.3f}.. "
                      )
                t0 = time.time()
        '''
        At the end we return the average loss of this epoch
        '''
        return running_loss/len(trainloader)

Loss Functions
Note that Pytorch comes with many built-in loss functions for common cases like classification and regression. Here we are passing the loss function to train_ as an argument. Some common loss functions used in classification are CrossEntropy loss, Negative Log Likelihood Loss (NLLLoss) and Binary Cross-Entropy. We will discuss loss functions further when we discuss the Fully Connected Class later in this tutorial.

Optimizer Module
The Optimizer applies gradient descent or one of its variants and performs the weight updates using the gradients and the learning rate. Optimizers come in several flavors with different algorithms and are found in the torch.optim module. Examples include Stochastic Gradient Descent (SGD), Adam, Adadelta etc.

Validate Method
The task of the validate method is to apply the model to the validation set for evaluation. The purpose is to periodically assess how we are doing in terms of training. If you are familiar with Machine Learning concepts, you most likely know about bias (underfitting) and variance (overfitting). If our loss on the validation set is **significantly and consistently** higher than the loss on the training set, we are overfitting.
This basically means our model won't generalize well enough to any other dataset because we are too tightly bound to the training set.
* The idea here is to evaluate the model on the validation set after every few epochs (a good default is after every epoch), measure the loss and print it out to see if we are overfitting.
* The difference between the validate method and train is that in validation we don't need to back-propagate, calculate the gradients, apply gradient descent and update the weights. All we need is to pass the validation data set batch by batch through our model and evaluate the loss using the loss function.
* As our model gets better after some epochs, we should see our validation loss going down.
* One additional thing we also want to do in validation is to calculate the accuracy of our classification. This is simply the percentage of how many times we are correct in our predictions: 100 x (number of correctly predicted classes/dataset size)
* However, it would be better if we also calculate class-wise accuracy i.e. for each individual class we calculate how many images of that class we got right versus the total number of images we have of that class.
* So we also write a utility function to calculate class-wise accuracies as shown below. This may come in handy when we do predictions on our test set or any other set of images

from collections import defaultdict

def update_classwise_accuracies(preds,labels,class_correct,class_totals):
    correct = np.squeeze(preds.eq(labels.data.view_as(preds)))
    '''
    We simply go through the batch (shape[0] is the batch size) and update the classwise correct and total counts
    '''
    for i in range(labels.shape[0]):
        label = labels.data[i].item()
        class_correct[label] += correct[i].item()
        class_totals[label] += 1

class Network(nn.Module):
    ...
    def validate_(self,validloader):
        running_loss = 0.
        accuracy = 0
        '''
        We create two Python Default dictionaries to store classwise correct predictions and total images per class
        '''
        class_correct = defaultdict(int)
        class_totals = defaultdict(int)
        '''
        self.eval() is a Pytorch method to put the model into evaluation mode. It tells Pytorch we only
        want to perform a forward pass through the network and no backpropagation. It is the opposite of
        the train method we had in our training loop
        '''
        self.eval()
        '''
        Whatever we put inside a torch.no_grad() block tells Pytorch not to compute gradients. We want to
        make sure that gradients are never calculated within the evaluation loop.
        '''
        with torch.no_grad():
            for inputs, labels in validloader:
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                outputs = self.forward(inputs)
                loss = self.criterion(outputs, labels)
                running_loss += loss.item()
                _, preds = torch.max(torch.exp(outputs), 1) # you can safely remove the call to torch.exp (as described below)
                update_classwise_accuracies(preds,labels,class_correct,class_totals)
        '''
        We calculate the accuracy by the simple formula we discussed earlier.
        '''
        accuracy = (100*np.sum(list(class_correct.values()))/np.sum(list(class_totals.values())))
        '''
        We put the model back into train mode
        '''
        self.train()
        '''
        The running loss is the total loss of all the batches. Dividing it by the length of the validloader
        (the number of batches) gives us the average loss for the whole validation set
        '''
        return (running_loss/len(validloader),accuracy)

* **np.squeeze(preds.eq(labels.data.view_as(preds)))**
  * This seems like a pretty obscure statement so let's break it down:
  * The actual label values are contained in the labels tensor's data attribute.
  * The predictions are the output of our network.
  * The view_as method reshapes a tensor according to the dimensions of the tensor passed as the argument. In our case this statement reshapes the labels in the batch to match the shape of the predictions tensor, i.e. a vector of batch_size elements, since torch.max has already reduced the 10 outputs of our final Fully Connected layer to a single predicted class index per image.
  * The eq method compares the two tensors element-wise and emits a 1 (True) where the elements are equal and 0 otherwise.
  * The final result is a Tensor of batch_size (50) elements, with np.squeeze removing any extra singleton dimension, i.e. a 50-dimensional vector (1-dimensional tensor) containing either 1s (where predictions are equal to labels) or 0s (where they are unequal).
* **_, preds = torch.max(torch.exp(outputs), 1)**
  * We will use Log of Softmax along with Negative Log Likelihood Loss (NLLLoss) in our Fully Connected model (more on this later). Therefore, our outputs are expected to be logs of probability values. We don't strictly need to exponentiate them here, as the max of the log values would still give us the same class index. We are doing it here just to make our predictions look like probabilities, which sometimes helps in debugging. You are free to remove the torch.exp call in the code if you want. torch.max returns a tuple containing the maximum value and the index of the maximum value within the tensor. Since the index in our case represents the classified category itself, we only take that, ignoring the actual probability.

Evaluate Method
The purpose of the evaluate method is to assess the performance of our model on a test dataset after training has completed. The assumption is that we have labels available for the dataset we want to pass to this method. The code is almost the same as validate. The only difference is that we don't have to calculate the loss in this case since we are done with the training.
Since this method returns the overall accuracy as well as class-wise accuracies, we need another utility function, get_accuracies. We also need class_names to get the actual names of the classes (if available). We will store the class names as a dictionary mapping ids (numbers) to class name strings when we create our Transfer Learning Model (later in this tutorial).

from collections import defaultdict

def update_classwise_accuracies(preds,labels,class_correct,class_totals):
    correct = np.squeeze(preds.eq(labels.data.view_as(preds)))
    for i in range(labels.shape[0]):
        label = labels.data[i].item()
        class_correct[label] += correct[i].item()
        class_totals[label] += 1

def get_accuracies(class_names,class_correct,class_totals):
    accuracy = (100*np.sum(list(class_correct.values()))/np.sum(list(class_totals.values())))
    '''
    We get the class name and the accuracy of this class by dividing the correct predictions of this class
    by the total number of images of this class we have in the test dataset. We put an extra condition that
    we have at least one image of a class to avoid dividing by 0
    '''
    class_accuracies = [(class_names[i],100.0*(class_correct[i]/class_totals[i]))
                        for i in class_names.keys() if class_totals[i] > 0]
    return accuracy,class_accuracies

class Network(nn.Module):
    ...
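    # Quick aside (illustration only, not part of the Network class): what the prediction
    # bookkeeping above does on dummy tensors, assuming a batch of 4 images and 3 classes:
    #   outputs = torch.randn(4, 3)                  # fake network outputs, one row per image
    #   _, preds = torch.max(outputs, 1)             # preds: shape (4,), the predicted class index per row
    #   labels = torch.tensor([0, 2, 1, 2])
    #   correct = preds.eq(labels.view_as(preds))    # 1 where prediction == label, 0 otherwise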
def evaluate(self,testloader): self.eval() self.model.to(self.device) class_correct = defaultdict(int) class_totals = defaultdict(int) with torch.no_grad(): for inputs, labels in testloader: inputs, labels = inputs.to(self.device), labels.to(self.device) outputs = self.forward(inputs) ps = torch.exp(outputs) _, preds = torch.max(ps, 1) update_classwise_accuracies(preds,labels,class_correct,class_totals) self.train() return get_accuracies(self.class_names,class_correct,class_totals)Predict MethodThe predict method is used to predict or draw inference from our trained model to determine the class of images for which we do not have labels. This is the method that would be called when the model is deployed in real life.* It is very similar to evaluate except that there are no labels* Another difference is that we are also interested in probabilities as well as predicted classes.* We may also want to know the predicted probabilities of more than one classes e.g. top 3 most likely classes predicted along with their indices.class Network(nn.Module): ... ''' Since we need probabilities and (possibly) multiple ranked classes, we pass the topk argument that tells our function how many ranked classes with their probabilities we have to return. ''' def predict(self,inputs,topk=1): self.eval() self.model.to(self.device) with torch.no_grad(): inputs = inputs.to(self.device) outputs = self.forward(inputs) ps = torch.exp(outputs) p,top = ps.topk(topk, dim=1) return p,top* The topk method of a tensor in Pytorch returns k indices and their values from a tensor along a dimension (dim=1 means along each row i.e. horizontally. Since our Tensor is 50 x number of classes, this would return the topk classes and their probabilities in each row). Fit MethodThis is the main method that is called by the user of our class to kick off training. It implements the main training loop that implements the epoch loop. It calls train_ method, calls validation periodically to monitor performance and overfitting etc., keeps track of best accuracy achieved so far, saves the best accuracy model, saves full model alongwith its hyper-parameters and other variables to disk as a checkpoint.Checkpoints can be restored and training continued later if power is lost or training is disrupted due to some reason.Let's build this method step by step below:class Network(nn.Module): ... def fit(self,trainloader,validloader,epochs=2,print_every=10,validate_every=1): for epoch in range(epochs): ''' We move the model to device ('gpu' or 'cpu') ''' self.model.to(self.device) print('epoch {:3d}/{}'.format(epoch+1,epochs)) epoch_train_loss = self.train_(trainloader,self.criterion, self.optimizer,print_every) ''' We check if we need to call validate after every validate_every epochs, call it and print out the validation loss and accuracy. ''' if validate_every and (epoch % validate_every == 0): t2 = time.time() epoch_validation_loss,epoch_accuracy = self.validate_(validloader) time_elapsed = time.time() - t2 print(f"{time.asctime()}--Validation time {time_elapsed:.3f} seconds.." f"Epoch {epoch+1}/{epochs}.. " f"Epoch Training loss: {epoch_train_loss:.3f}.. " f"Epoch validation loss: {epoch_validation_loss:.3f}.. " f"validation accuracy: {epoch_accuracy:.3f}") self.train()Saving the best accuracy modelThe fit function should also monitor the best accuracy achieved so far across all epochs and save the best accuracy model as soon as it gets a new one better than the previous best. 
This ensures that even without checkpoints, we are able to retrieve our best model if our validation performance starts to degrade later in the training. This is a common scenario as training may take hours to complete and we may have to leave the system unattended. This way we can ensure that we always re-load the best accuracy model's weights and use them for inference.

from collections import defaultdict
import math

class Network(nn.Module):
    def __init__(self,device=None):
        ...
        '''
        we initialize best_accuracy to 0. when we create the model instance
        '''
        self.best_accuracy = 0.
    ...
    def fit(self,trainloader,validloader,epochs=2,print_every=10,validate_every=1):
        for epoch in range(epochs):
            self.model.to(self.device)
            print('epoch {:3d}/{}'.format(epoch+1,epochs))
            epoch_train_loss = self.train_(trainloader,self.criterion,
                                           self.optimizer,print_every)
            if validate_every and (epoch % validate_every == 0):
                t2 = time.time()
                epoch_validation_loss,epoch_accuracy = self.validate_(validloader)
                time_elapsed = time.time() - t2
                print(f"{time.asctime()}--Validation time {time_elapsed:.3f} seconds.."
                      f"Epoch {epoch+1}/{epochs}.. "
                      f"Epoch Training loss: {epoch_train_loss:.3f}.. "
                      f"Epoch validation loss: {epoch_validation_loss:.3f}.. "
                      f"validation accuracy: {epoch_accuracy:.3f}")
                '''
                We check and save the new best accuracy in the model if validate returned a better accuracy.
                '''
                if self.best_accuracy == 0. or (epoch_accuracy > self.best_accuracy):
                    print('updating best accuracy: previous best = {:.3f} new best = {:.3f}'.format(self.best_accuracy,
                                                                                                    epoch_accuracy))
                    self.best_accuracy = epoch_accuracy
                    '''
                    The Pytorch save method saves any Pytorch Tensor data structure by serializing it with
                    Python's Pickle module. Here we are storing the model's state dictionary returned by the
                    state_dict() method, which contains all the weights of the model's full graph (each
                    tensor in the architecture).
                    '''
                    torch.save(self.state_dict(),self.best_accuracy_file)
                self.train() # just in case we forgot to put the model back into train mode in validate
        print('loading best accuracy model')
        '''
        We restore the best accuracy model when we are done with the training loop. This ensures that any
        evaluation or inference we perform while the model remains in memory shall be done using the best
        accuracy model instead of the one obtained in the last iteration of the training loop.
        '''
        self.load_state_dict(torch.load(self.best_accuracy_file))

* Note that self.best_accuracy_file shall be the filename set during initialization of the model parameters (please see next)

Setting and getting different parameters and hyper-parameters
We need to set different parameters and hyper-parameters of a model. These include the loss function (criterion), optimizer, dropout probability, learning rate and some others.
We write four methods:
* set_criterion to create an instance of the loss function and set it on the model
* set_optimizer to create an instance of the optimizer and set it on the model
* set_model_params that calls the above two functions and sets additional hyper-parameters on the model object
* get_model_params that retrieves the currently set parameters of a model. This will come in handy when we want to save a full model checkpoint.

class Network(nn.Module):
    ...
    def set_criterion(self,criterion_name):
        if criterion_name.lower() == 'nllloss':
            self.criterion_name = 'NLLLoss'
            self.criterion = nn.NLLLoss()
        elif criterion_name.lower() == 'crossentropyloss':
            self.criterion_name = 'CrossEntropyLoss'
            self.criterion = nn.CrossEntropyLoss()

    def set_optimizer(self,params,optimizer_name='adam',lr=0.003):
        from torch import optim
        if optimizer_name.lower() == 'adam':
            print('setting optim Adam')
            self.optimizer = optim.Adam(params,lr=lr)
            self.optimizer_name = optimizer_name
        elif optimizer_name.lower() == 'sgd':
            print('setting optim SGD')
            self.optimizer = optim.SGD(params,lr=lr)
            self.optimizer_name = optimizer_name
        elif optimizer_name.lower() == 'adadelta':
            print('setting optim Ada Delta')
            self.optimizer = optim.Adadelta(params)
            self.optimizer_name = optimizer_name

    def set_model_params(self,
                         criterion_name,
                         optimizer_name,
                         lr, # learning rate
                         dropout_p,
                         model_name,
                         best_accuracy,
                         best_accuracy_file,
                         class_names):
        self.set_criterion(criterion_name)
        self.set_optimizer(self.parameters(),optimizer_name,lr=lr)
        self.lr = lr
        self.dropout_p = dropout_p
        self.model_name = model_name
        self.best_accuracy = best_accuracy
        self.best_accuracy_file = best_accuracy_file
        self.class_names = class_names

    def get_model_params(self):
        params = {}
        params['device'] = self.device
        params['model_name'] = self.model_name
        params['optimizer_name'] = self.optimizer_name
        params['criterion_name'] = self.criterion_name
        params['lr'] = self.lr
        params['dropout_p'] = self.dropout_p
        params['best_accuracy'] = self.best_accuracy
        params['best_accuracy_file'] = self.best_accuracy_file
        params['class_names'] = self.class_names
        return params

* set_criterion supports two loss functions: CrossEntropy and NLLLoss. However, support for other loss functions can be trivially added with more if else statements.
* It is passed the name of the loss function and it instantiates an object using the Pytorch API.
* set_optimizer similarly sets the optimizer by instantiating it using the Pytorch API. It supports 'Adam' as the default, while SGD and Adadelta can also be set. Again, support for other optimizers can easily be added.
* set_model_params is a higher level method that calls set_criterion and set_optimizer and also sets other parameters such as model_name, the current value of best accuracy, best_accuracy_file (where we store the best accuracy model's weights), learning rate and dropout probability.
* We have omitted sanity checking for correctness of the types of the parameters for brevity (e.g. model_name and optimizer_name should be strings, and dropout_p and lr should be floats etc.).
* The set_model_params method shall be called from the main model classes e.g. the Transfer Learning and Fully Connected models, whose classes we shall next derive from this base Network class
* get_model_params simply returns the current parameters as a dictionary. It will be used in creating the checkpoint (see next).
* class_names is a dictionary that contains a mapping of class identifiers (integers) to class names (strings), if such a mapping is available

Saving a Model Checkpoint
* Saving a checkpoint of a model is an important task when training Deep Learning models.
* This way we can comfortably execute long-running training loops
* If there is any disruption e.g. the machine crashes, power fails, the Jupyter Notebook crashes or any other unforeseen issue happens and our training is interrupted, we can restore from the last checkpoint and continue training. Our (potentially) hours of training shall not be lost.
* Now we will implement a method, save_chkpoint.
* Later in this tutorial we will implement a utility function load_checkpoint when we have the derived classes from this base class for Fully Connected and Transfer Learning Models and we kow which type of model we need to instantiate (we will add that information to the store_chkpoint at that time)class Network(nn.Module): ... ''' We add the chkpoint_file argument to the set_params function ''' def set_model_params(self, criterion_name, optimizer_name, lr, # learning rate dropout_p, model_name, best_accuracy, best_accuracy_file, chkpoint_file): self.criterion_name = criterion_name self.set_criterion(criterion_name) self.optimizer_name = optimizer_name self.set_optimizer(self.parameters(),optimizer_name,lr=lr) self.lr = lr self.dropout_p = dropout_p self.model_name = model_name self.best_accuracy = best_accuracy print('set_model_params: best accuracy = {:.3f}'.format(self.best_accuracy)) self.best_accuracy_file = best_accuracy_file self.chkpoint_file = chkpoint_file def get_model_params(self): params = {} params['device'] = self.device params['model_name'] = self.model_name params['optimizer_name'] = self.optimizer_name params['criterion_name'] = self.criterion_name params['lr'] = self.lr params['dropout_p'] = self.dropout_p params['best_accuracy'] = self.best_accuracy print('get_model_params: best accuracy = {:.3f}'.format(self.best_accuracy)) params['best_accuracy_file'] = self.best_accuracy_file params['chkpoint_file'] = self.chkpoint_file print('get_model_params: chkpoint file = {}'.format(self.chkpoint_file)) return params def save_chkpoint(self): saved_model = {} ''' We retrieve all params via get_model_params and also the class names and just dump them in the chkpoint file. ''' saved_model['params'] = self.get_model_params() torch.save(saved_model,self.chkpoint_file) print('checkpoint created successfully in {}'.format(self.chkpoint_file))Step 4: Create a Fully Connected Class derived from the Base Class Now we are ready to create our first derived class for Fully Connected Neural Networks. Fully Connected Networks are traditionally called Multi-layer Perceptrons (MLP) in the literature. In most Deep Learning frameworks (including Pytorch) they are simply called Linear layers.* In order to have a functional class for a Fully Connected Network, we will rely on Pytorch's nn.Linear Module. * nn.Linear module is itself derived from nn.Module from which we derived our own Network class.* A Fully Connected Network consists of three basic pieces: * inputs * Fully Connected hidden layers with each one followed by a non-linear transformation (let's consider the non-linearity as part of the hidden layer instead of treating it as a separate layer) * An output layer and the number of outputs Fully Connected Network RequirementsWe need to meet the following requirements to create such a class: * Ability to specify as many hidden layers as desired * Ability to specify the number of inputs and outputs of the model * Ability to define drop out and non-linearity ('relu', tanh etc.) for each layer * Ability to define the output layer and prepare it for the classification task * Set different parameters and hyper-parameters of the model like optimizer, loss function etc. Given these requirements, let's define a class for Fully Connected Model''' Our new class FC is derived (inherited) from our base class Network and not nn.Module. So it gets all the methods available in base class for free ''' class FC(Network): ''' We pass the model parameters to init. We have already seen most of the parameters. 
The additional ones are num_inputs, num_outputs, non_linearity (which defaults to 'relu') and a list of hidden layer dimensions.
    '''
    def __init__(self,num_inputs,
                 num_outputs,
                 layers=[],
                 lr=0.003,
                 class_names=None,
                 optimizer_name='Adam',
                 dropout_p=0.2,
                 non_linearity='relu',
                 criterion_name='NLLLoss',
                 model_type='classifier',
                 best_accuracy=0.,
                 best_accuracy_file ='best_accuracy.pth',
                 chkpoint_file ='chkpoint_file.pth',
                 device=None):

        super().__init__(device=device)
        self.set_model_params(criterion_name,
                              optimizer_name,
                              lr,
                              dropout_p,
                              'FC',
                              best_accuracy,
                              best_accuracy_file,
                              chkpoint_file
                              )

* **num_inputs** is the total number of input features this Network is going to accept.
* **num_outputs** is the total number of outputs this Network is going to emit after passing through any hidden layers. In other words, this is the dimension of the output layer.
* The **non-linearity** is stored in the model as an attribute. Note that we do not pass the non-linearity to set_model_params as this is model specific and does not belong to the base class.
* We may have to implement our own versions of the set_model_params and get_model_params methods later if we want to set and get additional parameters specific to the model. This is like implementing our own **__init__** and then calling the parent's too. We do the additional work in our code and then call the parent to do the common work.
* **layers** is a list specifying the number of units in each hidden layer. The order of the numbers in this list also specifies their order in the model.

Defining the Network using nn.Sequential
* **nn.Sequential** is a Pytorch class to create a simple sequential Neural Network that just concatenates the defined modules together as a sequence.
* At the time of execution, nn.Sequential automatically calls the forward methods of each module in the sequence.
Here we define an empty nn.Sequential first and then add the input module, hidden layers and output module to it

class FC(Network):
    def __init__(self,num_inputs,
                 num_outputs,
                 layers=[],
                 lr=0.003,
                 class_names=None,
                 optimizer_name='Adam',
                 dropout_p=0.2,
                 non_linearity='relu',
                 criterion_name='NLLLoss',
                 model_type='classifier',
                 best_accuracy=0.,
                 best_accuracy_file ='best_accuracy.pth',
                 chkpoint_file ='chkpoint_file.pth',
                 device=None):

        super().__init__(device=device)
        self.set_model_params(criterion_name,
                              optimizer_name,
                              lr,
                              dropout_p,
                              'FC',
                              best_accuracy,
                              best_accuracy_file,
                              chkpoint_file
                              )
        self.non_linearity = non_linearity
        '''
        We store the actual Network as a Sequential block in the model attribute of our FC object
        '''
        self.model = nn.Sequential()
        '''
        We create groups of layers and add them to the Sequential model. Each group consists of a linear
        layer followed by a non-linearity and a dropout with the probability passed as an argument.
        '''
        if len(layers) > 0:
            self.model.add_module('fc1',nn.Linear(num_inputs,layers[0]))
            self.model.add_module('relu1',nn.ReLU())
            self.model.add_module('dropout1',nn.Dropout(p=dropout_p,inplace=True))
            for i in range(1,len(layers)):
                self.model.add_module('fc'+str(i+1),nn.Linear(layers[i-1],layers[i]))
                self.model.add_module('relu'+str(i+1),nn.ReLU())
                self.model.add_module('dropout'+str(i+1),nn.Dropout(p=dropout_p,inplace=True))
            self.model.add_module('out',nn.Linear(layers[-1],num_outputs))
        else:
            '''
            If we don't have any hidden layers, we just add one layer to our sequential model with the
            number of inputs and the number of outputs. In this case we don't add any non-linearity or
            dropout, since non-linearity is typically added in hidden layers.
''' self.model.add_module('out',nn.Linear(num_inputs,num_outputs))* **nn.Linear** is a Pytorch class that takes the number of inputs and the number of outputs and creates a Linear model with internal forward function.* Note that we are naming our output layer as **'out'** and our hidden layers as **'fcX'** where X is the layer number (1, 2 ..) Loss functions for Classification* We can broadly divide Linear Networks into two type: Regression and Classification.* Although there are many loss functions used for classification, two most common ones which can be generlized easily from 2 to any number of classes are: * Negative Likehood Log Loss or NLLLoss * CrossEntropy Loss NLLLoss* The NLLLoss function is very simple. It assumes that its input is a probability. It just takes the -ve of the log of its input for each input and adds them up. (more about it here: https://ljvmiranda921.github.io/notebook/2017/08/13/softmax-and-the-negative-log-likelihood/ )* We need to convert the outputs to probabilities before feeding to the NLLLoss.* The simplest way to do that is to take the Softmax of the inputs by taking the exponent of each input and dividing by the sum of the exponents (more info on the same link above). After this operation the outputs can be interpreted as probabilities (because they have been scaled or calibrated between 0 and 1), which are then fed to the NLLLoss, which outputs sum(-log(p)) where p is the output of each probability.* However, in Pytorch the NLLLoss function expects that the log has already been calculated and it just puts a -ve sign and sums up the inputs. Therefore, we need to take the log ourselves after Softmax. There is a convenient function in Pytorch called LogSoftmax that does exactly that. So we will use it if our loss function is specified to be 'NLLLoss' by adding that after our output layer in the Sequential. Cross_entropy Loss* If we were using Cross-Entropy loss we do nothing as the CrossEntropyLoss function will do what's required.class FC(Network): def __init__(self,num_inputs, num_outputs, layers=[], lr=0.003, class_names=None, optimizer_name='Adam', dropout_p=0.2, non_linearity='relu', criterion_name='NLLLoss', model_type='classifier', best_accuracy=0., best_accuracy_file ='best_accuracy.pth', chkpoint_file ='chkpoint_file.pth', device=None): super().__init__(device=device) self.set_model_params(criterion_name, optimizer_name, lr, dropout_p, 'FC', best_accuracy, best_accuracy_file, chkpoint_file ) self.non_linearity = non_linearity self.model = nn.Sequential() if len(layers) > 0: self.model.add_module('fc1',nn.Linear(num_inputs,layers[0])) self.model.add_module('relu1',nn.ReLU()) self.model.add_module('dropout1',nn.Dropout(p=dropout_p,inplace=True)) for i in range(1,len(layers)): self.model.add_module('fc'+str(i+1),nn.Linear(layers[i-1],layers[i])) self.model.add_module('relu'+str(i+1),nn.ReLU()) self.model.add_module('dropout'+str(i+1),nn.Dropout(p=dropout_p, inplace=True)) self.model.add_module('out',nn.Linear(layers[-1],num_outputs)) else: self.model.add_module('out',nn.Linear(num_inputs,num_outputs)) ''' We use Logsoftmax if loss = NLLLoss ''' if model_type.lower() == 'classifier' and criterion_name.lower() == 'nllloss': self.model.add_module('logsoftmax',nn.LogSoftmax(dim=1)) ''' We save the attributes of the model in our object for possible later reference. 
        '''
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.layer_dims = layers
        '''
        We store the class names dictionary if passed, otherwise we create a simple dictionary with each
        class id converted to a string as the class name, e.g. 1 shall be converted to '1'.
        '''
        if class_names is not None:
            self.class_names = class_names
        else:
            self.class_names = {k:str(v) for k,v in enumerate(list(range(num_outputs)))}

Flattening the inputs
* Before we can feed inputs to our FC Network, we need to flatten the input Tensor so that each row is just a one dimensional tensor and we have a batch of those rows. In other words, the inputs have to be two dimensional (rows by columns), much like the tabular data (from CSV files, for example) most of you might be familiar with from Machine Learning. This is a requirement of the Linear layer, which expects its data to be in batches of single dimensional tensors (vectors).
* To achieve this we simply have to change the view of our input tensors (if they are already two dimensional, the view changes nothing).
* To do so we define a simple one-liner utility function. This makes the code much more readable, as we immediately know that a flattening operation is going on instead of a rather cryptic .view statement.

def flatten_tensor(x):
    return x.view(x.shape[0],-1)

class FC(Network):
    def __init__(self,num_inputs,
                 num_outputs,
                 layers=[],
                 lr=0.003,
                 class_names=None,
                 optimizer_name='Adam',
                 dropout_p=0.2,
                 non_linearity='relu',
                 criterion_name='NLLLoss',
                 model_type='classifier',
                 best_accuracy=0.,
                 best_accuracy_file ='best_accuracy.pth',
                 chkpoint_file ='chkpoint_file.pth',
                 device=None):

        super().__init__(device=device)
        self.set_model_params(criterion_name,
                              optimizer_name,
                              lr,
                              dropout_p,
                              'FC',
                              best_accuracy,
                              best_accuracy_file,
                              chkpoint_file
                              )
        self.non_linearity = non_linearity
        self.model = nn.Sequential()
        if len(layers) > 0:
            self.model.add_module('fc1',nn.Linear(num_inputs,layers[0]))
            self.model.add_module('relu1',nn.ReLU())
            self.model.add_module('dropout1',nn.Dropout(p=dropout_p,inplace=True))
            for i in range(1,len(layers)):
                self.model.add_module('fc'+str(i+1),nn.Linear(layers[i-1],layers[i]))
                self.model.add_module('relu'+str(i+1),nn.ReLU())
                self.model.add_module('dropout'+str(i+1),nn.Dropout(p=dropout_p,inplace=True))
            self.model.add_module('out',nn.Linear(layers[-1],num_outputs))
        else:
            self.model.add_module('out',nn.Linear(num_inputs,num_outputs))

        if model_type.lower() == 'classifier' and criterion_name.lower() == 'nllloss':
            self.model.add_module('logsoftmax',nn.LogSoftmax(dim=1))

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.layer_dims = layers

        if class_names is not None:
            self.class_names = class_names
        else:
            self.class_names = {k:str(v) for k,v in enumerate(list(range(num_outputs)))}

    '''
    We define our forward function, which basically calls the forward function of our model
    (nn.Sequential in this case) after flattening the inputs.
    '''
    def forward(self,x):
        return self.model(flatten_tensor(x))

Setting and Getting Dropout
* We add two more convenience methods that give us the ability to change the dropout probability any time we want.
* This might come in handy when we want to experiment quickly with different dropout probability values, or maybe change the dropout dynamically while training based on some condition, e.g. on detecting heavy overfitting.

class FC(Network):
    ...
    '''
    Dropout layers in Pytorch are of type torch.nn.modules.dropout.Dropout. This can be checked for and
    acted upon accordingly for each such layer in our sequential model.
    '''
    def _get_dropout(self):
        for layer in self.model:
            if type(layer) == torch.nn.modules.dropout.Dropout:
                return layer.p

    def _set_dropout(self,p=0.2):
        for layer in self.model:
            if type(layer) == torch.nn.modules.dropout.Dropout:
                print('FC: setting dropout prob to {:.3f}'.format(p))
                layer.p=p

* Here we are checking each layer for this type of module, and if it matches we act accordingly in the set and get methods.
* Note that the **torch.nn.modules.dropout.Dropout** module has an attribute p where the dropout probability is stored

There are four additional attributes of our FC model we need to save in order to restore it correctly. These are num_inputs, num_outputs, layers and class_names. Since these are quite specific to the FC model, we should write the FC model's versions of the get_model_params and set_model_params methods that internally call the base class ones and also perform any additional work.
So let's do that and complete our class before writing our load_chkpoint function

class FC(Network):
    ...
    def set_model_params(self,
                         criterion_name,
                         optimizer_name,
                         lr,
                         dropout_p,
                         model_name,
                         model_type,
                         best_accuracy,
                         best_accuracy_file,
                         chkpoint_file,
                         num_inputs,
                         num_outputs,
                         layers,
                         class_names):
        '''
        We call the parent class's set_model_params method, passing it all its required arguments, and
        then add the additional parameters to our object as attributes.
        '''
        super(FC, self).set_model_params(criterion_name,
                                         optimizer_name,
                                         lr,
                                         dropout_p,
                                         model_name,
                                         best_accuracy,
                                         best_accuracy_file,
                                         chkpoint_file
                                         )
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.layer_dims = layers
        self.model_type = model_type
        if class_names is not None:
            self.class_names = class_names
        else:
            self.class_names = {k:str(v) for k,v in enumerate(list(range(num_outputs)))}

    def get_model_params(self):
        '''
        We call the parent class's get_model_params method to retrieve the dictionary of params, then add
        our model specific attributes to the dictionary
        '''
        params = super(FC, self).get_model_params()
        params['num_inputs'] = self.num_inputs
        params['num_outputs'] = self.num_outputs
        params['layers'] = self.layer_dims
        params['model_type'] = self.model_type
        params['class_names'] = self.class_names
        params['device'] = self.device
        return params

Loading a Saved Checkpoint
* Now let's create a load_chkpoint utility function which is given a checkpoint file to retrieve the model parameters and reconstruct the appropriate model. Since we have only one model type right now **(FC)**, we will put a check for that model_type only, and later add support for Transfer Learning and any other classes as we create them.
* The code is pretty straightforward.
It gets the params dictionary from the chkpoint_file, calls the appropriate constructor, and finally loads the state dictionary of the best accuracy model from the best accuracy filename retrieved from the chkpoint_file

def load_chkpoint(chkpoint_file):
    restored_data = torch.load(chkpoint_file)
    params = restored_data['params']
    print('load_chkpoint: best accuracy = {:.3f}'.format(params['best_accuracy']))
    if params['model_type'].lower() == 'classifier':
        net = FC(num_inputs=params['num_inputs'],
                 num_outputs=params['num_outputs'],
                 layers=params['layers'],
                 device=params['device'],
                 criterion_name = params['criterion_name'],
                 optimizer_name = params['optimizer_name'],
                 model_name = params['model_name'],
                 lr = params['lr'],
                 dropout_p = params['dropout_p'],
                 best_accuracy = params['best_accuracy'],
                 best_accuracy_file = params['best_accuracy_file'],
                 chkpoint_file = params['chkpoint_file'],
                 class_names = params['class_names']
                 )
    net.load_state_dict(torch.load(params['best_accuracy_file']))
    net.to(params['device'])
    return net

**This completes our FC class**. Now we should test it before proceeding further. Let's test it on the **MNIST** dataset. First we should calculate the MNIST dataset's **mean and std** values. They can be calculated without getting into any memory issues, in a couple of seconds, with the function we created earlier for this purpose

train_data = datasets.MNIST(root='data',download=False, transform = transforms.ToTensor())
mean_,std_= calculate_img_stats_full(train_data)
mean_,std_

* We create the transforms as before using the calculated mean and std values, apply them to our train and test sets, and then split our train set into train and validation. Remember that our split_image_data function just converts the test set into a dataloader if it is given as an argument.

train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                      transforms.RandomRotation(10),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.0839, 0.2038, 0.1042],[0.2537, 0.3659, 0.2798])
                                     ])
test_transform = transforms.Compose([transforms.ToTensor(),
                                     transforms.Normalize([0.0839, 0.2038, 0.1042],[0.2537, 0.3659, 0.2798])
                                    ])
train_dataset = datasets.MNIST(root='data',download=False,train=True, transform = train_transform)
test_dataset = datasets.MNIST(root='data',download=False,train=False,transform = test_transform)
trainloader,validloader,testloader = split_image_data(train_dataset,test_dataset,batch_size=50)
len(trainloader),len(validloader),len(testloader)

* We create an FC model with number of inputs = 784, which is obtained after flattening the image dimensions (1 x 28 x 28), and number of outputs = 10 since we have 10 classes (digits 0 to 9)
* We arbitrarily select two hidden layers of 512 units each
* We set the optimizer to Adadelta (more on it next)
* We set the best accuracy and checkpoint files as appropriate

net = FC(num_inputs=784,
         num_outputs=10,
         layers=[512,512],
         optimizer_name='Adadelta',
         best_accuracy_file ='best_accuracy_mnist_fc_test.pth',
         chkpoint_file ='chkpoint_file_mnist_fc_test.pth')

setting optim Ada Delta

Optimizer choices
* Optimizer algorithms come in many variations and forms. Most of them try to optimize the basic gradient descent algorithm by varying the learning rate and other related parameters as they see the data.
* A full survey of optimizers is beyond the scope of this tutorial. See here for a detailed overview (http://ruder.io/optimizing-gradient-descent).
* The main differences between the most frequently used ones (adapted from the link above) are as follows:
  * **Batch Gradient Descent** is the simplest and performs weight updates after looking at the entire dataset
  * **SGD** (Stochastic Gradient Descent) is on the other extreme and performs weight updates for each item (training example) in the dataset
  * **Mini-batch GD** is a variant of SGD and takes the best of both worlds. It updates weights after each mini-batch of data. In other words, pure SGD is mini-batch with a batch size of 1. Anything in between 1 and the entire dataset, we call Mini-batch GD
  * **Momentum** is a method that helps accelerate SGD in the relevant direction and attempts to dampen too many oscillations when trying to converge to a minimum
  * **Adagrad** adapts the learning rate to the parameters, and applies different learning rates for updating different parameters, based on the past history of the squares of the magnitudes of the gradients of each parameter. The main advantage of Adagrad is that the user does not have to tune the learning rate manually
  * **Adadelta** is an extension of Adagrad that seeks to reduce its aggressive, monotonically decreasing learning rate. Instead of accumulating all past squared gradients, Adadelta restricts the window of accumulated past gradients to some fixed size "w"
  * **RMSprop** is an unpublished, adaptive learning rate method proposed by **** in Lecture 6e of his **Coursera** Class (http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf). RMSprop and Adadelta were both developed independently around the same time, stemming from the need to resolve Adagrad's radically diminishing learning rates
  * **Adaptive Moment Estimation (Adam)** is another method that computes adaptive learning rates for each parameter. In addition to storing an exponentially decaying average of past squared gradients like Adadelta and RMSprop, Adam also keeps an exponentially decaying average of past gradients, similar to momentum
In my experimentation, **Adadelta** gives the highest accuracy on image datasets in general, much better than Adam and SGD, especially on Cifar10, although I admit I haven't tried RMSprop much. You should try these in your experiments to see if they make any difference (a minimal construction sketch for these optimizers is given at the end of this section).
Next we call our fit function, passing it the train and validation dataloaders, and train for 5 epochs, printing every 300 batches in each epoch while performing validation every epoch (remember that the default of validate_every is 1)

net.fit(trainloader,validloader,epochs=5,print_every=300)

We get the following output (partial output shown for brevity)
**updating best accuracy: previous best = 95.883 new best = 95.992**
This shows that we got reasonable accuracy **(95.99)** in only **5** epochs. Maybe we can squeeze some more juice out of it by training for a few more epochs. So let's first test our save and load chkpoint functions and then continue training for another 10 epochs.

net.save_chkpoint()

get_model_params: best accuracy = 95.992
get_model_params: chkpoint file = chkpoint_file_mnist_fc_test.pth
checkpoint created successfully in chkpoint_file_mnist_fc_test.pth

We load the saved chkpoint into another variable to ensure that it is a new model.

net2 = load_chkpoint('chkpoint_file_mnist_fc_test.pth')
net2.fit(trainloader,validloader,epochs=10,print_every=300)

**updating best accuracy: previous best = 96.392 new best = 96.875**
The best accuracy we could achieve on the validation set after another 10 epochs is 96.875.
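As referenced in the optimizer overview above, here is a minimal sketch (the learning rates are just illustrative defaults, not tuned values) of how the optimizers mentioned there are constructed with torch.optim for a model such as our net:

from torch import optim

optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)  # SGD with momentum
optimizer = optim.Adagrad(net.parameters(), lr=0.01)
optimizer = optim.Adadelta(net.parameters())                    # no manually tuned learning rate needed
optimizer = optim.RMSprop(net.parameters(), lr=0.01)
optimizer = optim.Adam(net.parameters(), lr=0.003)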
Let's save and restore the model one more time before testing our evaluate methodnet2.save_chkpoint() net3 = load_chkpoint('chkpoint_file_mnist_fc_test.pth') net3.evaluate(testloader)Let's also test our predict function next. To do that, we need to convert our testloader into a Python iterator, and then get the next batch from it using "next" method of the iterator. If you are not familiar with Python iterators, please see any good tutorial such as here (https://www.datacamp.com/community/tutorials/python-iterator-tutorial) for more informationiterator = iter(testloader) imgs_,labels_ = next(iterator) imgs_[0].shape,labels_[0].item()We can see above that first image of our first batch is 1x28x28 while its label = 7We can verify this by displaying the image using matplotlib's pyplot library after converting the image to numpy and removing the extra dimension to make it only 28 x 28 instead of 1 x 28 x 28Note that to convert a Pytorch Tensor to numpy array, simply use .numpy() method available on Pytorch Tensor objectsimport matplotlib.pyplot as plt %matplotlib inline fig = plt.figure(figsize=(40,10)) ax = fig.add_subplot(2,10, 1, xticks=[], yticks=[]) ax.imshow(np.squeeze(imgs_[0].numpy()), cmap='gray')Let's see if our model predicts the image correctnet3.predict(imgs_[0])[1].item()* Ok so our evaluate and predict methods seem to be working fine, and we are able to score around **97%** on test set with all individual accuracies in the mid 90s. * This is pretty good given that we have only trained for **15** epochs for less than **3 minutes** and are using a simple fully connected network without any fancy **CNN** stuff* Additionally, we have refactored our code into classes, utility functions and are also able to save and restore models as we require. Step 5: Create a Transfer Learning Class derived from the Base ClassNow we are ready to create our Transfer Learning class derived from our base **Network** class. Implementing **Transfer Learning** will turn out to be incredibly simple now with all the required machinery already in place. Transfer Learning class is based on torchvision.models module that contains support for downloading and using several pretrained Network architectures for Computer Vision. We are going to add support for three models: * Densenet121 which we simply call DenseNet * Resent34 and ResNet50 respectivelyWe have the option to use the pretrained versions of models (passing pretrained = True which is default anyway) where we obtain the architecture + weights, or just the architectures without weights and train them from scratch.* Most of the pretrained versions available in torchvision.models have been trained on ImageNet with 1000 output classes* We want to adapt the selected model to our use case. For example, for CIFAR10, we only need 10 classes, so our output should be set to 10 instead of 1000* Each model can be considered as composed of two parts: * Convolution Backbone (a CNN architecture with several blocks comprising of convolutions with varying number of filters, non-linearities, max or average pooling layers, batch normalizations, dropout layers etc.) * A head with fully connected classifier at the output end * In most cases, the output layer does not have any fully connected hidden layers * However, we have the option to replace the classifier layer with our own and add more hidden layers by replacing the output layer with our own. 
We may easily use our own FC class (defined earlier in this tutorial) for this purpose
* On the other hand, we may choose to just change the number of outputs without adding any additional hidden layers
* Henceforth, we are going to use our own FC class and replace the original model's output layer with an FC object. This gives us the flexibility to pass in additional hidden layers if we want.
The code for our new class 'TransferNetworkImg' (derived from our base class 'Network') is quite simple. You just have to pay attention to two functions:
* set_transfer_model, which sets the Transfer model from torchvision.models
* set_model_head, which sets the FC layer on the model after removing the original classifier or fc layer.

Setting the Classifier
* Note that the classifier at the head of each model is named differently in each torchvision model. There are better ways to handle this, such as using a predefined dictionary in a file, loading it and looking up the classifier field for each model type to get the output layer's name.
* However, in the following code, we are just using simple if else statements. You are welcome to create your own version of this class by creating such a dictionary if you want
* Setting the FC model is done in set_model_head. Since we need to call the FC constructor, we need to pass everything that is required to create our FC class object successfully. We do that by passing a dictionary called 'head' to our Transfer Learning Class
* To successfully create an FC model, we need to pass a fixed number of inputs to its constructor, since that is a requirement of the nn.Linear layer used in FC Networks. Luckily, the nn.Linear class in Pytorch stores its number of inputs in an attribute called 'in_features'. We can grab that from the original classifier layer in the transferred model (Densenet, Resnet etc.) and pass it as an argument to our FC constructor

Freezing and un-freezing layers
* When using Transfer Learning models, it is important to decide whether we want to retrain all the layers (including Convolutions and Fully Connected) on our dataset. For reasonably large datasets such as CIFAR10, it makes sense to retrain the whole network.
* However, please note that retraining all layers does not mean we are going to start from random weights. We still start with the pretrained weights of each layer and continue from there, but we will be calculating the gradients for all layers and updating all weights. In other words, the model keeps learning while retaining the knowledge it gained for identifying images when it trained on the previous dataset (ImageNet in most cases).
* It's like a child that we trained on a specific thing: we don't want it to throw away all that knowledge when it starts looking at new data.
* On the other hand, we may want to keep the weights frozen for the backbone while retraining the head only. This is a common scenario when we have trained the network on new data for a while and now our backbone knows both ImageNet and our new dataset (CIFAR10 in our case).
* Finally, we may want to do only predictions and want all weights, including the backbone and the head, to remain frozen. This is only good for prediction and evaluation though, and not for training, since there is no point in training if we don't want to do back propagation and update anything.
We write a function to freeze weights while keeping the head unfrozen by default, using the Pytorch Tensor's requires_grad flag.
This flag is available in all Tensors and we want to set it True or False for weight Tensors (which could be obtained via parameters() method of any model derived from nn.Module) Adding support for Freeze and Unfreeze in our base classWe need to add support for Freeze and Unfreeze in our base classclass Network(nn.Module): ... ''' We have added two methods to freeze and unfreeze the parameters of all layers of our model ''' def freeze(self): for param in self.model.parameters(): param.requires_grad = False def unfreeze(self): for param in self.model.parameters(): param.requires_grad = TrueTransfer Learning Classfrom torchvision import models class TransferNetworkImg(Network): def __init__(self, model_name='DenseNet', lr=0.003, criterion_name ='NLLLoss', optimizer_name = 'Adam', dropout_p=0.2, pretrained=True, device=None, best_accuracy=0., best_accuracy_file ='best_accuracy.pth', chkpoint_file ='chkpoint_file', head={}): super().__init__(device=device) self.model_type = 'transfer' self.set_transfer_model(model_name,pretrained=pretrained) if head is not None: self.set_model_head(model_name = model_name, head = head, optimizer_name = optimizer_name, criterion_name = criterion_name, lr = lr, dropout_p = dropout_p, device = device ) self.set_model_params(criterion_name, optimizer_name, lr, dropout_p, model_name, best_accuracy, best_accuracy_file, chkpoint_file, head) ''' set_model_params calls the parent's method as before and sets additional attributes specific to this class (head and model_type set to 'transfer') ''' def set_model_params(self,criterion_name, optimizer_name, lr, dropout_p, model_name, best_accuracy, best_accuracy_file, chkpoint_file, head): print('Transfer: best accuracy = {:.3f}'.format(best_accuracy)) super(TransferNetworkImg, self).set_model_params( criterion_name, optimizer_name, lr, dropout_p, model_name, best_accuracy, best_accuracy_file, chkpoint_file ) ''' We also set the head end of our model to create FC layer when required ''' self.head = head ''' this time our model_type is transfer and not classifier ''' self.model_type = 'transfer' def forward(self,x): return self.model(x) def get_model_params(self): params = super(TransferNetworkImg, self).get_model_params() params['head'] = self.head params['model_type'] = self.model_type params['device'] = self.device return params ''' Freeze first freezes all paramters of our model by calling the base class's freeze() method which we have added to the Network class (see below) and then unfreezes the head's (classifier attribute) parameters based on the passed flag. Note that we are calling our head as classifier. We would have to add more code if we want to handle the case of regression as well in future. We have added two methods, freeze and unfreeze to our base class appropriately ''' def freeze(self,train_classifier=True): super(TransferNetworkImg, self).freeze() if train_classifier: for param in self.model.classifier.parameters(): param.requires_grad = True def set_transfer_model(self,mname,pretrained=True): self.model = None if mname.lower() == 'densenet': self.model = models.densenet121(pretrained=pretrained) elif mname.lower() == 'resnet34': self.model = models.resnet34(pretrained=pretrained) elif mname.lower() == 'resnet50': self.model = models.resnet50(pretrained=pretrained) if self.model is not None: print('set_transfer_model: self.Model set to {}'.format(mname)) else: print('set_transfer_model:Model {} not supported'.format(mname)) ''' set_model_head calls the FC constructors using the head dictionary. 
It grabs the in_features from the appropriate attribute of the original model's classifier or fc layer. We need to check if the model was saved and loaded from a checkpoint because in the later case, the model's head-end object shall have num_inputs attribute instead of the original in_features because it would contain our FC object and that has num_inputs in place of in_features. ''' def set_model_head(self, model_name = 'DenseNet', head = {'num_inputs':128, 'num_outputs':10, 'layers':[], 'class_names':{} }, optimizer_name = 'Adam', criterion_name = 'NLLLoss', lr = 0.003, dropout_p = 0.2, device = None): self.num_outputs = head['num_outputs'] if model_name.lower() == 'densenet': if hasattr(self.model,'classifier'): in_features = self.model.classifier.in_features else: in_features = self.model.classifier.num_inputs self.model.classifier = FC(num_inputs=in_features, num_outputs=head['num_outputs'], layers = head['layers'], class_names = head['class_names'], non_linearity = head['non_linearity'], model_type = head['model_type'], model_name = head['model_name'], dropout_p = dropout_p, optimizer_name = optimizer_name, lr = lr, criterion_name = criterion_name, device=device ) elif model_name.lower() == 'resnet50' or model_name.lower() == 'resnet34': if hasattr(self.model,'fc'): in_features = self.model.fc.in_features else: in_features = self.model.fc.num_inputs self.model.fc = FC(num_inputs=in_features, num_outputs=head['num_outputs'], layers = head['layers'], class_names = head['class_names'], non_linearity = head['non_linearity'], model_type = head['model_type'], model_name = head['model_name'], dropout_p = dropout_p, optimizer_name = optimizer_name, lr = lr, criterion_name = self.criterion_name, device=device ) self.head = head print('{}: setting head: inputs: {} hidden:{} outputs: {}'.format(model_name, in_features, head['layers'], head['num_outputs'])) def _get_dropout(self): if self.model_name.lower() == 'densenet': return self.model.classifier._get_dropout() elif self.model_name.lower() == 'resnet50' or self.model_name.lower() == 'resnet34': return self.model.fc._get_dropout() def _set_dropout(self,p=0.2): if self.model_name.lower() == 'densenet': if self.model.classifier is not None: print('DenseNet: setting head (FC) dropout prob to {:.3f}'.format(p)) self.model.classifier._set_dropout(p=p) elif self.model_name.lower() == 'resnet50' or self.model_name.lower() == 'resnet34': if self.model.fc is not None: print('ResNet: setting head (FC) dropout prob to {:.3f}'.format(p)) self.model.fc._set_dropout(p=p)Adding support for Transfer Learning Model to load_chkpoint utility* We need to add the case for our TransferNetworkImg case in load_chkpoint function.* The main addition is the storage and retrieval of head along with other params and also adding support for passing the retrieved head to the constructordef load_chkpoint(chkpoint_file): restored_data = torch.load(chkpoint_file) params = restored_data['params'] print('load_chkpoint: best accuracy = {:.3f}'.format(params['best_accuracy'])) if params['model_type'].lower() == 'classifier': net = FC( num_inputs=params['num_inputs'], num_outputs=params['num_outputs'], layers=params['layers'], device=params['device'], criterion_name = params['criterion_name'], optimizer_name = params['optimizer_name'], model_name = params['model_name'], lr = params['lr'], dropout_p = params['dropout_p'], best_accuracy = params['best_accuracy'], best_accuracy_file = params['best_accuracy_file'], chkpoint_file = params['chkpoint_file'], class_names = 
params['class_names'] ) elif params['model_type'].lower() == 'transfer': net = TransferNetworkImg(criterion_name = params['criterion_name'], optimizer_name = params['optimizer_name'], model_name = params['model_name'], lr = params['lr'], device=params['device'], dropout_p = params['dropout_p'], best_accuracy = params['best_accuracy'], best_accuracy_file = params['best_accuracy_file'], chkpoint_file = params['chkpoint_file'], head = params['head'] ) net.load_state_dict(torch.load(params['best_accuracy_file'])) net.to(params['device']) return netStep 6: Train two different pretrained, transferred models on the CIFAR10 dataset Now we can move on to testing and experimentation. But before we do that, we should move our code to .py files and import it as modules. This makes it much more convenient and we don't have to rerun all the notebook cells every time we reset the Python kernel of our notebook to empty the GPU memory for a fresh run.I have created four files:1. **model.py (contains the core Network class)**2. **fc.py (contains the FC class)**3. **cv_model.py (contains the TransferNetworkImg class)**4. **utils.py (contains all the utility functions not belonging to any class)**We create these files in a folder called mylib and import all of them. We should also use a special Jupyter notebook directive that monitors the imported files and reloads any that change on disk. This comes in handy if we modify any of the files for any reason, e.g. to fix a bugfrom mylib.utils import * from mylib.model import * from mylib.cv_model import * from mylib.fc import * from mylib.chkpoint import * %load_ext autoreload %autoreload 2Testing and ExperimentationIn the following cells, we are going to perform the following steps in a sequence:* Create our classes dictionary as well as the head dictionary to pass to the Transfer Learning object's constructor* Create a Transfer Learning object for Densenet* Unfreeze it* Fit it to train for 3 epochs* Save the check-point* Load it back into another variable* Unfreeze again and repeat with 3 more epochs* Save the check-point again* Reload into another variable* Freeze this time and retrain for 3 more epochs* Save the model againclasses = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] class_dict = {k:v for k,v in enumerate(classes)} head={ 'num_outputs':10, 'layers':[], 'class_names':class_dict, 'non_linearity':'relu', 'model_type':'classifier', 'model_name':'FC' } transfer_densenet = TransferNetworkImg(model_name='DenseNet', optimizer_name = 'Adadelta', best_accuracy_file ='densenet_best_accuracy_cifar10.pth', chkpoint_file ='densenet_cifar10_chkpoint_file', head = head ) transfer_densenet.unfreeze() transfer_densenet.fit(trainloader,validloader,epochs=3,print_every=200)**updating best accuracy: previous best = 80.490 new best = 85.920**transfer_densenet.save_chkpoint() transfer_densenet2 = load_chkpoint('densenet_cifar10_chkpoint_file') transfer_densenet2.unfreeze() transfer_densenet2.fit(trainloader,validloader,epochs=3,print_every=200) transfer_densenet2.save_chkpoint()get_model_params: best accuracy = 90.770 get_model_params: chkpoint file = densenet_cifar10_chkpoint_file checkpoint created successfully in densenet_cifar10_chkpoint_file**This time we have crossed 90% accuracy after unfreezing and training for another 3 epochs**transfer_densenet3 = load_chkpoint('densenet_cifar10_chkpoint_file') transfer_densenet3.freeze()
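# Note: freeze() (added to the base class earlier) sets requires_grad=False on all
# of the DenseNet parameters and then, because train_classifier defaults to True,
# switches the FC head (the model's classifier attribute) back to trainable.
# The fit() call on the next line therefore only updates the head's weights,
# while the pretrained convolutional backbone stays fixed.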
transfer_densenet3.fit(trainloader,validloader,epochs=3,print_every=200)**updating best accuracy: previous best = 94.940 new best = 95.080**transfer_densenet3.save_chkpoint()get_model_params: best accuracy = 95.080 get_model_params: chkpoint file = densenet_cifar10_chkpoint_file checkpoint created successfully in densenet_cifar10_chkpoint_file**So after 9 epochs, 6 with unfreeze and 3 with freeze, we are at 95.08%** Let's repeat the same steps with Resnet34transfer_resnet = TransferNetworkImg(model_name='ResNet34', optimizer_name = 'Adadelta', best_accuracy_file ='resnet34_best_accuracy_cifar10.pth', chkpoint_file ='resnet34_cifar10_chkpoint_file', head = head ) transfer_resnet.unfreeze() transfer_resnet.fit(trainloader,validloader,epochs=3,print_every=200)**updating best accuracy: previous best = 82.700 new best = 86.040**transfer_resnet.save_chkpoint() transfer_resnet2 = load_chkpoint('resnet34_cifar10_chkpoint_file') transfer_resnet2.unfreeze() transfer_resnet2.fit(trainloader,validloader,epochs=3,print_every=200)**updating best accuracy: previous best = 89.400 new best = 89.640**transfer_resnet2.save_chkpoint() transfer_resnet3 = load_chkpoint('resnet34_cifar10_chkpoint_file') transfer_resnet3.freeze() transfer_resnet3.fit(trainloader,validloader,epochs=3,print_every=200)**updating best accuracy: previous best = 94.280 new best = 94.580**transfer_resnet3.save_chkpoint()get_model_params: best accuracy = 94.580 get_model_params: chkpoint file = resnet34_cifar10_chkpoint_file checkpoint created successfully in resnet34_cifar10_chkpoint_fileStep 7: Evaluate and predict on test set with individual models and EnsembleWe load both files for final Densenet and Resnet models and evaluate on the test settransfer_densenet = load_chkpoint('densenet_cifar10_chkpoint_file')**Transfer: best accuracy = 95.08**transfer_densenet.evaluate(testloader) transfer_resnet = load_chkpoint('resnet34_cifar10_chkpoint_file')**Transfer: best accuracy = 94.580**transfer_resnet.evaluate(testloader)Ensembling multiple models to improve accuracy* We have tested and evaluated two different "transferred" models. Both models seem to perform almost equally well on this dataset. We might wonder what would happen if we somehow combine the results of both models to make our final prediciton. Combining two or more models together is called Ensemble learning. * You might have heard the term in case of traditional ML with Random Forests (RF) and Gradient Boosted Decision Trees (GBDT) as Ensemble models. Here we are talking about using two or more Deep Learning models to try to achieve better accuracy. For more information, please see [Elements of Statistical Learning](https://web.stanford.edu/~hastie/Papers/ESLII.pdf)* The intuition behind Ensembling is that one model might have mis-classified a specific example while predicting but one or more of the others might have got it right. Our final prediction accuracy would likely improve if we somehow combine the predictions.* One simple way to combine the predicitions is to give weights to each model's predictions based on some heuristic such as: 1. Simple averaging of predicted values (e.g. probabilities) of different ensembles 2. Assigning different weights to each member of an Ensemble based on its performance on the validation set 3. Assign weights based on our experience with the model in general on multiple datasets. 
If one model performs better in the majority of cases, we should give its prediction more weight.* A generalized way to create an Ensemble could be to create an Ensemble model class derived from our base Network class just like for Transfer-Learning and FC. * We don't need to have fit and train methods since the members of our Ensemble are expected to be pretrained outside the Ensemble itself. However, implementing the predict and evaluate methods in the Ensemble class makes sense. * We could pass the model objects to the Ensemble along with their weights while constructing it. In an ideal scenario, we would like an ML model of some sort to learn those weights itself, but to keep things simpler (at least in this tutorial), we would pass weights by using some heuristic as discussed. * We could then write evaluate and predict methods such that they call each individual member's corresponding method, multiply its output by the model's given weight, and add the weighted predictions to make the final one.Below we show the relevant code of such a class. The code is quite simple and salient parts are explained for further clarity.class EnsembleModel(Network): ''' The constructor expects a list of models and each member of the list to be a tuple. The first element of the tuple must be the pretrained model object and the second the weight of the model. ''' def __init__(self,models): self.criterion = None super().__init__() self.models = models ''' The weights must sum to 1 so that our predictions are a weighted sum of the predictions of all models for each class. ''' if sum(model[1] for model in models) != 1.0: raise ValueError('Weights of Ensemble must sum to 1') def evaluate(self,testloader,metric='accuracy'): from collections import defaultdict #evaluations = defaultdict(float) #num_classes = self.models[0][0].num_outputs class_correct = defaultdict(int) class_totals = defaultdict(int) class_names = self.models[0][0].class_names with torch.no_grad(): for inputs, labels in testloader: ps_list = [] ''' We go in a loop calling the forward method of each model and multiply the predicted values by the model's weight. ''' for model in self.models: model[0].eval() model[0].to(model[0].device) inputs, labels = inputs.to(model[0].device), labels.to(model[0].device) outputs = model[0].forward(inputs) ps = torch.exp(outputs) ps = ps * model[1] # multiply by model's weight ''' We build a list of the predicted probabilities and then go through the list performing the sum of those values. Since the predictions have already been multiplied by weights in the previous loop, we just need to sum now to get the final weighted sum. The vector of weighted sums contains the ensembled predicted values for each class. We can get the max of it just like we do for our regular models to get the final prediction for this image. ''' ps_list.append(ps) final_ps = ps_list[0] for i in range(1,len(ps_list)): final_ps = final_ps + ps_list[i] _, final_preds = torch.max(final_ps, 1) #print(final_preds) update_classwise_accuracies(final_preds,labels,class_correct,class_totals) return get_accuracies(class_names,class_correct,class_totals) ''' Predict is very similar to the regular predict. Again the only difference is that we have the two loops and the final prediction is the topk of the ensembled weighted sum of predictions.
''' def predict(self,inputs,topk=1): ps_list = [] for model in self.models: model[0].eval() model[0].to(model[0].device) with torch.no_grad(): inputs = inputs.to(model[0].device) outputs = model[0].forward(inputs) ps_list.append(torch.exp(outputs)*model[1]) final_ps = ps_list[0] for i in range(1,len(ps_list)): final_ps = final_ps + ps_list[i] _,top = final_ps.topk(topk, dim=1) return top def forward(self,x): outputs = [] for model in self.models: outputs.append(model[0].forward(x)) return outputsEvaluating with Ensemble Models* Let's create an Ensemble object and give 0.5 weight to both of our models since they don't differ by much and observe the improvement in performance (if any)ensemble = EnsembleModel([(transfer_densenet,0.5),(transfer_resnet,0.5)]) ensemble.evaluate(testloader)**So we do get a significant improvement by using the Ensemble.** Step 8: Predict on the much larger Test set given by KaggleThe original CIFAR10 dataset has 60000 images, 50000 in the train set and 10000 in the test set. However, Kaggle has provided a huge dataset of 300000 images for testing of CIFAR10. Here is what the Kaggle web-site has to say about these images:"To discourage certain forms of cheating (such as hand labeling) we have added 290,000 junk images in the test set. These images are ignored in the scoring. We have also made trivial modifications to the official 10,000 test images to prevent looking them up by file hash. These modifications should not appreciably affect the scoring. You should predict labels for all 300,000 images."Unzipping this test dataset once it has downloaded takes an enormous amount of time (several hours on my machine). Creating our own custom Dataset for Kaggle test imagesIn order to handle this dataset, we have written our own custom dataset class derived from the base Dataset class of Pytorch. We then pass this dataset object to the Pytorch Dataloader. This makes it much more convenient to handle this large dataset. It also gives us good practice in creating our own Dataset for images.Below is the code for our own custom dataset class. The code is pretty straightforward. * A typical custom dataset contains an "__ init __" method, a "__ getitem__" method that returns a single item given an index, and an "__ len __ " method to make Python's len() function work on the dataset.* Our custom dataset class assumes that information about the dataset image files is contained in a CSV file.* The image ids are contained in column 0 of the file while image filename (path) is in column 1 and image's label (if available in the file) as a text e.g. bird, plane etc. is contained in column 2. * Our Kaggle test set has no labels since Kaggle uses it for scoring the competition and therefore, does not provide labels for test sets.* We use Pandas Dataframe to handle the CSV file and then create the actual image setimport pandas as pd from PIL import Image class ImageDataset(Dataset): def __init__(self, csv_path, transforms=None, labels=False): self.labels = None self.transforms = None ''' We read the csv file into a Pandas DataFrame and extract image ids, image file-paths and labels (if present) from its columns, assuming that they are contained in columns 0, 1 and 2 respectively.
''' self.df = pd.read_csv(csv_path) self.ids = np.asarray(self.df.iloc[:, 0]) self.images = np.asarray(self.df.iloc[:, 1]) if labels: self.labels = np.asarray(self.df.iloc[:, 1]) ''' We set the length of the dataset as well as a transform if passed ''' self.data_len = len(self.df.index) if transforms is not None: self.transforms = transforms #print(self.data_len) ''' In the __ getitem__ we have to read a single image from its file according to the index requested in. Remember that this function shall be called by the Dataloader when creating a batch. It would call it for each ieration of batch consturction loop. We return the image and position index on each such call. ''' def __getitem__(self, index): image_name = self.images[index] id_ = self.ids[index] img_ = Image.open(image_name) ''' We apply transforms on each channel of the image. [:3,:,:] means all rows and columns of all three channels if any tranforms were given in the constructor. ''' if self.transforms is not None: img_ = self.transforms(img_)[:3,:,:] ''' Just to keep the API consistent and always return a two-valued tuple, we return 0 as label with each image even if there was no label. The assumption is that the caller of this method knows if label is to be expected. ''' label = 0 if self.labels is not None: label = self.labels[index] ''' Our Dataset object is returning a three member tuple (id,image,label) ''' return (id_,img_,label) def __len__(self): return self.data_lenExtracting Meta-Data of the Test Set into a CSV file * In order to use our Custom Dataset class with a Pytorch Dataloader, we need to create the csv file we want to pass to the Dataset constructor. * Kaggle hasn't really given usa csv file. All we have is a folder of images. To create the csv file we need to parse the image filenames and store the image names (ids) in first clumn and the path in the second column of our csv file. * Below is a function that uses Python's glob module, with Pandas Dataframe and some Python string searching functions to create such a csv file. The code is quite straightforward so we won't describe it here for brevity.def create_csv_from_folder(folder_path,outfile,cols=['id','path']): f = glob.glob(folder_path+'/*.*') ids = [] for elem in f: t = elem[elem.rfind('/')+1:] ids.append(t[:t.rfind('.')]) data = {cols[0]:ids,cols[1]:f} df = pd.DataFrame(data,columns=cols) df.to_csv(outfile,index=False)* Using this function we can create our csv file, passing it the folder and the desired output csv filename as arguments. 
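As a small aside (this is my own variant, not part of the tutorial's mylib code): the same CSV can be built with pathlib instead of the manual '/' and '.' string searching, which also keeps the function portable across operating systems:

import pandas as pd
from pathlib import Path

def create_csv_from_folder_pathlib(folder_path, outfile, cols=['id', 'path']):
    # Path.stem is the file name without its extension, i.e. the image id
    files = sorted(Path(folder_path).glob('*.*'))
    df = pd.DataFrame({cols[0]: [f.stem for f in files],
                       cols[1]: [str(f) for f in files]},
                      columns=cols)
    df.to_csv(outfile, index=False)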
We have placed our test images from Kaggle in cifar10-test foldercreate_csv_from_folder('cifar10-test','cifar10-test.csv')* We can test our code to see s sample of contents from the csv filedf = pd.read_csv('cifar10-test.csv') df[:10] len(df.index)Testing and preparing the submission file for Kaggle Test Set Now all we need to do is create our custom Dataset and a Dataloader to perform evaluation on it using our modelstest_transform_cifar10 = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(cifar10_mean,cifar10_std) ]) cifar10_test_dset = ImageDataset('cifar10-test.csv',transforms=test_transform_cifar10) len(cifar10_test_dset)* As expected, the Dataset has 300000 imagescifar10_test_dset.df[:10] cifar10_test_testloader = DataLoader(cifar10_test_dset, batch_size=50,num_workers=0) dataiter = iter(cifar10_test_testloader) id_,images_,_ = dataiter.next() images_.shape* As expected, our Dataloader's one batch has correct dimensions To submit to Kaggle we need to create a csv file with image-id (name) in first column and label in the second (please see the competition web-page here to see the sample submission file).The easiest way to do that is again to use Pandas DataFrame to prepare the results and the file.Below we have the normal Dataloader loop: * We get the next batch of data. Remember that our Dataset object is returning a three member tuple (id,image,label). We are ignoring the label in this case since we are always returning 0 * We first predict using our Ensemble, convert the predictions Tensor back to CPU, then convert it to numpy, flatten it using numpy's own flatten method available on numpy arrays, and finally convert to a simple Python list. This gives us the predicted classes for the whole batch * We keep collecting the predictions in our list and the corresponding labels in another list (using a lookup of image_ids into our class dictionary) * We finally create a Pandas DataFrame with two required columns and write it as a CSV file to disk. To match the exact required format, we set the index to False * Next we sort the values according to ids as the sample file shows us and rewrite the CSV filepredictions = [] image_ids = [] for ids_,images_,_ in cifar10_test_testloader: preds_ = ensemble.predict(images_).cpu().numpy().flatten().tolist() predictions += [class_dict[pred] for pred in preds_] image_ids += ids_.numpy().flatten().tolist() pd.DataFrame({'id':image_ids,'label':predictions}).to_csv('submission.csv',index=False) df = pd.read_csv('submission.csv') df = df.sort_values('id') df.to_csv('submission.csv',index=False)Self study 1In this self study you should work on the code examples below together with the associated questions. The notebook illustrates a basic neural network implementation, where we implement most of the relevant functions from scratch. Except the calculation of gradients, for which we rely on the functionality provided by PyTorch. The code illustrates the key concepts involved in the learning neural network. Go carefully through the code before starting to answer the questions at the end. Part of the code in the notebook is based on the tutorial at https://pytorch.org/tutorials/beginner/nn_tutorial.html First we import the modules used in this selfstudyimport torch from pathlib import Path import requests from matplotlib import pyplot import matplotlib.pyplot as plt import numpy as np import pickle import gzipThe MNIST database consists of grey scale images of handwritten digits. 
Each image is of size $28\times 28$; see figure below for an illustration. The data set is divided into a training set, validation set, and test set consisting of $50000$, $10000$, and $10000$ images, respectively; in all data sets the images are labeled with the correct digits. If interested, you can find more information about the MNIST data set at http://yann.lecun.com/exdb/mnist/, including accuracy results for various machine learning methods.![MNIST DATA](MNIST-dataset.png) First we download the dataset and unpackage it.DATA_PATH = Path("data") PATH = DATA_PATH / "mnist" PATH.mkdir(parents=True, exist_ok=True) URL = "http://deeplearning.net/data/mnist/" FILENAME = "mnist.pkl.gz" if not (PATH / FILENAME).exists(): content = requests.get(URL + FILENAME).content (PATH / FILENAME).open("wb").write(content)We then extract the data and store it numpy arrays: x_train, y_train, x_valid, y_valid, x_test, y_testwith gzip.open((PATH / FILENAME).as_posix(), "rb") as f: ((x_train, y_train), (x_valid, y_valid), (x_test, y_test)) = pickle.load(f, encoding="latin-1") x_valid.shapeThe $28 \times 28$ images are stored in rows of length $784$, hence to display the images we need to reshape them.pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray") print(x_train.shape)(50000, 784)In order to take adavntage of PyTorch support for calculating gradients, we need to convert the numpy arrays to PyTorch tensors. See the code example from the last lecture on PyTorch's support for automatic gradient calculation using the back propagation algorithm.x_train, y_train, x_valid, y_valid, x_test, y_test = map( torch.tensor, (x_train, y_train, x_valid, y_valid, x_test, y_test) ) n, c = x_train.shape x_train, x_train.shape, y_train.min(), y_train.max() print(x_train, y_train) print(x_train.shape) print(y_train.min(), y_train.max())tensor([[0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.], ..., [0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.]]) tensor([5, 0, 4, ..., 8, 4, 8]) torch.Size([50000, 784]) tensor(0) tensor(9)For the first part of this self study we will specify a neural network, which will encode a softmax function. For this we need a (randomly initialized) weight matrix and a bias, and for both of them we need their gradients wrt. 
our error function (yet to be defined) in order to perform learning.weights = torch.randn(784, 10) / np.sqrt(784) print(weights) weights.requires_grad_() bias = torch.zeros(10, requires_grad=True)tensor([[ 3.8133e-02, -2.2307e-02, 3.7967e-02, ..., -2.4189e-04, -9.4568e-04, -3.1848e-02], [ 3.1047e-02, -1.8964e-02, -5.3290e-04, ..., -1.3724e-02, -4.9714e-02, -1.9803e-02], [-1.2769e-01, -1.9871e-02, -2.9242e-03, ..., 1.3848e-02, -9.6237e-02, -3.1801e-02], ..., [-1.0241e-03, -1.9770e-02, -3.3223e-02, ..., 5.8534e-02, 3.5629e-02, -4.8278e-02], [ 2.0139e-02, 1.6968e-03, -1.4516e-02, ..., -2.6358e-02, -3.0065e-02, 5.7078e-03], [ 2.8457e-02, -6.9453e-02, 1.0701e-05, ..., 2.1814e-02, -5.1264e-02, 2.9715e-02]])Out model specificationdef softmax(x): return x.exp() / x.exp().sum(-1).unsqueeze(-1) # Below @ refers to matrix multiplication def model(xb): return softmax(xb @ weights + bias)Let's test our model (with our randomly initialized weights) using a so-called batch size of 64 (more on this later in the note book); for the prediction we pick out the first element in the batch.batch_size = 64 xb = x_train[0:batch_size] print(f"Batch shape: {xb.shape}") preds = model(xb) print(f"Prediction on first image {preds[0]}") print(f"Corresponding classification: {preds[0].argmax()}")Batch shape: torch.Size([64, 784]) Prediction on first image tensor([0.0673, 0.1038, 0.0988, 0.0766, 0.1603, 0.0896, 0.1360, 0.0892, 0.0861, 0.0923], grad_fn=) Corresponding classification: 4Next we define our loss function, in this case the log-loss (or negative log-likelihood):def nll(input, target): return (-input[range(target.shape[0]), target].log()).mean() loss_func = nll # Make a test calculation yb = y_train[0:batch_size] print(loss_func(preds,yb))tensor(2.3349, grad_fn=)In the end, we are interested in the accuracy of our modeldef accuracy(out, yb): preds = torch.argmax(out, dim=1) return (preds == yb).float().mean() print(f"Accuracy of model on batch (with random weights): {accuracy(preds, yb)}") print(preds.shape)Accuracy of model on batch (with random weights): 0.109375 torch.Size([64, 10])Now we are ready to combine it all and perform learningimport time epochs = 4 # how many epochs to train for lr = 0.05 # learning rate start = time.time() train_losses = [] valid_losses = [] momentum = 0.9 v = 0 for epoch in range(epochs): for batch_idx in range((n - 1) // batch_size + 1): start_i = batch_idx * batch_size end_i = start_i + batch_size xb = x_train[start_i:end_i] yb = y_train[start_i:end_i] pred = model(xb) #print(pred) loss = loss_func(pred, yb) loss.backward() with torch.no_grad(): #weights -= weights.grad * lr v = momentum * v -(weights.grad) ## momentum not sure if well implemented weights-=lr*v bias -= bias.grad * lr weights.grad.zero_() bias.grad.zero_() if batch_idx % 50 == 0: with torch.no_grad(): train_loss = loss_func(model(x_train), y_train) print(f"Epoch: {epoch}, B-idx: {batch_idx}, Training loss: {train_loss}") train_losses.append(train_loss) # valid_loss = loss_func(model(x_valid), y_valid) #print(f"Epoch: {epoch}, B-idx: {batch_idx}, valid loss: {valid_loss}") #valid_losses.append(train_loss)Epoch: 0, B-idx: 0, Training loss: 0.33044344186782837 Epoch: 0, B-idx: 100, Training loss: nan Epoch: 0, B-idx: 200, Training loss: nan Epoch: 0, B-idx: 300, Training loss: nan Epoch: 0, B-idx: 400, Training loss: nan Epoch: 0, B-idx: 500, Training loss: nan Epoch: 0, B-idx: 600, Training loss: nan Epoch: 0, B-idx: 700, Training loss: nan Epoch: 1, B-idx: 0, Training loss: nan Epoch: 1, B-idx: 100, Training loss: 
nan Epoch: 1, B-idx: 200, Training loss: nan Epoch: 1, B-idx: 300, Training loss: nan Epoch: 1, B-idx: 400, Training loss: nan Epoch: 1, B-idx: 500, Training loss: nan Epoch: 1, B-idx: 600, Training loss: nan Epoch: 1, B-idx: 700, Training loss: nan Epoch: 2, B-idx: 0, Training loss: nan Epoch: 2, B-idx: 100, Training loss: nan Epoch: 2, B-idx: 200, Training loss: nan Epoch: 2, B-idx: 300, Training loss: nan Epoch: 2, B-idx: 400, Training loss: nan Epoch: 2, B-idx: 500, Training loss: nan Epoch: 2, B-idx: 600, Training loss: nan Epoch: 2, B-idx: 700, Training loss: nan Epoch:[...]Plot the evolution of the training lossplt.plot(range(len(train_losses)), train_losses,'b')__Exercise:__ 1. Experiment with different variations of the gradient descent implementation; try varying the learning rate and the batch size. Assuming that you have a fixed time budget (say 2 minutes for learning), what can we then say about the effect of changing the parameters?2. Implement momentum in the learning algorithm. How does it affect the results? 3. Try with different initialization schemes for the parameters (e.g. allowing for larger values). How does it affect the behavior of the algorithm?4. Analyze the behavior of the algorithm on the test set and implement a method for evaluating the accuracy over the entire training/test set (for inspiration, see Line 21 above).NB: We didn't have time to cover momentum and batch sizes during the last lecture, so please revisit the slides/literature and try to get the gist of this on your own. We will discuss it further at the lecture on Thursday.pred_labels=model(x_test) print(pred_labels) print(y_valid) print(f"Accuracy of model on batch (with random weights): {accuracy(pred_labels, y_test)}")tensor([[1.1843e-04, 1.6155e-07, 1.7715e-04, ..., 9.9538e-01, 9.2537e-05, 1.8497e-03], [1.0879e-02, 1.2150e-04, 9.0924e-01, ..., 3.2650e-08, 6.5680e-03, 1.0433e-06], [1.2660e-04, 9.5337e-01, 1.6460e-02, ..., 5.0805e-03, 9.7125e-03, 1.5165e-03], ..., [1.6236e-06, 6.3454e-06, 6.3335e-05, ..., 4.0679e-03, 1.2782e-02, 4.4927e-02], [1.5373e-03, 3.1172e-03, 6.9479e-04, ..., 5.2257e-04, 2.5794e-01, 7.4855e-04], [3.4815e-04, 8.5643e-09, 9.4680e-04, ..., 3.4589e-08, 5.1580e-06, 9.0236e-07]], grad_fn=) tensor([3, 8, 6, ..., 5, 6, 8]) Accuracy of model on batch (with random weights): 0.9133999943733215Load in Datatickers = ['AAPL', 'MSFT', 'CVX', 'GE', 'GOOGL'] stock_data = utils.get_data(tickers, '1d', '2015-01-01', '2021-01-01')[*********************100%***********************] 1 of 1 completed AAPL data downloaded... [*********************100%***********************] 1 of 1 completed MSFT data downloaded... [*********************100%***********************] 1 of 1 completed CVX data downloaded... [*********************100%***********************] 1 of 1 completed GE data downloaded... 
[*********************100%***********************] 1 of 1 completed GOOGL data downloaded...Get Expected Returnsstock_returns = invs.stock_invariants(stock_data) stock_returns.head() exp_returns = stock_returns.mean() * 252 exp_returnsGet Covariance Matrixriskmodel = moments.RiskModel(tickers) stock_cov = riskmodel.avg_hist_cov(stock_data) stock_covSolve Optimisation Problemoptimiser = Optimiser(tickers, exp_returns, stock_cov) weights = optimiser.mean_variance(threshold=0.1) # weights = optimiser.max_sharpe() # min_pos = lambda x: x >= 0.1*np.ones(len(tickers)) # optimiser.add_constraint(min_pos) # weights = optimiser.solve(objective='max_sharpe') weights optimiser.weight_tearsheet(weights)Annual Return: 15.026 Annual Volatility: 23.203 Annual Sharpe: 0.561JupyterLab Outsource![](../docs/img/jupyterlab-outsource.png "screenshot of JupyterLab rich text editing and visual programming")This was written it in a **What You See Is What You Get** environment, powered by [Prosemirror](https://github.com/prosemirror/prosemirror).> *But I can also edit be edited in the notebook markdown cell easily, either in **Preview** or **Edit** mode**In* JupyterLab, click the *Magic Wand* icon…%%html

You can pick from… * *Prosemirror* * *Blockly*> *Suggest more input types [on Github](https://github.com/deathbeds/jupyterlab-outsource)!* BlocklyBlockly definitions, like which blocks and where to place them, are stored in the notebook cell metadata.# start blockly greeting = None planet = None i = None j = None def upRange(start, stop, step): while start <= stop: yield start start += abs(step) def downRange(start, stop, step): while start >= stop: yield start start -= abs(step) greeting = 'hello' planet = 'world' i_end = float(len(greeting)) for i in (0 <= i_end) and upRange(0, i_end, 1) or downRange(0, i_end, 1): j_end = float(len(planet)) for j in (0 <= j_end) and upRange(0, j_end, 1) or downRange(0, j_end, 1): if greeting[int(i - 1)] == planet[int(j - 1)]: break print(''.join([str(x) for x in ['letter ', i, ' in ', greeting, ' is the same as letter ', j, ' in ', planet]])) # end blocklyletter 5 in hello is the same as letter 2 in worldNaive Bayes implementationIt is tested with the following example data sets:- [arrhythmia](./data/arrhythmia.csv)- [banknote](./data/banknote.csv)- [forestfires](./data/forestfires.csv)- [lung-cancer](./data/lung-cancer.csv)- [phishing-websites](./data/phishing-websites.csv)- [pima-indians-diabetes](./data/pima-indians-diabetes.csv)The main source for the code is the following tutorial: [Naive Bayes Classifier From Scratch in Python](http://machinelearningmastery.com/naive-bayes-classifier-scratch-python/)from argparse import ArgumentParser from math import exp from math import pi as PI from math import sqrt from numpy import mean, std from sklearn.naive_bayes import GaussianNB from utility import display, load_dataset, split_datasetCalculate the mean, stdev and count for each column in a datasetdef summarize(dataset): summaries = [(mean(attribute), std(attribute)) for attribute in zip(*dataset)] del summaries[-1] return summariesSplit the dataset by class values, returns a dictionarydef separate_by_class(dataset, target): separated = {} for i in range(len(dataset)): vector = dataset[i] result = target[i] if result not in separated: separated[result] = [] separated[result].append(vector) return separatedSplit dataset by class then calculate statistics for each rowdef summarize_by_class(dataset, target): separated = separate_by_class(dataset, target) summaries = {} for classValue, instances in separated.items(): summaries[classValue] = summarize(instances) return summariesCalculate the Gaussian probability distribution function for xdef calculate_probability(x, mean, stdev): if mean == 0 or stdev == 0: return 0 exponent = exp(-(pow(x - mean, 2) / (2 * pow(stdev, 2)))) return (1 / (sqrt(2 * PI) * stdev)) * exponentCalculate the probabilities of predicting each class for a given rowdef calculate_class_probabilities(summaries, input_vector): probabilities = {} for class_value, class_summaries in summaries.items(): probabilities[class_value] = 1 for i in range(len(class_summaries)): mean, stdev = class_summaries[i] x = input_vector[i] probability = calculate_probability(x, mean, stdev) # ignore zero probability if probability != 0: probabilities[class_value] *= probability return probabilitiesPredict the class for a given rowdef predict(summaries, input_vector): probabilities = calculate_class_probabilities(summaries, input_vector) best_label, best_prob = None, -1 for class_value, probability in probabilities.items(): if best_label is None or probability > best_prob: best_prob = probability best_label = class_value return best_labelCalculate predictionsdef 
get_predictions(summaries, test_set): predictions = [] for i in range(len(test_set)): result = predict(summaries, test_set[i]) predictions.append(result) return predictionsTest the algorithm Load and split datadataset, target = load_dataset("data/banknote.csv") train_x, train_y, test_x, actual = split_dataset(dataset, target, 0.8) print(f"Training set size: {len(train_x)}, Testing set size: {len(test_x)}")Using self-implementation# prepare model summaries = summarize_by_class(train_x, train_y) # test model predictions = get_predictions(summaries, test_x) display(actual, predictions)Using scikit-learngnb = GaussianNB() y_pred = gnb.fit(train_x, train_y).predict(test_x) display(actual, y_pred)!pip install qiskit from qiskit import QuantumCircuit, assemble, Aer from qiskit.visualization import plot_histogram**Encoding The Input**##Using the Bit-Flip gate as 'NOT' gate of classical qc = QuantumCircuit(8) ##We bit-flipped the 7th bit qc.x(7) !pip install pylatexenc qc.draw() qc.measure_all() qc.draw(initial_state=True) simul = Aer.get_backend('aer_simulator') result = simul.run(qc, validate=True).result() counts = result.get_counts() plot_histogram(counts)The output value 1 comes from the 7th qubit, as we had applied the bit-flip gate at the 7th qubit itself, and initially the values of all qubits are zero.##Using the CNOT gate to extract the output i.e. to check wheather the bits are different or the same qc_cnot = QuantumCircuit(2) ##the CNOT gate is applied to the 2 qubits qc_cnot.cx(0,1) qc_cnot.draw()Here, q_0 is the control bit, and q_1 is the target bitqc_cnot1 = QuantumCircuit(2,2) qc_cnot1.x(0) qc_cnot1.cx(0,1) qc_cnot1.measure_all(1,1) qc_cnot1.measure_all(0,0) qc_cnot1.draw()Input (q1 q0) Output (q1 q0) 00 00 01 11 10 10 11 01The Output is 1 1, as q0 = 1 because we had initially introduced a bit flip operaor and q1 = 0Test for MohammedThis container was started with**sudo docker run -d -p 433:8888 --name=sar -v /home/mort/imagery/mohammed/Data:/home/imagery mort/sardocker**%matplotlib inlineHere are the RadarSat-2 quadpol coherency matrix image directories as created from the Sentinel-1 Toolbox:ls /home/imageryTo combine the matrix bands into a single GeoTiff image, we run the python script ingestrs2quad.py:run /home/ingestrs2quad /home/imagery/RS2_OK82571_PK721079_DK650144_FQ17W_20160403_230258_HH_VV_HV_VH_SLC/ run /home/ingestrs2quad /home/imagery/RS2_OK82571_PK721080_DK650145_FQ17W_20160427_230257_HH_VV_HV_VH_SLC/ run /home/ingestrs2quad /home/imagery/RS2_OK82571_PK721081_DK650146_FQ17W_20160614_230256_HH_VV_HV_VH_SLC/Here is an RGB display of the three diagonal matrix elements of the above image (bands 1,6 and 9):run /home/dispms -f /home/imagery/RS2_OK82571_PK721081_DK650146_FQ17W_20160614_230256_HH_VV_HV_VH_SLC/polSAR.tif \ -p [1,6,9]To estimate the equivalent number of looks, run the python script enlml.py:run /home/enlml /home/imagery/RS2_OK82571_PK721081_DK650146_FQ17W_20160614_230256_HH_VV_HV_VH_SLC/polSAR.tifSo the ENL would appear to be about 5.To run the change sequential change detection on the three images, run the bash script sar_seq_rs2quad.sh. It gathers the three images together and calls the python script sar_seq.py which does the change detection. By choosing a spatial subset (in this case 400x400), the images are clipped and co-registered to the first image. 
This might be unnecessary if the images are well registered anyway.If you have a multicore processor you can eneable parallel computation by openeing a terminal window in the container (new terminal) and runningipcluster start -n 4!/home/sar_seq_rs2quad.sh 20160403 20160427 20160614 [50,50,400,400] 5 0.01Here is the change map for the most recent changes:run /home/dispms \ -f /home/imagery/RS2_OK82571_PK721079_DK650144_FQ17W_20160403_230258_HH_VV_HV_VH_SLC/sarseq(20160403-1-20160614)_cmap.tif -c**PREPROCESSING AND ANALYSIS** [1] Fill in the missing latitude and longitude values by calculating the average for that country. Round the average to 2 decimal placeseuCities = pd.read_csv('EuCitiesTemperatures.csv') df = DataFrame(euCities) country = round(df.groupby('country').mean(), 2) updated_df = df.copy() for index in updated_df.index: if str(updated_df.loc[index, 'latitude']) == 'nan': updated_df.loc[index, 'latitude'] = country['latitude'][updated_df.loc[index, 'country']] for index in updated_df.index: if str(updated_df.loc[index, 'longitude']) == 'nan': updated_df.loc[index, 'longitude'] = country['longitude'][updated_df.loc[index, 'country']] updated_df[2] Find out the subset of cities that lie between latitudes 40 to 60 (both inclusive) and longitudes 15 to 30 (both inclusive). Find out which countries have the maximum number of cities in this geographical band. (More than one country could have the maximum number of values.)filteredDf = DataFrame(updated_df[(updated_df['latitude'] >= 40) & (updated_df['latitude'] <= 60) & (updated_df['longitude'] >= 15) & (updated_df['longitude']<=30)]) country_frq_dict = filteredDf['country'].value_counts().to_dict() max_value = max(country_frq_dict.values()) a = [k for k,v in country_frq_dict.items() if v == max_value] print('Countries that have the maximum number of cities in this geographical band is/are:') for i in a: print(i)Countries that have the maximum number of cities in this geographical band is/are: Romania Poland[3] Fill in the missing temperature values by the average temperature value of the similar region type. A region type would be a combinaton of whether it is in EU (yes/no) and whether it has a coastline (yes/no)region = round(updated_df.groupby(['EU','coastline']).mean(), 2) for id, row in updated_df.iterrows(): tup = row.loc['EU'], row.loc['coastline'] value = region['temperature'][tup] if pd.isnull(row['temperature']): updated_df.loc[id, 'temperature'] = value updated_df**VISUALIZATION** [1] Plot a bar chart for the number of cities belonging to each of the regions described in Preprocessing/Analysis 3 aboveregion = updated_df.groupby(['EU','coastline']).count() region = region.reset_index() region.drop(columns = ['country', 'population', 'latitude', 'longitude', 'temperature']) plt.figure(figsize=(10,8)) plt.bar(range(0,4), region['city']) plt.xticks(range(0,4), ['No No','No Yes', 'Yes No','Yes Yes'], fontsize = 13) plt.xlabel('Region', fontsize = 15) plt.ylabel('Number of Cities', fontsize =15) plt.show()[2] Plot a scatter plot of latitude (y-axis) v/s longitude (x-axis) values to get a map-like visual of the cities under consideration. 
All the cities in the same country should have the same colorfig, ax = plt.subplots(1,1) fig.set_size_inches(20,20) country_types = updated_df['country'].unique() colors= [] for i in range(0,37): r = random.random() g = random.random() b = random.random() colors.append((r,g,b)) for country, color in zip(country_types, colors): df = updated_df[updated_df['country'] == country] df.plot('longitude','latitude',ax=ax,kind='scatter',color=color, label =country) plt.xticks(range(-10, 60,10))[3] The population column contains values unique to each country. So two cities of the same country will show the same population value. Plot a histogram of the number of countries belonging to each population group: split the population values into 5 bins (groups)region2 = updated_df.groupby(['EU','coastline', 'country']).mean() l = list(region2['population']) plt.hist(l, color = 'blue', bins = 5, edgecolor = 'black') plt.xlabel('Population', fontsize = 13) plt.ylabel('Number of countries',fontsize = 13) plt.xticks(range(0,86, 17)) plt.yticks(range(0,31,5)) plt.show()[4] Plot subplots (2, 2), with proper titles, one each for the region types described in Preprocessing/Analysis 3 above Each subplot should be a scatter plot of Latitude (y-axis) vs. City (x-axis), where the color of the plot points should be based on the temperature values: ‘red’ for temperatures above 10, ‘blue’ for temperatures below 6 and ‘orange for temperatures between 6 and 10 (both inclusive). For each subplot, set xticks to an array of numbers from 0 to n-1 (both inclusive), where n is the total number of cities in each region type. This represents each city as a number between 0 and n-1def get_region(region, i, j): res_df = pd.DataFrame() if i == 0: if j == 0: res_df = region[(region['EU'] == 'no') & (region['coastline']=='no')] elif j == 1: res_df = region[(region['EU'] == 'no') & (region['coastline']=='yes')] elif i == 1: if j == 0: res_df = region[(region['EU'] == 'yes') & (region['coastline']=='no')] elif j == 1: res_df = region[(region['EU'] == 'yes') & (region['coastline']=='yes')] return res_df fig, axes = plt.subplots(2,2) fig.set_size_inches(15,15) region = updated_df.groupby(['EU','coastline', 'city']).mean() region = region.reset_index() colors = ['blue', 'red', 'orange'] for i in range(0,2): for j in range(0,2): df = get_region(region, i, j) n = df['city'].count() for color in colors: if(color == 'blue'): df2 = df[df['temperature'] < 6.0] elif(color == 'red'): df2 = df[df['temperature'] > 10.0] elif(color == 'orange'): df2 = df[ (df['temperature'] >= 6.0) & (df['temperature'] <= 10.0) ] df2.plot('city','latitude', kind = 'scatter', ax = axes[i][j], color = color) plt.sca(axes[i][j]) plt.xticks(range(n), np.arange(0,n,1)) axes[0][0].set_title('Not EU and Not Near Coastline') axes[0][1].set_title('Not EU and Near Coastline') axes[1][0].set_title('EU and Not Near Coastline') axes[1][1].set_title('EU and Near Coastline') plt.show() # We fill our graphs with blue cities first, then reds, then oranges. Hence the pattern below. 
#print(pd.__version__) #pip install --upgrade pandasFeature Selection This Notebook Attempts to Apply the Approach Described in "Short-term load forecasting using a two-stage sarimax model" (Tarsitano & Amerise) with the goal of efficiently producing parsimonious model parameters through backwards stepwise regression.We do not attempt to rigorously hypothesize that short term electricity loads, and bicycle counts behave in an analagous manner that makes the model approach generalizable, but rather explore the intuition that the model selection approach described may be similarly useful and specifically extensible to the bicycle count data, as both represent similar computational issues due to the relatively high frequency seasonal components in both model groups.import pmdarima as pm from pmdarima import arima from pmdarima import model_selection from pmdarima import pipeline from pmdarima import preprocessing as ppc from pmdarima.arima import ADFTest from sklearn.compose import ColumnTransformer from sklearn.preprocessing import PowerTransformer from sklearn.metrics import mean_squared_log_error, mean_squared_error from statsmodels.tsa.deterministic import CalendarSeasonality import cabi.etl.load as l import cabi.etl.transform as t import pandas as pd import numpy as np from matplotlib import pyplot as plt print("pmdarima version: %s" % pm.__version__) def RMSE(y_true, y_pred): return mean_squared_error(y_true, y_pred, squared=False)Load the Data Select Top Five Most Active ANCs in Either Direction Follow Up Ideas **FLAGGED FOLLOW UP**- Model Checkins/Checkouts by selecting start/ends from trips long- Model Poisson instead of SARIMA?counts = l.load_counts_full() pd.set_option('display.float_format', lambda x: '%.5f' % x) # 1A/1C has most outflow, 2E/2B/6D have most inflow on average counts.mean().sort_values() bot_five = counts.sum().sort_values().head(5).index top_five = counts.sum().sort_values().tail(5).index print(top_five, bot_five) model_groups = list(bot_five) + list(top_five) model_groups bot_five = ['1A', '1C', '3C', '5E', '4C'] top_five = ['6C', '2C', '6D', '2E', '2B'] hourly_groups = counts[model_groups].resample('1H').sum() hourly_groups = hourly_groups[hourly_groups.index > '2020-06-15'] hourly_groupsCreate Weekday/Hourly Dummies, Weekly Fourier Features to Backwards Eliminatedef get_seasonal_dummies(df): """Accepts a time-indexed df of hourly data, returns hourly and weekday dummies as a df to passed as exogenous variables in a SARIMAX model""" columns = df.columns new_df = df.copy() new_df['time'] = new_df.index # create weekday dummy generator wday_dumgen = ppc.DateFeaturizer(column_name='time', with_day_of_month=False) # since all have the same index, we can use any column in the df to generate the day_dums _, wday_dums = wday_dumgen.fit_transform(new_df[columns[0]], new_df) # drop the columns that aren't dummies wday_dums = wday_dums[wday_dums.columns[-7:]] # set the index for easy merging wday_dums.set_index(new_df.index, inplace=True) # create hourly dummy generator hourly_dumgen = CalendarSeasonality('H', 'D') # generate dummies hourly_dums = hourly_dumgen.in_sample(new_df.index) # merge results full_dums = wday_dums.merge(hourly_dums, on='time') return full_dums # for use with pmdarima, the timestamps must be in a column instead of the index hourly_groups['time'] = hourly_groups.index hourly_groups wday_dums = ppc.DateFeaturizer(column_name='time', with_day_of_month=False) # since all have the same index, we can use any column in the df to generate the day_dums _, 
day_dums = wday_dums.fit_transform(hourly_groups['1A'], hourly_groups) day_dums # drop the columns that aren't dummies day_dums = day_dums[day_dums.columns[-7:]] day_dums.set_index(hourly_groups.index, inplace=True) day_dums.columns day_dums hourly_dumgen = CalendarSeasonality('H', 'D') hourly_dummies = hourly_dumgen.in_sample(hourly_groups.index)Note on SparsitySee below representation of each hour of the day for each timestamp in the index.Note the large number of zero values this results in (for each row only one of 24 columns will have a non-zero value).We will first attempt to fit the data in this manner, but if it proves inefficient, it may be worth converting these columns to binary data, as in (23 = 10111, instead of 00000...1)hourly_dummies full_dums = day_dums.merge(hourly_dummies, on='time') full_dums hourly_groups = hourly_groups.drop('time', axis=1) hourly_groups get_seasonal_dummies(hourly_groups)Human Eval in Google Colab[Original Openai github source](https://github.com/openai/human-eval)[Paper](https://arxiv.org/pdf/2107.03374.pdf) Import modulesfrom data import write_jsonl, read_problems problems = read_problems() num_samples_per_task = 200 generate_one_completion = {"task_id": "test/0", "prompt": "def return1():\n", "canonical_solution": " return 1", "test": "def check(candidate):\n assert candidate() == 1", "entry_point": "return1"} samples = [ generate_one_completion for task_id in problems for _ in range(num_samples_per_task) ] write_jsonl("samples.jsonl", samples) len(problems) problems['HumanEval/1'] ## problems given by openai import json with open('problems.json', 'w') as fp: json.dump(problems, fp)update execution.py Note**This program exists to run untrusted model-generated code. Users are strongly encouraged not to do so outside of a robust security sandbox. The execution call in execution.py is deliberately commented out to ensure users read this disclaimer before running code in a potentially unsafe manner. See the comment in execution.py for more information and instructions.**To continue, you need to manually update `execution.py`, specifically uncomment `exec(check_program, exec_globals)`!python evaluate_functional_correctness.py ../data/example_samples.jsonl --problem_file=../data/example_problem.jsonlInformation about this notebookThis example script was provided as part of the Data Management Project (INF) within the TR-172 ["ArctiC Amplification: Climate Relevant Atmospheric and SurfaCe Processes, and Feedback Mechanisms" (AC)³](http://www.ac3-tr.de/) funded by the German Research Foundation (Deutsche Forschungsgemeinschaft, DFG)Author: , [Institute of Environmental Physics](http://www.iup.uni-bremen.de), University of Bremen, Germany, Github repository: https://github.com/ac3-tr/ac3-notebooks**Setup instructions for a reference Python Environment can be found on the [Github page](https://github.com/ac3-tr/ac3-notebooks)**import matplotlib.pyplot as plt import numpy as np import numpy.ma as ma import datetime as dt import cartopy.crs as ccrs import cartopy import pyhdf.SD as SD %matplotlib inlineSea Ice Concentration from AMSR Dataset resourcesThe data of the operational seaice concentration product can be downloaded via the link given below.**Authors** ., , and G.Heygster**Year** 2018**Institute** Institute of Environmental Physics, University of Bremen**URL** https://seaice.uni-bremen.de**Citeable Publication** ., , and G.Heygster (2008), Sea ice remote sensing using AMSR-E 89 GHz channels J. Geophys. Res.,vol. 
113, C02S03, https://doi.org/10.1029/2005JC003384 Reading example datasetAn .hdf-file providing the arctic seaice concentration of an arbitrary day (here 2018-10-29) was downloaded and saved in the working directory of this notebook. The .hdf file is opened using the pyhdf (or python-hdf4) module and the concentration values read into memory.data = SD.SD('./asi-AMSR2-n6250-20181029-v5.hdf') #print(data2.datasets()) asi = data.select('ASI Ice Concentration') concentration = asi.get() concentration = ma.masked_less(concentration, 1) data.end()Plotting the datasetUsing the Cartopy module, the seaice concentration can be plotted onto a map. Using Cartopy's coordinate reference system module, a North-Polar-Stereographic projection is created and ocean and land masks read in from the built-in Natural Earth API.The extent of the data array is set to match the [NSIDC projection convention](https://nsidc.org/ease/clone-ease-grid-projection-gt).crs = ccrs.NorthPolarStereo(central_longitude=-45,true_scale_latitude=70) land110m = cartopy.feature.NaturalEarthFeature('physical', 'land', '110m', edgecolor='None', facecolor='k') ocean110m = cartopy.feature.NaturalEarthFeature('physical', 'ocean', '110m', edgecolor='None', facecolor='lightgray') fig = plt.figure(figsize=(15, 8)) ax = fig.add_subplot(111, projection=crs) ax.set_title('2018-10-29') ax.add_feature(ocean110m) ax.add_feature(land110m) ax.set_extent([-3850000,3750000,-5350000,5850000],crs=crs) im = ax.imshow(concentration, extent=[-3850000,3750000,-5350000,5850000], zorder=30) cb = fig.colorbar(im, ax=ax, fraction=0.024, pad=0.01) cb.set_label('AMSR Sea Ice Concentration [%]', fontsize=15)Exercise 8.03Import the required Librariesimport numpy as np import keras from keras.layers import Dense from keras.models import Sequential from tensorflow import randomUsing TensorFlow backend.Initiate the Modelvgg_model = keras.applications.vgg16.VGG16()Check the model summaryvgg_model.summary()Model: "vgg16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) (None, 224, 224, 3) 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 _____________________________________________________________[...]Remove the last layerlast_layer = str(vgg_model.layers[-1]) np.random.seed(42) random.set_seed(42) classifier= Sequential() for layer in vgg_model.layers: if str(layer) != last_layer: classifier.add(layer)Recheck the summary & Last layerclassifier.summary()Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 
_________________________________________________________________ block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 ______________________________________________________[...]Freeze layersfor layer in classifier.layers: layer.trainable=FalseAdd a new layer & Check summary & last layerclassifier.add(Dense(1, activation='sigmoid')) classifier.summary()Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 ______________________________________________________[...]Complie the networkclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])Process the image and fit itCreate training and test data generatorsfrom keras.preprocessing.image import ImageDataGenerator generate_train_data = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) generate_test_data = ImageDataGenerator(rescale =1./255)Create training and test datasetstraining_dataset = generate_train_data.flow_from_directory('../Data/Dataset/training_set', target_size = (224, 224), batch_size = 32, class_mode = 'binary') test_datasetset = generate_test_data.flow_from_directory('../Data/Dataset/test_set', target_size = (224, 224), batch_size = 32, class_mode = 'binary')Found 10764 images belonging to 2 classes. 
Found 2674 images belonging to 2 classes.Fit the model to the training dataclassifier.fit_generator(training_dataset, steps_per_epoch = 100, epochs = 10, validation_data = test_datasetset, validation_steps = 30, shuffle=False)Epoch 1/10 100/100 [==============================] - 1306s 13s/step - loss: 0.4165 - accuracy: 0.8019 - val_loss: 0.4084 - val_accuracy: 0.8750 Epoch 2/10 100/100 [==============================] - 1265s 13s/step - loss: 0.3124 - accuracy: 0.8706 - val_loss: 0.2068 - val_accuracy: 0.8969 Epoch 3/10 100/100 [==============================] - 1280s 13s/step - loss: 0.2556 - accuracy: 0.8984 - val_loss: 0.1578 - val_accuracy: 0.8879 Epoch 4/10 100/100 [==============================] - 1246s 12s/step - loss: 0.2453 - accuracy: 0.8903 - val_loss: 0.1456 - val_accuracy: 0.9083 Epoch 5/10 100/100 [==============================] - 817s 8s/step - loss: 0.2383 - accuracy: 0.8997 - val_loss: 0.1684 - val_accuracy: 0.9000 Epoch 6/10 100/100 [==============================] - 661s 7s/step - loss: 0.2359 - accuracy: 0.9006 - val_loss: 0.1790 - val_accuracy: 0.9017 Epoch 7/10 100/100 [==============================] - 645s 6s/step - loss: 0.2169 - accuracy: 0.9106 - val_loss: 0.1694 - val_accuracy[...]Predict a new imageLoad the image and show the imageimport numpy as np from keras.preprocessing import image new_image = image.load_img('../Data/Prediction/test_image_2.jpg', target_size = (224, 224)) new_imageShow the class labelstraining_dataset.class_indicesMake the predictionnew_image = image.img_to_array(new_image) new_image = np.expand_dims(new_image, axis = 0) result = classifier.predict(new_image) if result[0][0] == 1: prediction = 'It is a flower' else: prediction = 'It is a car' print(prediction)It is a carPreparing the training and test sets, cross-validation and hyperparameter tuning, using the k-nearest neighbours method as an example. Assignment1. Choose a dataset for solving a classification or regression task.2. If necessary, remove or fill in missing values and encode the categorical features.3. Using the train_test_split method, split the data into training and test sets.4. Train a nearest-neighbours model for an arbitrarily chosen value of the hyperparameter K. Evaluate the quality of the model with three metrics suitable for the task.5. Build a model and evaluate its quality using cross-validation. Run experiments with three different cross-validation strategies.6. Tune the hyperparameter K using GridSearchCV and cross-validation.7. Repeat step 4 for the optimal value of the hyperparameter K that was found. Compare the quality of the resulting model with the quality of the model obtained in step 4.8.
Plot the learning and validation curves.import pandas as pd from enum import Enum from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from operator import itemgetter import matplotlib.pyplot as plt import numpy as np import sklearn.metrics from sklearn.neighbors import KNeighborsRegressor from sklearn.metrics import mean_squared_error as MSE, median_absolute_error as MedAE, r2_score as R2 from sklearn.model_selection import cross_val_score, cross_validate from sklearn.model_selection import KFold, RepeatedKFold, LeaveOneOut, LeavePOut, ShuffleSplit, StratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import learning_curve, validation_curve data = pd.read_csv("train.csv",sep=",",encoding = 'cp1251') data.head() data.shapeCheck for missing valuesnd = data.columns[data.isnull().any()] print(len(nd))0As we can see, there are no missing values Check for categorical featurescats = [col for col in data.columns if data[col].dtype=="object"] print(len(cats))12We have 12 columns with categorical features Replace the categorical features with numeric values using label encodingle = LabelEncoder() for col in cats: data[col]=le.fit_transform(data[col]) newcats = [col for col in data.columns if data[col].dtype=="object"] print(len(newcats)) y=data["value"] y.unique() x=data.drop("value",axis=1) X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42) # Size of the training set print(X_train.shape, y_train.shape) # Size of the test set print(X_test.shape, y_test.shape)(6980, 55) (6980,) (1746, 55) (1746,)Train a model using the k-nearest neighbours methodKNN = KNeighborsRegressor() KNN.fit(X_train,y_train) a= KNN.predict(X_test)Evaluate the model with the R2, MSE and MedAE metricsR2(y_test, a) MSE(y_test, a) MedAE(y_test, a)Train the model with cross-validation using three strategies and tune the hyperparametersll = [1,2,3,4,5,10,25,40,50,100,250,500,1000]1. KFoldscores = cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y,cv=KFold(n_splits=5), scoring = 'r2') print(np.mean(scores)) scores = -1*cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y,cv=KFold(n_splits=5), scoring = 'neg_mean_squared_error') print(np.mean(scores)) scores = -1*cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y,cv=KFold(n_splits=5), scoring = 'neg_median_absolute_error') print(np.mean(scores)) random_search_kFold = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'r2', cv= KFold(n_splits=5)) random_search_kFold.fit(x,y) random_search_kFold.best_params_ random_search_kFold = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'neg_mean_squared_error', cv= KFold(n_splits=5)) random_search_kFold.fit(x,y) random_search_kFold.best_params_ random_search_kFold = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'neg_median_absolute_error', cv= KFold(n_splits=5)) random_search_kFold.fit(x,y) random_search_kFold scores = cross_val_score(KNeighborsRegressor(n_neighbors=1000),x, y, cv=KFold(n_splits=5), scoring = 'r2') print(np.mean(scores)) scores = -1*cross_val_score(KNeighborsRegressor(n_neighbors=1000),x, y, cv=KFold(n_splits=5), scoring = 'neg_mean_squared_error') print(np.mean(scores)) scores = -1*cross_val_score(KNeighborsRegressor(n_neighbors=10),x, y, cv=KFold(n_splits=5), scoring = 'neg_median_absolute_error') print(np.mean(scores))163.454700000000032.
ShuffleSplitscores = cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y, cv=ShuffleSplit(n_splits=5, test_size=0.25), scoring = 'r2') print(np.mean(scores)) scores = -1 * cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y, cv=ShuffleSplit(n_splits=5, test_size=0.25), scoring = 'neg_mean_squared_error') print(np.mean(scores)) scores = -1* cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y, cv=ShuffleSplit(n_splits=5, test_size=0.25), scoring = 'neg_median_absolute_error') print(np.mean(scores)) random_search_Shuffled = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'r2', cv= ShuffleSplit(n_splits=5, test_size=0.25)) random_search_Shuffled.fit(x,y) random_search_Shuffled.best_params_ random_search_Shuffled = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'neg_mean_squared_error', cv= ShuffleSplit(n_splits=5, test_size=0.25)) random_search_Shuffled.fit(x,y) random_search_Shuffled.best_params_ random_search_Shuffled = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'neg_median_absolute_error', cv= ShuffleSplit(n_splits=5, test_size=0.25)) random_search_Shuffled.fit(x,y) random_search_Shuffled.best_params_ scores = cross_val_score(KNeighborsRegressor(n_neighbors=3),x, y, cv=ShuffleSplit(n_splits=5, test_size=0.25), scoring = 'r2') print(np.mean(scores)) scores = -1 * cross_val_score(KNeighborsRegressor(n_neighbors=3),x, y, cv=ShuffleSplit(n_splits=5, test_size=0.25), scoring = 'neg_mean_squared_error') print(np.mean(scores)) scores = -1* cross_val_score(KNeighborsRegressor(n_neighbors=3),x, y, cv=ShuffleSplit(n_splits=5, test_size=0.25), scoring = 'neg_median_absolute_error') print(np.mean(scores))81.009333333333333. 
RepeatedKFoldscores = cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y, cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring = 'r2') print(np.mean(scores)) scores = -1 * cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y, cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring = 'neg_mean_squared_error') print(np.mean(scores)) scores = -1* cross_val_score(KNeighborsRegressor(n_neighbors=5),x, y,cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring = 'neg_median_absolute_error') print(np.mean(scores)) random_search_n_kFold = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'r2', cv= RepeatedKFold(n_splits=5, n_repeats=5)) random_search_n_kFold.fit(x, y) random_search_n_kFold.best_params_ random_search_n_kFold = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'neg_mean_squared_error', cv= RepeatedKFold(n_splits=5, n_repeats=5)) random_search_n_kFold.fit(x, y) random_search_n_kFold.best_params_ random_search_n_kFold = GridSearchCV(estimator= KNeighborsRegressor(), param_grid= {'n_neighbors': ll}, scoring= 'neg_median_absolute_error', cv= RepeatedKFold(n_splits=5, n_repeats=5)) random_search_n_kFold.fit(x, y) random_search_n_kFold.best_params_ scores = cross_val_score(KNeighborsRegressor(n_neighbors=3),x, y, cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring = 'r2') print(np.mean(scores)) scores = -1 * cross_val_score(KNeighborsRegressor(n_neighbors=3),x, y, cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring = 'neg_mean_squared_error') print(np.mean(scores)) scores = -1* cross_val_score(KNeighborsRegressor(n_neighbors=2),x, y, cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring = 'neg_median_absolute_error') print(np.mean(scores)) def plot_validation_curve(estimator, title, X, y, param_name, param_range, cv, scoring='r2'): train_scores, test_scores = validation_curve( estimator, X, y, param_name=param_name, param_range=param_range, cv=cv, scoring=scoring, n_jobs=1) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.title(title) plt.xlabel(param_name) plt.ylabel("Score") plt.ylim(0.0, 1.1) lw = 2 plt.plot(param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw) plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color="darkorange", lw=lw) plt.plot(param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw) plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color="navy", lw=lw) plt.legend(loc="best") return plt n_range = range(3,33,3) plot_validation_curve(KNeighborsRegressor(), 'knn', x, y, param_name='n_neighbors', param_range=n_range, cv=RepeatedKFold(n_splits=5, n_repeats=5), scoring="r2")When I first ran this, my dataframes weren't "aligned". So it's very important to check your datasets after every load. 
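For example, a quick index-alignment check along the lines of the sketch below can catch misaligned rows early (a minimal sketch, assuming pandas DataFrames named dates, topics and nums as in the cells that follow; the check_aligned helper is hypothetical and not part of the original notebook):

```python
import pandas as pd

def check_aligned(*frames):
    """Raise if the given DataFrames do not share an identical index."""
    base = frames[0].index
    for f in frames[1:]:
        # Index.equals compares length, order and values of the two indexes
        if not base.equals(f.index):
            raise ValueError("DataFrames are not aligned on the same index")

# hypothetical usage, mirroring the sum(... == ...) checks done below
# check_aligned(dates, topics, nums)
```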
The correspondence between dates and topics and numerical features is critical for training!print(len(dates)) print(len(topics)) print(len(nums)) print(sum(nums.favorite_count >= 1)) sum(nums.index == dates.index) == len(dates) sum(nums.index == topics.index) == len(dates) sgd = SGDRegressor() sgd sgd = SGDRegressor().fit(topics.values, nums.favorite_count)Well, that was **much** faster...predicted_favorites = sgd.predict(topics.values) predicted_favorites np.sum(predicted_favorites >= 1)Well that seems more "balanced" at least. And it's nice to have a continuous score.np.sum(nums.favorite_count.values >= 1) from pug.nlp.stats import Confusion results = pd.DataFrame() results['predicted'] = pd.Series(predicted_favorites >= 1) results['truth'] = pd.Series(nums.favorite_count >= 1) conf = Confusion(results) conf results.predicted.corr(results.truth)Wait, why are we classifying with a regressor anyway?pd.Series(predicted_favorites).corr(nums.favorite_count)Not so hot...Balance the training again? Get rid of some negatives?pos = np.array(nums.favorite_count >= 1) neg = ~pos portion_pos = 2 * float(sum(pos)) / len(nums) mask = ((np.random.binomial(1, portion_pos, size=len(nums)).astype(bool) & neg) | pos) sgd = SGDRegressor().fit(topics[mask], nums.favorite_count[mask] >= 1) print(portion_pos) print(sum(mask)) print(sum(pos) * 2) print(sum(neg)) len(nums) results = pd.DataFrame() # you have to adjust the threshold as you add more negatives results['predicted'] = pd.Series(sgd.predict(topics.values) > .3) results['truth'] = pd.Series(nums.favorite_count.values >= 1) conf = Confusion(results) conf results.predicted.corr(results.truth) conf.stats_dictSo once again about 38% correlation is the best we can do... Don't try this at home! It never finished after 10 hours of running on my laptopsvr = SVR().fit(topics.values, nums.favorite_count)First we say what query and project we are interested inThis is using the NACSOS platformpid = 234 qid = 8318 qids = [8318, 8421, 8503] merged_query = Query.objects.get(pk=8510) docids = set(Doc.objects.filter(query__in=qids).values_list('id',flat=True)) docs = Doc.objects.filter(id__in=docids) p = Project.objects.get(pk=pid) #print(q.doc_set.count()) docs.count()And we retrieve those documents that have been screened in that query, and get the majority ratingimport statistics def mode_na(x): try: return statistics.mode([y for y in x if y is not np.NaN]) except: return np.NaN columns = [ 'id', 'content', 'title', 'PY', 'wosarticle__de', 'wosarticle__wc' ] # First get only the ratings from updated tags update_tags = Tag.objects.filter(pk__in=[5598, 5597, 5577, 5562, 5551, 5550, 5632, 5630, 5629]) updated_dos = DocOwnership.objects.filter( tag__in=update_tags, relevant__gt=0, doc__content__iregex='\w' ) dodf = pd.DataFrame.from_dict(updated_dos.order_by('finish').values( 'id','doc_id','user__username','relevant' )) dodf.loc[dodf['relevant']==2,'relevant'] = 0 dodf.loc[dodf['relevant']==3,'relevant'] = np.NaN gdf = dodf.groupby('doc_id').agg(lambda x: mode_na(x)).reset_index()[['doc_id','relevant']].rename(columns={"doc_id":"id"}) gdf['seen'] = 1 gdf.loc[pd.isna(gdf['relevant']),'seen'] = np.NaN updated_df = gdf ddf = pd.DataFrame.from_dict( Doc.objects.filter(pk__in=updated_df['id']).values(*columns) ) updated_df = pd.merge(ddf,updated_df) # Now get majority ratings from the rest of documents # Majority ratings dos = DocOwnership.objects.filter( query__in=qids, relevant__gt=0, doc__content__iregex='\w' ).exclude(doc__id__in=updated_df["id"]) dodf = 
pd.DataFrame.from_dict(dos.order_by('finish').values( 'id','doc_id','user__username','relevant' )) dodf.loc[dodf['relevant']==2,'relevant'] = 0 dodf.loc[dodf['relevant']==3,'relevant'] = np.NaN gdf = dodf.groupby('doc_id').agg(lambda x: mode_na(x)).reset_index()[['doc_id','relevant']].rename(columns={"doc_id":"id"}) gdf['seen'] = 1 gdf.loc[pd.isna(gdf['relevant']),'seen'] = np.NaN seen_df = gdf ddf = pd.DataFrame.from_dict( Doc.objects.filter(pk__in=seen_df['id']).values(*columns) ) seen_df = pd.merge(ddf,seen_df) seen_df = pd.concat([seen_df,updated_df]) print(seen_df.shape) seen_df.head() #ADDED FOR TEST testing=False if testing: seen_df = seen_df.head(1000) print(seen_df.shape)We have a separate list of documents in the query that have not been screenedt0 = time.time() #docs = Doc.objects.filter(pk__lt=1000,query=merged_query) docs = Doc.objects.filter(id__in=docids) unseen_docs = docs.filter( content__iregex='\w', wosarticle__dt__iregex='Article|Review' ).exclude( wosarticle__dt__iregex='book|proceedings|comment|retracted|editorial' ).exclude( id__in=seen_df['id'] ) unseen_df = pd.DataFrame.from_dict(list(unseen_docs.values(*columns))) unseen_df = unseen_df[columns] unseen_df['relevant'] = 0 unseen_df['seen'] = 0 print(unseen_df.shape) print(time.time()-t0) unseen_df.head() #ADDED FOR TEST if testing: unseen_df = unseen_df.head(1000) print(unseen_df.shape)And we merge the two, with a column "seen" recording if a document has been seen or notdjango.db.connection.close() df = (pd.concat([seen_df,unseen_df]) .sort_values('id') .sample(frac=1, random_state=1) .reset_index(drop=True) ) y = df['relevant'] seen_index = df[df['seen']==1].index unseen_index = df[df['seen']==0].index print(df.shape) df.head(10)(286144, 8)We also get the assignments of these documents to categories, and add these as further columns in our dataframeagain we get the majority ratings# first get only those ratings from the updated tags dudf = pd.DataFrame.from_dict( DocUserCat.objects.filter( doc__pk__in=updated_df['id'],category__project=p ).values('user__username','doc__id','category__level','category__name') ) updated_docusers = updated_dos.values_list('doc__id',"user__username") dudf = dudf[dudf[['doc__id', 'user__username']].apply(tuple, axis=1).isin(updated_docusers)] dudf['v'] = 1 dudf['cname'] = dudf['category__level'].astype(str) + " - " + dudf['category__name']# dudf = dudf.drop(columns=['category__level','category__name']) dudf_wide = dudf.pivot_table(index=['doc__id','user__username'],columns="cname", values="v").fillna(0) updated_gudf = dudf_wide.groupby(['doc__id']).agg(lambda x: st.mode(x)[0][0]).reset_index() dudf = pd.DataFrame.from_dict( DocUserCat.objects.filter( doc__pk__in=set(seen_df['id']) - set(updated_df['id']),category__project=p ).values('user__username','doc__id','category__level','category__name') ) dudf['v'] = 1 dudf['cname'] = dudf['category__level'].astype(str) + " - " + dudf['category__name']# dudf = dudf.drop(columns=['category__level','category__name']) dudf_wide = dudf.pivot_table(index=['doc__id','user__username'],columns="cname", values="v").fillna(0) gudf = dudf_wide.groupby(['doc__id']).agg(lambda x: st.mode(x)[0][0]).reset_index() #merge updated and old gudf = pd.concat([gudf,updated_gudf]) print(gudf.shape) gudf.tail()(1038, 4)And mergedf = pd.merge(df, gudf.rename(columns={"doc__id":"id"}), how="left").fillna(0) print(df.shape) df.head()(286144, 11)We have a look at the distribution of relevant and not relevant documentsfig, ax = plt.subplots() n = seen_df.shape[0] y = 
df['relevant'] for i, (name, group) in enumerate(seen_df.groupby('relevant')): ax.bar(i, group.shape[0]) ax.text(i, group.shape[0]+n*0.02, f'{group.shape[0]/n:.0%}',ha="center") ax.set_xticks([0,1]) ax.set_ylim(ymax=ax.get_ylim()[1]*1.05) ax.set_xlabel('Relevance') ax.set_ylabel('Number of documents')Now we "vectorize" the data, representing each document as a weighted count of its words and phrases (bag of words and bigrams). Or if we already did this previously, then we reload the vectorized dataimport pickle import scipy revectorize = True y = df['relevant'] if revectorize: vec = TfidfVectorizer( ngram_range=(1,2), min_df=4, max_df=0.8, strip_accents='unicode', max_features=10000, use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words="english",tokenizer=lu.tokenize ) vec.fit(df.loc[seen_index,'content'].astype(str)) ab_X = vec.transform(df['content'].astype(str)) with open ('vec.pickle','wb') as f: #changed dir from data/vec.pickle to just vec.pickle pickle.dump(vec, f) import scipy.sparse scipy.sparse.save_npz('X.npz', ab_X) else: with open ('vec.pickle','rb') as f: vec = pickle.load(f) ab_X = scipy.sparse.load_npz('X.npz')#changed dir from data/vec.pickle to just vec.pickle print(ab_X.shape)/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/feature_extraction/text.py:484: UserWarning: The parameter 'token_pattern' will not be used since 'tokenizer' is not None' warnings.warn("The parameter 'token_pattern' will not be used" /home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/feature_extraction/text.py:386: UserWarning: Your stop_words may be inconsistent with your preprocessing. Tokenizing the stop words generated tokens ['make'] not in stop_words. 'stop_words.' % sorted(inconsistent))We build 3 types of classifiers, and test these with k-fold cross validationdef cross_validate_models(X,y,clf_models, seen_index, n_splits=10, classes=None, upsample=False,roundup=False, df=None, stratified_k=False, test_index=None, p_threshold=None): if stratified_k: label_encoder = LabelEncoder() kf = StratifiedKFold(n_splits=n_splits) kfs = kf.split(X[seen_index],label_encoder.fit_transform(y[seen_index])) else: kf = KFold(n_splits=n_splits) kfs = kf.split(X[seen_index],y[seen_index]) i=0 def tpr(y_true, y_pred): return roc_curve(y_true, y_pred)[1] def fpr(y_true, y_pred): return roc_curve(y_true, y_pred)[0] def prec(y_true, y_pred): return precision_recall_curve(y_true,y_pred)[0] def rec(y_true, y_pred): return precision_recall_curve(y_true,y_pred)[1] scores = [ # name, function, on y when multiclas, on each y when multiclass, # proba ('p',precision_score, True, True, False), ('r',recall_score, True, True, False), ('f1', f1_score, True, True, False), ('e', accuracy_score, True, True, False), ('i', None, False, False, False), ('auc', roc_auc_score, True, True, True), ('tpr', tpr, False, True, True), ('fpr', fpr, False, True, True), ('prec', prec, False, True, True), ('rec', rec, False, True, True) ] if classes: scores += [ ('cov_err', coverage_error, True, False, False), ('LRAP', label_ranking_average_precision_score, True, False, False), ('LRL', label_ranking_loss, True, False, False) ] for model in clf_models: for m in scores: model[m[0]] = [] metrics = ['e'] if classes: for j, y_class in enumerate(classes): for m in scores: if m[1]: model[f'{m[0]}\n{y_class}'] = [] metrics += [f'p\n{y_class}', f'r\n{y_class}'] if test_index is not None: test_preds = [] for k_train, k_test in kfs: k_train = seen_index[k_train] k_test = seen_index[k_test] if test_index is not 
None: k_test = test_index if upsample: ros = RandomOverSampler(random_state=42) if classes: lp = LabelPowerset() yt = lp.transform(y) X_train, y_resampled = ros.fit_resample(X[k_train],yt[k_train]) y_train = lp.inverse_transform(y_resampled).todense() else: X_train, y_train = ros.fit_resample(X[k_train],y[k_train].todense()) else: X_train = X[k_train] y_train = y[k_train] i+=1 print(i) for model in clf_models: clf = model['model'] model['i'].append(i) #clf = SVC(kernel='rbf',probability=True) clf.fit(X_train,y_train) predictions = clf.predict(X[k_test]) try: predictions_proba = clf.predict_proba(X[k_test]) except: predictions_proba = predictions print("WARNING! Can't predict probabilities with this model, just using binary predictions") if hasattr(predictions_proba,"todense"): predictions_proba = predictions_proba.todense() if hasattr(predictions,"todense"): predictions = predictions.todense() if test_index is not None: test_preds.append(predictions_proba) if p_threshold is not None: predictions = np.where(predictions_proba>=p_threshold,1,0)[:,1] if classes: if roundup: for j, c in enumerate(predictions_proba.argmax(axis=1)): predictions[j,c] = 1 for m in scores: if m[4]: y_pred = predictions_proba else: y_pred = predictions if not m[1] or not m[2]: continue try: model[m[0]].append(m[1](y[k_test],y_pred,average="weighted")) except TypeError: model[m[0]].append(m[1](y[k_test],y_pred)) except ValueError: pass for j, y_class in enumerate(classes): # if y[k_train,i].sum() == 0: # print("no labels for {y_class}") for m in scores: if not m[1]: continue if m[3]: # if do this metric on each class if m[4]: # if use probabilities y_pred = predictions_proba else: y_pred = predictions try: model[f'{m[0]}\n{y_class}'].append(m[1](y[k_test,j],y_pred[:,j])) except: model[f'{m[0]}\n{y_class}'].append(None) if df is not None: df.loc[k_test,f"{y_class} - k_prediction"] = predictions_proba[:,j] df.loc[k_test,f"{y_class} - k_prediction_binary"] = predictions[:,j] else: for m in scores: if not m[1]: continue model[m[0]].append(m[1](y[k_test],predictions)) if df is not None: df.loc[k_test, "y_k_prediction"] = predictions_proba[:,1] if classes: if df is not None: return clf_models, metrics, df return clf_models, metrics else: if df is not None: return clf_models, df elif test_index is not None: return clf_models, np.array(test_preds) return clf_models from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import KFold from importlib import reload import scoping.utils.learning_utils as lu reload(scoping.utils.learning_utils) #NB: keep the preferred model last if you want to use the y_k prediction as it overwrites with every new model models = [ { 'title':"Neural Net", 'model': MLPClassifier( solver='lbfgs', alpha=0.1, hidden_layer_sizes=(10), random_state=2020 ) }, { 'title': 'Bayes', 'model': MultinomialNB(alpha=0.1) }, { 'title': 'RandForest', 'model': RandomForestClassifier(n_estimators=1000, max_depth=None, max_features="sqrt", min_samples_split=2, random_state= 2020) }, { 'title': 'SVM-rbf', 'model': SVC(kernel='rbf',class_weight='balanced',probability=True, random_state= 2020) }, ] models, df = lu.cross_validate_models(ab_X, y, models, seen_index,df=df) fig, axs = plt.subplots(1, len(models),dpi=125,figsize=(8,5),sharey=True) lu.plot_model_output(models, ['p','r','e'], fig, axs) plt.xlabel(['precision', 'recall', 'accuracy']) plt.show() model_df = pd.DataFrame(models) model_df.to_csv(f"Ar&Rev_models_relevance_{now}.csv",index=False) #removed doctables/ from dirWe can choose the 
best performing model (for our purposes) and fit it on our unseen data, in order to predict the probability that our unseen documents are relevantclf = SVC(kernel='rbf',class_weight='balanced',probability=True) y = df['relevant'] clf.fit(ab_X[seen_index],y[seen_index]) y_pred = clf.predict_proba(ab_X[unseen_index]) plt.close() threshold = 0.33 df['0 - relevance - prediction'] = None df.loc[unseen_index,'0 - relevance - prediction'] = y_pred[:,1] predicted_index = df[df['0 - relevance - prediction']>threshold].index fig, ax = plt.subplots() pdf = df.sort_values('0 - relevance - prediction').reset_index(drop=True) ax.plot(pdf.index,pdf['0 - relevance - prediction']) ax.axhline(threshold,linestyle="--",color="grey") rel = df[pdf['0 - relevance - prediction']>threshold].shape[0] tot = pdf[pdf['0 - relevance - prediction'].notna()].shape[0] print(f"{rel} out of {tot} documents are predicted to be relevant") df[['id','0 - relevance - prediction']].to_csv(f'predictions_{now}.csv',index=False)#removed doctables/ from dir plt.show()/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/pandas/core/indexes/range.py:720: FutureWarning: Support for multi-dimensional indexing (e.g. `obj[:, None]`) is deprecated and will be removed in a future version. Convert to a numpy array before indexing instead. return super().__getitem__(key) /home/galm/software/django/tmv/venv/lib/python3.6/site-packages/matplotlib/cbook/__init__.py:1402: FutureWarning: Support for multi-dimensional indexing (e.g. `obj[:, None]`) is deprecated and will be removed in a future version. Convert to a numpy array before indexing instead. ndim = x[:, None].ndim /home/galm/software/django/tmv/venv/lib/python3.6/site-packages/matplotlib/axes/_base.py:278: FutureWarning: Support for multi-dimensional indexing (e.g. `obj[:, None]`) is deprecated and will be removed in a future version. Convert to a numpy array before indexing instead. 
y = y[:, np.newaxis]Saving the outcome#Added to write relevant docs to csv #threshold = 0.33 # set if you didn't set it above RelevantDocs = df[(df['0 - relevance - prediction'] >= threshold) | (df['relevant'] == 1)] RelevantDocs[['id', 'PY', 'title', 'content']].to_csv(f'{now}_Ar&REv_PredictedRelevantSVM.csv', index = False) RelevantDocs.shape #writing a smaller DF with just the IDs, human-made labels, and machine prediction outdf = df[['id', 'relevant', 'seen', '0 - relevance - prediction']] outdf.loc[outdf['seen'] ==0, 'relevant'] = None outdf = outdf[['id', 'relevant', '0 - relevance - prediction']] outdf.columns = ['id', 'label', 'prediction'] outdf.to_csv(f'IdLabelPrediction_{now}.csv', index=False) outdf.head() #create a new query that can be used for topic modelling for example newq, created = Query.objects.get_or_create( project=p, creator=User.objects.get(username=""), title="The final run v2" ) T = Doc.query.through dqs = [T(doc_id=did, query=newq) for did in list(RelevantDocs['id'])] T.objects.bulk_create(dqs) newq.r_count = newq.doc_set.count() newq.save() # Create a tag of positvely predicted so we can screen based on those if we want tag, created = Tag.objects.get_or_create( title="positively predicted".format(now), query=q ) tag.save() for i, row in RelevantDocs.iterrows(): #using the index of the newly created dataframe on the full df d = Doc.objects.get(pk=row['id']) d.tag.add(tag) tag.update_tag()Evaluation of model#SIMPLE EXAMPLE TO GET AN IDEA FOR HOW MUCH ADDED TRAINING DATA ADDS #Requires re-training so will take a while from sklearn.model_selection import cross_val_score from sklearn import metrics #clf = SVC(kernel='rbf',class_weight='balanced',probability=True) #y = df['relevant'] #WITH A RANDOM SAMPLE scores = [] steps = np.linspace(0.05,1,10,endpoint=True) #basically: take a random sample from the relevance column, use the index to create a matching sample from ab_x for different sizes for n in steps: ysample = y[seen_index].sample(n=math.floor(y[seen_index].shape[0]*n)) score = cross_val_score(clf, ab_X[ysample.index], ysample, cv=5, scoring='f1') scores.append([len(ysample), score.mean(), score.std()*2]) scores plt.close() fig, ax = plt.subplots(figsize=(10,6)) xplot = [score[0] for score in scores] yplot = [score[1] for score in scores] yerr = [score[2] for score in scores] ax.plot(xplot, yplot, 'k-o') ax.fill_between(xplot,np.subtract(yplot,yerr), np.add(yplot, yerr), color='gray', alpha=0.2) ax.set_ylabel('f1 score') ax.set_xlabel('Number of documents in sample') ax.set_title('f1 scores for increasing sizes of test set', fontsize=14) ax.grid() plt.show() #Using the original k-fold crossvalidation: Area under curve plots from scipy import interp fig = plt.figure(dpi=125, figsize=(8,5)) for j, model in enumerate(models): ax = fig.add_subplot(2,3,j+1) mean_fpr = np.linspace(0, 1, 100) mean_auc = np.mean(model[f'auc']) std_auc = np.std(model[f'auc']) tprs = [] for i in range(len(model['i'])): viz = ax.plot( model[f'fpr'][i], model[f'tpr'][i], lw=0.5, alpha=0.5, color=f"C1" ) interp_tpr = interp(mean_fpr, model[f'fpr'][i], model[f'tpr'][i]) interp_tpr[0] = 0.0 tprs.append(interp_tpr) mean_tpr = np.mean(tprs, axis=0) mean_tpr[-1] = 1.0 ax.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=1, alpha=.8) ax.plot([0, 1], [0, 1], linestyle='--', lw=1, color='r', label='Chance', alpha=.8) ax.legend(fontsize=5) ax.set_aspect("equal") ax.set_title(model['title'],fontsize=8) ax.set_ylabel('True positive rate') 
ax.set_xlabel('False positive rate') std_tpr = np.std(tprs, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\pm$ 1 std. dev.') fig.tight_layout() #Precission-recall plots from scipy import interp fig = plt.figure(dpi=125, figsize=(8,5)) for j, model in enumerate(models): ax = fig.add_subplot(2,3,j+1) mean_fpr = np.linspace(0, 1, 100) mean_auc = np.mean(model[f'auc']) std_auc = np.std(model[f'auc']) tprs = [] for i in range(len(model['i'])): viz = ax.plot( model[f'rec'][i], model[f'prec'][i], lw=0.5, alpha=0.5, color=f"C1" ) ax.set_ylabel('precision') ax.set_xlabel('recall') ax.set_title(model['title'],fontsize=8) fig.tight_layout() from sklearn.metrics import f1_score, auc #predict probabilities #lr_probs = clf.predict_proba(testX) #r_probs = lr_probs[:, 1] #keep positive outcomes fig, ax = plt.subplots(dpi=150) lr_probs = df.loc[seen_index,'y_k_prediction'] lr_true = df.loc[seen_index,'relevant'] # predict class values #yhat = clf.predict(testX) # summarize scores lr_precision, lr_recall, _ = lr_precision, lr_recall, thresholds = precision_recall_curve(lr_true, lr_probs) #lr_f1, lr_auc = f1_score(lr_true, lr_probs), auc(lr_true, lr_probs) #print('For sample: f1=%.3f auc=%.3f' % (lr_f1, lr_auc)) # plot the precision-recall curves no_skill = len(lr_true[lr_true==1]) / len(lr_true) plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill') plt.plot(lr_recall, lr_precision, marker='.', label='SVM') # axis labels plt.xlabel('Recall') plt.ylabel('Precision') # show the legend plt.legend() # show the plot plt.show() plt.plot(thresholds,lr_precision[:-1]) plt.plot(thresholds,lr_recall[:-1]) #If we want to play with the thresholds, we can use this function to see what the added/removed data looks like import seaborn as sns def precision_recall_bins(lr_true, lr_probs, bt): fig, ax = plt.subplots(dpi=150) categories = [] for i in range(len(bt)-1): categories.append((bt[i+1],bt[i])) l = 0 for i,c in enumerate(categories): y_thresh_pred = np.where(lr_probs>c[0],1,0) y_bin_pred = np.where(np.logical_and(lr_probs<=c[1],lr_probs>c[0]),1,0) bin_p = precision_score(lr_true,y_bin_pred) thresh_p = precision_score(lr_true,y_thresh_pred) thresh_r = recall_score(lr_true,y_thresh_pred) width = y_bin_pred.sum()/lr_true.shape[0] if i==0: rl = "Relevant" il = "Irrelevant" else: rl=None il=None ax.bar(l, 1-bin_p,bottom=bin_p, width=width, align="edge",color="#fc8d59",ec="grey",alpha=0.5,label=il) ax.bar(l, bin_p, width=width, align="edge",color="#91bfdb",ec="grey",alpha=0.5,label=rl) ly = 1.15+0.2*i ax.annotate( f"$p>{c[0]}$ \nrecall: {thresh_r:.0%}\nprecision: {thresh_p:.0%}", (l+width,ly),(0,ly), arrowprops=dict(facecolor='black', shrink=0.04,width=0.2,headwidth=4), va="bottom", fontsize=6 ) l += width sns.despine() ax.spines['left'].set_bounds(0, 1) ax.set_ylim(ymax=ly) ax.set_yticks([t for t in ax.get_yticks() if t <=1]) ax.set_ylabel("Bin composition") ax.set_xlabel("Cumulative proportion of documents at threshold") ax.legend(loc="right") fig.tight_layout() precision_recall_bins(lr_true, lr_probs, [1,0.68, 0.5, 0.35, 0]) #Start with 1, end with 0 so we see whole dataset precision_recall_bins(lr_true, lr_probs, [1,0.35, 0]) plt.plot(thresholds,lr_recall[:-1]) from sklearn.metrics import f1_score, auc #predict probabilities #lr_probs = clf.predict_proba(testX) #r_probs = lr_probs[:, 1] #keep positive outcomes lr_probs = df.loc[seen_index,'y_k_prediction'] lr_true = 
df.loc[seen_index,'relevant'] # predict class values #yhat = clf.predict(testX) # summarize scores lr_precision, lr_recall, _ = lr_precision, lr_recall, thresholds = precision_recall_curve(lr_true, lr_probs) #lr_f1, lr_auc = f1_score(lr_true, lr_probs), auc(lr_true, lr_probs) #print('For sample: f1=%.3f auc=%.3f' % (lr_f1, lr_auc)) # plot the precision-recall curves no_skill = len(lr_true[lr_true==1]) / len(lr_true) plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill') plt.plot(lr_recall, lr_precision, marker='.', label='SVM') # axis labels plt.xlabel('Recall') plt.ylabel('Precision') # show the legend plt.legend() # show the plot plt.show() plt.plot(thresholds,lr_precision[:-1])Getting false positives/-negatives for a given threshold#highest predicted df.sort_values("y_k_prediction",ascending=False).head() #playing with the threshold (kind of irrelevant given additional analysis above, but easy reference) threshold = 0.36 likely = np.where(df['y_k_prediction']>threshold,1,0) p = precision_score(df.loc[seen_index,"relevant"],likely[seen_index]) r = recall_score(df.loc[seen_index,"relevant"],likely[seen_index]) print(f"Threshold: {threshold}, precision: {p:.2g}, recall: {r:.2g}") #print false positives just above the border #threshold = 0.33 fp = df[(df['relevant']==0) & (df[f'y_k_prediction']>threshold)] print(fp.shape) django.db.connection.close() for i, row in fp.sort_values('y_k_prediction',ascending=True).head(3).iterrows(): d = Doc.objects.get(pk=row['id']) print() print(row['title']) print(row['content']) print(row['id']) print(row['y_k_prediction']) print(d.docownership_set.filter(query=qid).values('user__username','relevant','finish','tag__title')) #False postives fp = df[(df['relevant']==0) & (df[f'y_k_prediction']>0.5)] print(fp.shape) django.db.connection.close() #print highest scoring ones for i, row in fp.sort_values('y_k_prediction',ascending=False).head(3).iterrows(): d = Doc.objects.get(pk=row['id']) print() print(row['title']) print(row['content']) print(row['id']) print(row['y_k_prediction']) print(d.docownership_set.filter(query=qid).values('user__username','relevant','finish','tag__title')) #False negatives fn = df[(df['relevant']==1) & (df[f'y_k_prediction']<0.5)] print(fn.shape) django.db.connection.close() for i, row in fn.sort_values('y_k_prediction',ascending=True).head(3).iterrows(): d = Doc.objects.get(pk=row['id']) print() print(row['title']) print(row['content']) print(row['id']) print(row['y_k_prediction']) print(d.docownership_set.filter(query=qid).values('user__username','relevant','finish','tag__title')) #create a tag so I can check them out on the platform tempq = merged_query #8510 tag, created = Tag.objects.get_or_create( title=f"false negatives{now}".format(now), query=tempq ) tag.save() for i, row in fn.sort_values('y_k_prediction',ascending=True).head(20).iterrows(): #using the index of the newly created dataframe on the full df d = Doc.objects.get(pk=row['id']) d.tag.add(tag) tag.update_tag() tag, created = Tag.objects.get_or_create( title=f"false positives{now}".format(now), query=tempq ) tag.save() for i, row in fp.sort_values('y_k_prediction',ascending=False).head(20).iterrows(): d = Doc.objects.get(pk=row['id']) d.tag.add(tag) tag.update_tag()Now we want to look at impact, mitigation and adaptationdf['categories'] = df['1 - Mitigation'] + df['1 - Adaptation'] + df['1 - Impact'] print("seen documents:",df.loc[df['seen']==1,].shape[0]) print("seen mitigation documents:", df.loc[df['seen']==1,'1 - Mitigation'].sum()) print("seen 
adaptation documents", df.loc[df['seen']==1,'1 - Adaptation'].sum()) print("seen impact documents", df.loc[df['seen']==1,'1 - Impact'].sum()) print("seen in multiple", df.loc[df['categories']>=2,].shape[0]) print("seen non-mit/ad documents",df.loc[(df['seen']==1) & (df['categories']==0),].shape[0]) fig, ax = plt.subplots(dpi=150) mitigation = df.loc[df['seen']==1,'1 - Mitigation'].sum() adaptation = df.loc[df['seen']==1,'1 - Adaptation'].sum() impact = df.loc[df['seen']==1,'1 - Impact'].sum() overlap = df.loc[df['categories']>=2,].shape[0] nonmitad = df.loc[(df['seen']==1) & (df['categories']==0),].shape[0] lw = 1 alpha=0.5 ec = "grey" ax.bar( 0,mitigation, alpha=alpha, label="Mitigation", lw=lw, edgecolor=ec, tick_label = str(mitigation) ) ax.bar( 0,adaptation,bottom=mitigation-overlap/3, alpha=alpha, label="Adaptation", lw=lw, edgecolor=ec ) ax.bar( 0,impact,bottom=mitigation-2*overlap/3+adaptation , alpha=alpha, label="impact", lw=lw, edgecolor=ec ) ax.bar( 0,nonmitad,bottom=mitigation-overlap+adaptation+impact, alpha=alpha, label="Not relevant", lw=lw, edgecolor=ec, ) ax.legend()We want to do multiclass prediction nowso we name the classes and create a matrix of class labelsclasses = ["1 - Mitigation","1 - Adaptation", "1 - Impact"] y = np.matrix(df[classes]) print(y.shape) rel_index = df.loc[df["relevant"]==1,].index(286144, 3)and cross validate a model to see how well we can predictfrom importlib import reload import scoping.utils.learning_utils as lu reload(scoping.utils.learning_utils) from sklearn.model_selection import KFold multimodels = [ { 'title': 'One vs rest SVC balanced', 'model': OneVsRestClassifier(SVC(kernel='linear', class_weight="balanced", probability=True)) }, ] multimodels, metrics, df = lu.cross_validate_models(ab_X, y, multimodels, rel_index, classes=classes, df=df) fig, axs = plt.subplots(1, len(multimodels),dpi=125,figsize=(8,5),sharey=True) lu.plot_model_output(multimodels, metrics, fig, [axs]) plt.show() descriptives = [ #'e', 'p', 'r', 'e\n1 - Mitigation', 'p\n1 - Mitigation', 'r\n1 - Mitigation', 'e\n1 - Adaptation', 'p\n1 - Adaptation', 'r\n1 - Adaptation', 'r\n1 - Impact', 'p\n1 - Impact', 'r\n1 - Impact' ] for des in descriptives: print(f'{des} - {statistics.mean(multimodels[0][des])}') print(multimodels[0][des]) print() #With better formatting and the overall relevance included in one graph boxplotdf = pd.DataFrame() boxplotdf['Category'] = descriptives gendf = pd.DataFrame({'Category' : ['e\n1 - Relevance', 'p\n1 - Relevance', 'r\n1 - Relevance']}) #get scores as list boxplotdf['Scores'] = [multimodels[0][des] for des in boxplotdf['Category']] gendf['Scores'] = [models[len(models)-1][des] for des in ['e', 'p', 'r']] #merge the general and the multi-model boxplotdf = pd.concat([gendf, boxplotdf], axis=0, ignore_index=True).reset_index() #make a new row for each in list (basically: unfold the list to new variable, add new variable as column & drop old column) s = boxplotdf.apply(lambda x: pd.Series(x['Scores']),axis=1).stack().reset_index(level=1, drop=True) s.name = 'Scores' boxplotdf = boxplotdf.drop('Scores', axis=1).join(s) #Get the category and the metric separately new = boxplotdf["Category"].str.split("\n1 - ", n = 1, expand = True) boxplotdf["Metric"]= new[0] boxplotdf.drop(columns =["Category"], inplace = True) boxplotdf["Category"]= new[1] #rename di = {'e': "Accuracy", 'p': "Precision", 'r': "Recall"} boxplotdf.replace({"Metric": di}, inplace=True) #plot sns.set(style="whitegrid") fig, ax = plt.subplots(figsize=(10,6),dpi=150) ax = 
sns.boxplot(x="Category", y="Scores", hue="Metric", data=boxplotdf, palette="Set2", linewidth = 2) ax = sns.swarmplot(x="Category", y="Scores", hue="Metric", dodge=True, #add points data=boxplotdf, color="grey", alpha = 0.75, size=4 ) ax.set_ylabel('Score', fontsize = 10, weight = 'bold') ax.set_xlabel('Classifier', fontsize = 10, weight = 'bold') title = 'Performance of Category Classifier ' subtitle = 'Accuracy, precision and recall per classifier for 10 k-fold cross-validation' ax.text(x=0.5, y=1.1, s=title, fontsize=12, weight='bold', ha='center', va='bottom', transform=ax.transAxes) ax.text(x=0.5, y=1.05, s=subtitle, fontsize=10, alpha=0.75, ha='center', va='bottom', transform=ax.transAxes) handles, labels = ax.get_legend_handles_labels() #l = plt.legend() ax.legend(handles[0:3], labels[0:3], fontsize=10, title_fontsize='10', loc = 'lower left', facecolor = 'white', edgecolor='grey') fig.tight_layout(rect=[0, 0, 1, 0.98]) fig.savefig(f'{title}_{subtitle}.png') for des in descriptives: print(f'{des} - - {statistics.mean(multimodels[0][des])}') print() print ('- - - - - ') for des in ['e', 'p', 'r']: print(f'{des} - - {statistics.mean(multimodels[0][des])}') print() statistics.mean(multimodels[0]['r\n1 - Impact']) boxplotdf = pd.DataFrame() boxplotdf['Category'] = descriptives gendf = pd.DataFrame({'Category' : ['e\n1 - Relevance', 'p\n1 - Relevance', 'r\n1 - Relevance']}) #get scores as list boxplotdf['Scores'] = [multimodels[0][des] for des in boxplotdf['Category']] gendf['Scores'] = [models[0][des] for des in ['e', 'p', 'r']] #merge the general and the multi-model boxplotdf = pd.concat([gendf, boxplotdf], axis=0, ignore_index = True) #make a new row for each in list (basically: unfold the list to new variable, add new variable as column & drop old column) s = boxplotdf.apply(lambda x: pd.Series(x['Scores']),axis=1).stack().reset_index(level=1, drop=True) s.name = 'Scores' boxplotdf = boxplotdf.drop('Scores', axis=1).join(s) boxplotdf from sklearn.model_selection import cross_val_score cross_val_score(clf, ab_X[seen_index],y[seen_index], cv=5, scoring='recall_samples')/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/model_selection/_validation.py:552: FitFailedWarning: Estimator fit failed. The score on this train-test partition for these parameters will be set to nan. 
Details: Traceback (most recent call last): File "/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/model_selection/_validation.py", line 531, in _fit_and_score estimator.fit(X_train, y_train, **fit_params) File "/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/svm/_base.py", line 162, in fit accept_large_sparse=False) File "/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/base.py", line 432, in _validate_data X, y = check_X_y(X, y, **check_params) File "/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/utils/validation.py", line 73, in inner_f return f(**kwargs) File "/home/galm/software/django/tmv/venv/lib/python3.6/site-packages/sklearn/utils/v[...]Multi-matriximport seaborn as sns sns.reset_defaults() classes = ["1 - Mitigation","1 - Adaptation", "1 - Impact"] cols = [f"{c} - k_prediction_binary" for c in classes] print(cols) y_pred = np.array(list(df.loc[seen_index,cols].values)) y_true = y[seen_index] confusion_matrix = np.zeros(shape=(len(classes)+1,len(classes)+1),dtype=float) norm = True for j, c in enumerate(classes): #rows for k, c in enumerate(classes): #cols n = 0 for i in range(len(seen_index)): if j==k: if y_true[i,j]+y_pred[i,k]==2: n+=1 else: if y_pred[i,j]+y_true[i,k]==2 and y_pred[i,k]+y_true[i,k]!=2: n+=1/y_pred[i,].sum() confusion_matrix[j,k]=n sums_pred = y_pred.sum(axis=1).ravel() confusion_matrix[k+1,j]=y_true[np.argwhere(sums_pred==0).ravel(),j].sum() sums_true = y_true.sum(axis=1).ravel() confusion_matrix[j,k+1]=y_pred[np.argwhere(sums_true==0).ravel(),j].sum() confusion_matrix[j+1,k+1] = y_pred[np.argwhere(sums_true+sums_pred==0)].shape[0] if norm is not False: confusion_matrix = confusion_matrix/confusion_matrix.sum(axis=0,keepdims=True) fig, ax = plt.subplots(dpi=125, figsize=(8,8)) if norm is not False: mat = ax.imshow(confusion_matrix, cmap="Blues", vmin=0, vmax=1, origin="lower") else: mat = ax.imshow(confusion_matrix, cmap="Blues",origin="lower") classes = classes + ["None"] for j, c in enumerate(classes): for k, c in enumerate(classes): n = confusion_matrix[j,k] if n > np.max(confusion_matrix)/2: color="#d9d9d9" else: color="#949680" if norm is not False: ax.text(k,j, f"{confusion_matrix[j,k]:.0%}",ha="center",color=color) else: ax.text(k,j, f"{confusion_matrix[j,k]:.1f}",ha="center",color=color) if j < len(classes)-1: ax.text(j,len(classes)-0.25,f"{y_true[:,j].sum():,.0f}",ha="center",va="center") ax.text(len(classes)-0.25, j, f"{y_pred[:,j].sum():,.0f}",ha="center",va="center") ax.set_xticks(list(range(len(classes)))) ax.set_xticklabels([x.replace("12 - ","") for x in classes],rotation=45,ha="right") ax.set_yticks(list(range(len(classes)))) ax.set_yticklabels([x.replace("12 - ","") for x in classes],ha="right") ax.set_xlabel("True value", fontsize = 10, weight = 'bold') ax.set_ylabel("Predicted value", fontsize = 10, weight = 'bold') #ax.set_xlim(xmax=len(classes)+1) cbar = fig.colorbar(mat, ax=ax, pad=0.15, shrink = 0.5) if norm is not False: cbar.set_label("% of True values") title = "Confusion matrix for predicted categories" ax.text(x=0.5, y=1.1, s=title, fontsize=12, weight='bold', ha='center', va='bottom', transform=ax.transAxes) fig.tight_layout(rect=[0, 0, 1, 0.98]) fig.savefig(f'confusion_matrix.png') fig.show model_df = pd.DataFrame(multimodels) #model_df.to_csv(f"multimodels_adaptation_mitigation_{now}.csv",index=False)Fitting multi-classpredicted_index = df[df['0 - relevance - prediction']>0.33].index LabeledRelevant_index = df[df['relevant']==1].index 
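# Note on the next cell: OneVsRestClassifier wraps the base SVC and fits one binary
# classifier per column of the label matrix y (Mitigation, Adaptation, Impact),
# so a single document can be assigned to several categories at once, and
# predict_proba returns one probability per class rather than a single score.
# The classifier is trained on the human-screened documents (seen_index) and then
# applied to the documents predicted relevant above the 0.33 threshold (predicted_index).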
clf = OneVsRestClassifier(SVC(kernel='linear', class_weight="balanced", probability=True)) classes = ["1 - Mitigation","1 - Adaptation", "1 - Impact"] y = np.matrix(df[classes]) y.shape clf.fit(ab_X[seen_index],y[seen_index]) #fit on the labeled as relevant data predictions = clf.predict_proba(ab_X[predicted_index]) #get probability scores for those predicted to be relevant binary = clf.predict(ab_X[predicted_index]) #get binary scores too #Add predictions to the dataframe or nan if no prediction for i,c in enumerate(classes): df[f"{c} - prediction"] = np.NaN df.loc[predicted_index, f"{c} - prediction"] = predictions[:, i] #Check print(df[df['1 - Adaptation - prediction'] > .1].shape) df[df['1 - Adaptation - prediction'] > .1].head(20)(3281, 23)False positives & NegativesFirst, let's look into the high number of adaptation docs being classified as impact#Just because I got tired of getting the titles confused, import markdown display from IPython.display import Markdown, display def printmd(string): display(Markdown(string)) confusion = df[ (df['1 - Adaptation'] == 1) & (df['1 - Adaptation - k_prediction_binary'] == 0) & (df['1 - Impact - k_prediction_binary'] == 1) ] for i, row in confusion.sort_values('1 - Impact - k_prediction',ascending=False).head(5).iterrows(): d = Doc.objects.get(pk=row['id']) print() printmd(f"**{row['title']}**") print(row['content']) #print(row['id']) printmd(f"*Impact prediction: {row['1 - Impact - k_prediction']} -- Adaptation prediction: {row['1 - Adaptation - k_prediction']}*" ) #print(d.docownership_set.filter(query=qid).values('user__username','relevant','finish','tag__title')) #Listing all false positives and false negatives #NB: this includes docs belonging to multiple classes where only one was classified wrongly classes_pred = [(c, c+" - k_prediction_binary") for c in classes[0:-1]] c_fp = pd.DataFrame(columns = df.columns) c_fn = pd.DataFrame(columns = df.columns) for c, c_p in classes_pred: c_fn = c_fn.append(df[(df[c]==1) & (df[c_p]==0)]) c_fp = c_fp.append(df[(df[c]==0) & (df[c_p]==1)]) print(f"False negatives: {c_fn.shape[0]} \nFalse positives: {c_fp.shape[0]}") df['categories'] = df['1 - Mitigation'] + df['1 - Adaptation'] + df['1 - Impact'] print(f"Total categorised: {df.loc[df['categories']>=1].shape[0]}") #create a tag so I can check them out on the platform tempq = merged_query tag, created = Tag.objects.get_or_create( title=f"Categories f neg {now}".format(now), query=tempq ) tag.save() for i, row in c_fn.iterrows(): #using the index of the newly created dataframe on the full df d = Doc.objects.get(pk=row['id']) d.tag.add(tag) tag.update_tag() #same for false positives tag, created = Tag.objects.get_or_create( title=f"Categories f pos {now}".format(now), query=tempq ) tag.save() for i, row in c_fp.iterrows(): #using the index of the newly created dataframe on the full df d = Doc.objects.get(pk=row['id']) d.tag.add(tag) tag.update_tag()To CSVcolumns = ['1 - Adaptation', '1 - Mitigation', '1 - Impact' ] ycols = [col + ' - k_prediction' for col in columns] pcols = [col + ' - prediction' for col in columns] outdf = df[['id', 'relevant', 'seen']] outdf[ycols] = df[ycols] outdf[pcols] = df[pcols] outdf.head() #writing a smaller DF with just the IDs, human-made labels, and machine prediction #outdf = df[['id', 'relevant', 'seen', '0 - relevance - prediction']] outdf.loc[outdf['seen'] ==0, 'relevant'] = None for n, ycol in enumerate(ycols): outdf[ycol] = np.where(outdf['seen'] == 1, outdf[ycol], outdf[pcols[n]]) outdf = outdf.rename(columns={ 
'relevant': 'label', '1 - Impact - k_prediction': 'impact', '1 - Adaptation - k_prediction': 'adaptation', '1 - Mitigation - k_prediction': 'mitigation' }) outdf = outdf[['id', 'label', 'adaptation', 'mitigation', 'impact']].dropna(thresh=5) outdf.to_csv(f'IdCategoryPrediction.csv', index=False) outdf.head() outdf.shape outdf.to_csv(f'IdCategoryPrediction.csv', index=False) outdf.shape outdf[outdf['mitigation'] == 1].shapeimport numpy as np k=np.log(19999/((20000/345)-1))/50 def c(t): return 20000 / (1+19999 * np.exp(-k*t)) t = np.arange(0,175,0.01) c(t) dcdt=k*c(t)*(1-(c(t)/20000)) import matplotlib.pyplot as plt plt.figure(figsize=(18,10)) plt.plot(t, c(t),c="red",linestyle="--") plt.axvline(np.log(19999)/k) plt.plot(c(t),dcdt) plt.axhline(590, linestyle="--")Sample Notebook: BigQuery table stats Imports%load_ext google.cloud.bigquery !pip install --user facets-overviewLoad data%%bigquery total_births SELECT * FROM `bigquery-public-data.samples.natality` LIMIT 100000 total_births.describe().transpose()Visualize statistics# Create the feature stats for the datasets and stringify it. import base64 from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator gfsg = GenericFeatureStatisticsGenerator() proto = gfsg.ProtoFromDataFrames([{'name': 'total_births', 'table': total_births}]) protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8") # Display the facets overview visualization for this data from IPython.core.display import display, HTML HTML_TEMPLATE = """ """ html = HTML_TEMPLATE.format(protostr=protostr) display(HTML(html))Zero Workersfield = 'workers_0' df.plot(kind='scatter', x=field+'_control', y=field+'_result')1 Workerfield = 'workers_1' df.plot(kind='scatter', x=field+'_control', y=field+'_result')2 Workersfield = 'workers_2' df.plot(kind='scatter', x=field+'_control', y=field+'_result')3 Or More Workers# 3 Workers field = 'workers_3_plus' df.plot(kind='scatter', x=field+'_control', y=field+'_result')#Major libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import copy as cp from datetime import datetime import warnings warnings.filterwarnings("ignore") #Cross Validattion tools from sklearn import datasets from sklearn.feature_selection import RFE from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler #Import Supervised Learning models from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet from sklearn.linear_model import Ridge from sklearn.linear_model import BayesianRidge from sklearn.tree import DecisionTreeRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn import svm import xgboost as xgbData Visualization# connect to google drive and extract csv files metrics= pd.read_csv('/content/drive/My Drive/Projects/FINESSE/post_metrics_and_comments.csv') metrics.head() # make a copy of original drive for modifications new_metrics = cp.deepcopy(metrics) # remove Null values new_metrics.dropna(axis= 1, inplace= True) new_metrics.info() RangeIndex: 11637 entries, 0 to 11636 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 post_url 11637 non-null object 1 username 11637 non-null object 2 date_posted 11637 
non-null object 3 img_urls 11637 non-null object 4 max_likes 11637 non-null int64 5 max_comments 11637 non-null int64 6 followers 11637 non-null int64 7 following 11637 non-null int64 8 concatenated_comments 11637 non-null object dtypes: int64(4), object(5) memory usage: 818.4+ KBSince we have the maximum number of likes for each post(max_likes), the main criteria for trending post can be ** the number of likes divided by mean(average) of likes for each username assigned to all the post_url**. it also can simply called **weighted_mean_likes**. Therefore we use feature engineering to extract features that help our regression model to predict the **weighted_mean_likes** more accuretely. Feature Engineering Median, means and weighted mean of likes and commentsAdding mean of likes/comments and weighted mean of likes/comments as 4 new featuresdef weighted_mean_columns(dataframe, col, new_col): mean = dataframe.groupby('username')[col].mean() # def weighted_average(matrix, col_name) hash ={} for idx in range(len(mean)): hash[mean.index[idx]] = mean[idx] dataframe[new_col] = None for idx in range(len(dataframe)): username = dataframe.username[idx] dataframe[new_col][idx] = round(dataframe[col][idx].astype(float) / hash[username] , 6 ) def mean(dataframe, col, new_col): mean = dataframe.groupby('username')[col].mean() hash = {} for idx in range(len(mean)): hash[mean.index[idx]] = mean[idx] dataframe[new_col] = None for idx in range(len(dataframe)): username = dataframe.username[idx] dataframe[new_col][idx] = round(hash[username] , 6) def median(dataframe, col, new_col): median = dataframe.groupby('username')[col].median() hash = {} for idx in range(len(median)): hash[median.index[idx]] = median[idx] dataframe[new_col] = None for idx in range(len(dataframe)): username = dataframe.username[idx] dataframe[new_col][idx] = round(hash[username] , 6) # running functions weighted_mean_columns(new_metrics, 'max_likes', 'weighted_mean_likes') weighted_mean_columns(new_metrics, 'max_comments', 'weighted_mean_comments') mean(new_metrics, 'max_likes', 'mean_likes') mean(new_metrics, 'max_comments', 'mean_comments') median(new_metrics, 'max_likes', 'median_likes') # changing the data type from object to float new_metrics['weighted_mean_likes'] = new_metrics['weighted_mean_likes'].astype(float) new_metrics['weighted_mean_comments'] = new_metrics['weighted_mean_comments'].astype(float) new_metrics['mean_likes'] = new_metrics['mean_likes'].astype(float) new_metrics['mean_comments'] = new_metrics['mean_comments'].astype(float) new_metrics['median_likes'] = new_metrics['median_likes'].astype(float) new_metrics.head() # remove all the null values new_metrics.dropna(axis=0, inplace=True) new_metrics.isnull().sum()Adding days of week(Monday, Tuesday, ...)#find the start and end date of data print(new_metrics['date_posted'].min()) print(new_metrics['date_posted'].max()) # Adding days as a features new_metrics['Monday'] = 0 new_metrics['Tuesday'] = 0 new_metrics['Wednesday'] = 0 new_metrics['Thursday'] = 0 new_metrics['Friday'] = 0 new_metrics['Saturday'] = 0 new_metrics['Sunday'] = 0 for i, dates in enumerate(new_metrics['date_posted']): if i == 0: start_day = datetime(2020,4,1) date_time = datetime.strptime(dates, f'%Y-%m-%d') n = (date_time - start_day).days %7 if n==0: new_metrics['Wednesday'][i] = 1 elif n==1: new_metrics['Thursday'][i] = 1 elif n==2: new_metrics['Friday'][i] = 1 elif n==3: new_metrics['Saturday'][i] = 1 elif n==4: new_metrics['Sunday'][i] = 1 elif n==5: new_metrics['Monday'][i] = 1 elif n==6: 
new_metrics['Tuesday'][i] =1 new_metrics.head()Adding days that days that post was in top trending hashtags:hashtags = pd.read_csv('/content/drive/My Drive/Projects/FINESSE/hashtag_top_appearances.csv') hashtags.head() hashtags['post_url'] = hashtags['post_url'].astype(str) hashtags.info() new_metrics['days_in_hashtag_top_section'] = 0 for idx, hashtag1 in enumerate(new_metrics['post_url']): for idx2, hashtag2 in enumerate(hashtags['post_url']): if hashtag2 == hashtag1: new_metrics['days_in_hashtag_top_section'][idx] = hashtags['days_in_hashtag_top_section'][idx2] break new_metrics.head()Adding number of posts for each usernamenew_metrics['number_of_posts'] = 0 dummy = new_metrics.groupby('username')['post_url'].count() for idx, line in enumerate(new_metrics['username']): new_metrics['number_of_posts'][idx] = dummy[line] new_metrics.head()Correlation Analysis Find the correlation of other parameters tocorrelation = new_metrics.corr(method='spearman') columns = correlation.nlargest(10, 'weighted_mean_likes').index correlation_map = np.corrcoef(new_metrics[columns].values.T) sns.set(font_scale=0.6) sns.set(rc={'figure.figsize':(11.7,8.27)}) heatmap = sns.heatmap(correlation_map, cbar=True, annot=True, square=True, fmt='.2f', yticklabels=columns.values, xticklabels=columns.values) plt.show()The most significant correlations are listed below:correlations=new_metrics.corr() attrs = correlations.iloc[:-1,:-1] # all except target threshold = 0.5 important_corrs = (attrs[abs(attrs) > threshold][attrs != 1.0]).unstack().dropna().to_dict() unique_important_corrs = pd.DataFrame( list(set([(tuple(sorted(key)), important_corrs[key]) \ for key in important_corrs])), columns=['Attribute Pair', 'Correlation']) # sorted by absolute value unique_important_corrs = unique_important_corrs.loc[ abs(unique_important_corrs['Correlation']).argsort()[::-1]] unique_important_corrs new_metrics[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday','Saturday','Sunday','weighted_mean_likes']].groupby(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday','Saturday','Sunday'], as_index=False).mean().sort_values(by='Sunday', ascending=False) sns.distplot(new_metrics['weighted_mean_likes'], color="r", kde=False) plt.title("Distribution of Likes Divided by Mean") plt.ylabel("Number of Occurences") plt.xlabel("Number of Likes Divided by Mean Likes for each user");Return unbiased skew over requested axis. $# Measuring the skewness of the weighted_mean_likes column which # Skewness is the indication of symmetry in the distribution and the equatuin is E((X-mean)/(standard_deviation)**3) new_metrics['weighted_mean_likes'].skew() # measuring the Kurtosis of the weighted_mean_likes column. 
# Kurtosis measure the strength of long tail in the distribution and the equation is E((X-mean)/(standard_deviation)**4) new_metrics['weighted_mean_likes'].kurt() plt.scatter(new_metrics['followers'], new_metrics["weighted_mean_likes"], color='orange') plt.title("Number of Likes Divided by Mean Likes vs Number of Followers") plt.xlabel("Number of Followers") plt.ylabel("Number of Likes Divided by Mean Likes");Training and Predictions with the modelnew_metrics.drop(['post_url','username', 'date_posted', 'img_urls', 'concatenated_comments'], axis=1, inplace= True) new_metrics.sort_values(by = 'weighted_mean_likes', ascending = True, inplace= True) new_metrics.tail() new_metrics_original = cp.deepcopy(new_metrics) X, y = new_metrics.drop('weighted_mean_likes',axis=1), new_metrics.weighted_mean_likes X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=44) # X.info() models = [] models.append(('GBM',GradientBoostingRegressor())) models.append(('LR',LinearRegression())) models.append(('BYR', BayesianRidge())) models.append(("KNN", KNeighborsRegressor())) models.append(('XGB', xgb.XGBRegressor())) for name,model in models: kfold = KFold(n_splits=2, random_state=22) cv_result = cross_val_score(model,X_train,y_train, cv = kfold, scoring = "r2") print(name, cv_result) X, y = new_metrics_original.drop('weighted_mean_likes', axis=1), new_metrics_original.weighted_mean_likes X_train2, X_test2, y_train2, y_test2 = train_test_split(X, y, test_size=0.2, shuffle = False) model = xgb.XGBRegressor() model.fit(X_train,y_train) predictions = model.predict(X_test2) y_train.head() compare = pd.DataFrame({'Prediction': predictions, 'Ground Truth' : y_test2}) print(compare.tail(10)) pred= sorted(predictions, reverse= True) print(pred[:10])[19:15:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror. 
Prediction Ground Truth 4048 5.976807 5.518340 10091 3.592964 5.602607 10917 5.343872 5.697391 3451 4.395563 5.768078 5364 5.564166 5.918774 1706 5.726154 6.253734 3868 5.595409 6.448727 2911 6.724216 7.675192 3120 7.076157 9.623617 3406 9.577948 11.498964 [9.577948, 7.076157, 6.7242155, 5.9768066, 5.7261543, 5.595409, 5.5641656, 5.343872, 4.4611506, 4.452263]Top 5 predicted post are as follows:metrics.loc[[3406, 3120, 2911, 4048, 1706], 'post_url']Automatic alignment on labelsdata = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'], 'population': [11.3, 64.3, 81.3, 16.9, 64.9], 'area': [30510, 671308, 357050, 41526, 244820], 'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']} countries = pd.DataFrame(data).set_index('country') countries`Series` alignment Let's define a table with natural increase rates in 2013 (data from World Bank):death_rate = pd.Series([10, 9, 11, 8, 9], index=['Poland','United Kingdom', 'Germany', 'Netherlands', 'France']) print(death_rate) birth_rate = pd.Series([10, 9, 10, 12], index=['Netherlands', 'Germany', 'Poland', 'France']) print(birth_rate)Netherlands 10 Germany 9 Poland 10 France 12 dtype: int64Now we calculate the natural increae by subtracting death rate from birth rate:natural_increase = birth_rate - death_rate print(natural_increase)France 3 Germany -2 Netherlands 2 Poland 0 United Kingdom NaN dtype: float64Note that the rows where the two series did not overlap contain missing values (`NaN` = Not a Number) and that the data were properly aligned on the index.pop_change = pd.DataFrame({'death rate' : death_rate, 'birth rate' : birth_rate, 'natural increase' : natural_increase})Missing values We can remove the missing data using `dropna` method:pop_change.dropna(inplace=True) pop_changeEXERCISE: Calculate estimated population in 2014 by summing the population and natural increase (remember that the natural increase is given per 1000 people). EXERCISE: Calculate ratio of the highest and lowest estimated population in 2014. Joining two data frames Let's now try to add the data to the country data:countries.join(pop_change)There are four different ways we can handle the missing rows: * left (default) — take all the rows of the *left* data frame* right — take all the rows of the *right* data frame* inner — take the common rows of both data frames* outer — take all the rows present in either or both data framesNote that the methods are similar to SQL JOIN clause.countries.join(pop_change, how='right')EXERCISE: Try inner and outer join. What's the difference? Groupby operations Some 'theory': the groupby operation (split-apply-combine)By "group by" we are referring to a process involving one or more of the following steps* **Splitting** the data into groups based on some criteria* **Applying** a function to each group independently* **Combining** the results into a data structureSimilar to SQL `GROUP BY` The example of the image in pandas syntax:df = pd.DataFrame({'key':['A','B','C','A','B','C','A','B','C'], 'data': [0, 5, 10, 5, 10, 15, 10, 15, 20]}) df df.groupby('key').aggregate('sum') # np.sum df.groupby('key').sum()You can also simply count members of each split:df.groupby('key').size()Movie database These exercises are based on the [PyCon tutorial of hodes](https://github.com/brandon-rhodes/pycon-pandas-tutorial/) (so all credit to him!) and the datasets he prepared for that. 
You can download these data from here: [`titles.csv`](https://drive.google.com/file/d/0B3G70MlBnCgKa0U4WFdWdGdVOFU/view?usp=sharing) and [`cast.csv`](https://drive.google.com/file/d/0B3G70MlBnCgKRzRmTWdQTUdjNnM/view?usp=sharing) and put them in the `/data` folder.cast = pd.read_csv('data/cast.csv') cast[10:15] titles = pd.read_csv('data/titles.csv') titles.head()EXERCISE: Using groupby(), plot the number of films that have been released each year in the history of cinema. EXERCISE: Use groupby() to determine how many roles are listed for each of The Pink Panther movies. Custom grouping criteria You can also group by the values on another array under the condition that this array has the length equal to the number of rows:greek = ['α', 'β', 'β', 'β', 'β', 'α', 'β','α', 'α'] df.groupby(greek).max()The values for the grouping array can be also computed from values in the data frame. For example, to count odd and even number in the data column we could simply:df.groupby(df['data'] % 2).size()EXERCISE: Using groupby(), plot the number of films that have been released each **decade** in the history of cinema. EXERCISE: Use groupby() to plot the number of "Hamlet" films made each decade. Multiple groups Note that you can also groupby on multiple keys:df['type'] = np.where(df['data'] % 2, 'odd', 'even') print(df) df.groupby(['type', 'key']).sum() df['type'] = np.where(df['data'] % 2, 'odd', 'even') print(df) df['data']data key type 0 0 A even 1 5 B odd 2 10 C even 3 5 A odd 4 10 B even 5 15 C odd 6 10 A even 7 15 B odd 8 20 C evenNote that it creates a *hierarchical index*. More on that later. EXERCISE: List each of the characters that has portrayed at least twice. EXERCISE: List, in order by year, each of the films in which has played more than 1 role. EXERCISE: How many leading (n=1) roles were available to actors, and how many to actresses, in each year of the 1950s? Value counts A useful shortcut to calculate the number of occurences of certain values is `value_counts` (this is somewhat equivalent to `df.groupby(key).size())`)For example, what are the most occuring movie titles?titles.title.value_counts().head()EXERCISE: What are the 11 most common character names in movie history? Custom aggregate functions Aggregate function could be any function accepting the `Series` object. For example, let's calculate most frequent apperances in each year of last decade:def most_frequent(x): return x.value_counts().index[0] cast.loc[(cast['year'] >= 2010) & (cast['year'] < 2020), ['year', 'name']].groupby('year').agg(most_frequent)Chapter 3. Classification MNISTScikit-Learn provides many helper functions to download popular datasets. MNIST is one of them, which is a set of 70,000 small images of digits handwritten by high school students and employees of the US Census Bureau. Each image is labeled with the digit it represents.from sklearn.datasets import fetch_openml mnist = fetch_openml('mnist_784', version=1) mnist.keys() X, y = mnist['data'], mnist['target'] print('X size', X.shape) print('label size:', y.shape)X size (70000, 784) label size: (70000,)There are 70,000 images, and each image has 784 features. This is because each image is 28 × 28 pixels, and each feature simply represents one pixel’s intensity, from 0 (white) to 255 (black). Let’s take a peek at one digit from the dataset. 
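One detail worth checking before that (a minimal sketch; depending on your scikit-learn version, `fetch_openml` may return pandas objects by default, which is why the cells below go through `.iloc` and `.values`):
print(type(X), type(y))           # typically a pandas DataFrame and a Series
print(X.iloc[0].values.shape)     # (784,), the raw feature vector of the first image
print(y.iloc[0])                  # its label, still stored as a string at this point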
All you need to do is grab an instance’s feature vector, reshape it to a 28 × 28 array, and display it using Matplotlib’s `imshow()` function:import matplotlib as mpl import matplotlib.pyplot as plt some_digit = X.iloc[0].values some_digits_image = some_digit.reshape(28, 28) plt.imshow(some_digits_image, cmap='binary') plt.axis("off") plt.show() print(y[0]) # Let's cast labels as integers import numpy as np y = y.astype(np.uint8)The MNIST dataset is actually already split into a training set (the first 60,000 images) and a test set (the last 10,000 images):X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]Training a Binary ClassifierLet's try to identify only one digit, the "5". This will be an example of a _binary classifier_, capable of distinguishing between just two classes, 5 and not-5.# Set for all 5s y_train_5 = (y_train == 5) y_test_5 = (y_test == 5)Now let’s pick a classifier and train it. A good place to start is with a _Stochastic Gradient Descent (SGD)_ classifier. This classifier handles large datasets efficiently, because it processes training instances independently, one at a time.from sklearn.linear_model import SGDClassifier sgd_clf = SGDClassifier(random_state=42) sgd_clf.fit(X_train, y_train_5) # Predicting if the digit is 5 sgd_clf.predict([some_digit])Performance Measures Measuring Accuracy Using Cross-ValidationRemember that K-fold cross-validation means splitting the training set into K folds (in this case, three), then making predictions and evaluating them on each fold using a model trained on the remaining folds.from sklearn.model_selection import cross_val_score cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring='accuracy')Before we get too excited, let’s look at a very dumb classifier that just classifies every single image in the “not-5” class:from sklearn.base import BaseEstimator class Never5Classifier(BaseEstimator): def fit(self, X, y=None): pass def predict(self, X): return np.zeros((len(X), 1), dtype=bool) never_5_clf = Never5Classifier() cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring='accuracy')That’s right, it has over 90% accuracy! This is simply because only about 10% of the images are 5s, so if you always guess that an image is not a 5, you will be right about 90% of the time. This demonstrates why accuracy is generally not the preferred performance measure for classifiers, especially when you are dealing with _skewed datasets_. Confusion MatrixThe general idea is to count the number of times instances of class A are classified as class B. For example, to know the number of times the classifier confused images of 5s with 3s, you would look in the fifth row and third column of the confusion matrix. To compute the confusion matrix, you first need to have a set of predictions so that they can be compared to the actual targets. The `cross_val_predict()` function performs K-fold cross-validation, but instead of returning the evaluation scores, it returns the predictions made on each test fold. This means that you get a clean prediction for each instance in the training set.from sklearn.model_selection import cross_val_predict y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)Let's get the confusion matrix by passing it the target classes and the predicted classes:from sklearn.metrics import confusion_matrix confusion_matrix(y_train_5, y_train_pred)Each row in a confusion matrix represents an _actual class_, while each column represents a _predicted class_.
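As a quick illustration of that layout, here is a toy sketch with made-up labels (`y_true_toy` and `y_pred_toy` are hypothetical lists, not part of the MNIST run; `confusion_matrix` is already imported above):
y_true_toy = [False, False, False, False, True, True]   # four actual negatives, two actual positives
y_pred_toy = [False, True, False, False, True, False]
print(confusion_matrix(y_true_toy, y_pred_toy))
# Row 0 (actual negatives): [TN, FP] -> [3, 1]
# Row 1 (actual positives): [FN, TP] -> [1, 1]
The MNIST confusion matrix printed above follows the same convention.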
The first row of this matrix considers non-5 images (the negative class): 53,892 of them were correctly classified as non-5s (they are called true negatives), while the remaining 687 were wrongly classified as 5s (false positives). The second row considers the images of 5s (the positive class): 1,891 were wrongly classified as non-5s (false negatives), while the remaining 3,530 were correctly classified as 5s (true positives). An interesting metric to look at is the accuracy of the positive predictions; this is called the precision of the classifier. _precision_= $\frac{TP}{TP+FP}$. The $TP$ is the number of true positives, and $FP$ is the number of false positives. Precision is typically used along with another metric named _recall_, also called sensitivity or the true positive rate (TPR): this is the ratio of positive instances that are correctly detected by the classifier. _recall_=$\frac{TP}{TP+FN}$. $FN$ is the number of false negatives. Precision and Recallfrom sklearn.metrics import precision_score, recall_score precision_score(y_train_5, y_train_pred) recall_score(y_train_5, y_train_pred)It is often convenient to combine precision and recall into a single metric called the $F_1$ score, in particular if you need a simple way to compare two classifiers. The $F_1$ score is the harmonic mean of precision and recall. Whereas the regular mean treats all values equally, the harmonic mean gives much more weight to low values.$F_1 = \frac{2}{\frac{1}{precision} + \frac{1}{recall}}=\frac{TP}{TP+\frac{FN+FP}{2}}$from sklearn.metrics import f1_score f1_score(y_train_5, y_train_pred)The $F_1$ score favors classifiers that have similar precision and recall. This is not always what you want: in some contexts you mostly care about precision, and in other contexts you really care about recall. For example, if you trained a classifier to detect videos that are safe for kids, you would probably prefer a classifier that rejects many good videos (low recall) but keeps only safe ones (high precision), rather than a classifier that has a much higher recall but lets a few really bad videos show up in your product (in such cases, you may even want to add a human pipeline to check the classifier’s video selection). On the other hand, suppose you train a classifier to detect shoplifters in surveillance images: it is probably fine if your classifier has only 30% precision as long as it has 99% recall (sure, the security guards will get a few false alerts, but almost all shoplifters will get caught). Unfortunately, you can’t have it both ways: increasing precision reduces recall, and vice versa. This is called the precision/recall trade-off. How to decide which threshold to use for **SGDClassifier**?
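To see what a threshold does concretely, here is a small sketch: it reuses `sgd_clf` and `some_digit` from above, and the cutoff of 8000 is only an illustrative value, not a recommendation.
y_score = sgd_clf.decision_function([some_digit])
print(y_score)          # the raw decision score for this one instance
print(y_score > 0)      # the default behaviour: predict "5" whenever the score is above 0
print(y_score > 8000)   # a higher threshold trades recall for precision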
First, use the `cross_val_predict()` function to get the scores of all instances in the training set, but this time specify that you want to return decision scores instead of predictions:y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method='decision_function')Compute precision and recall for all possible thresholds:from sklearn.metrics import precision_recall_curve precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores) # Plot precision and recall as functions of the threshold value def plot_precision_recall_vs_threshold(precisions, recalls, thresholds): plt.figure(figsize=(8,4)) plt.plot(thresholds, precisions[:-1], 'b--', label='Precision') plt.plot(thresholds, recalls[:-1], 'g-', label='Recall') plt.grid(axis='both') plt.legend() plot_precision_recall_vs_threshold(precisions, recalls, thresholds) plt.show()Suppose you decide to aim for 90% precision. You look up the first plot and find that you need to use a threshold of about 8,000. To be more precise you can search for the lowest threshold that gives you at least 90% precision (`np.argmax()` will give you the first index of the maximum value, which in this case means the first `True` value).threshold_90_precision = thresholds[np.argmax(precisions >= 0.90)] # Now let's make prediction with this threshold y_train_pred_90 = (y_scores >= threshold_90_precision) print("Precision:", precision_score(y_train_5, y_train_pred_90)) print("Recall:", recall_score(y_train_5, y_train_pred_90))Precision: 0.9000345901072293 Recall: 0.4799852425751706A high-precision classifier is not very useful if its recall is too low! The ROC CurveThe _receiver operating characteristic (ROC)_ curve is another common tool used with binary classifiers. It is very similar to the precision/recall curve, but instead of plotting precision versus recall, the ROC curve plots the true positive rate (another name for recall) against the false positive rate (FPR). The FPR is the ratio of negative instances that are incorrectly classified as positive. It is equal to 1 – the true negative rate (TNR), which is the ratio of negative instances that are correctly classified as negative. The TNR is also called specificity. Hence, the ROC curve plots sensitivity (recall) versus 1 – specificity.from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_train_5, y_scores) # Plot FPR against the TPR def plot_roc_curve(fpr, tpr, label=None): """Plots ROC Curve (FPR against TPR)""" plt.figure(figsize=(5, 5)) plt.plot(fpr, tpr, linewidth=3, label=label) plt.plot([0, 1], [0, 1], 'k--') plt.grid(axis='both') plt.ylabel('True Positive Rate (Recall)') plt.xlabel('False Positive Rate') plt.ylim(0, 1) plt.xlim(0, 1) plot_roc_curve(fpr, tpr) plt.show()One way to compare classifiers is to measure the area under the curve (AUC). A perfect classifier will have a ROC AUC equal to 1, whereas a purely random classifier will have a ROC AUC equal to 0.5. Scikit-Learn provides a function to compute the ROC AUC:from sklearn.metrics import roc_auc_score print('ROC AUC Score:', roc_auc_score(y_train_5, y_scores))ROC AUC Score: 0.9604938554008616As a rule of thumb, you should prefer the PR curve whenever the positive class is rare or when you care more about the false positives than the false negatives.
Otherwise, use the ROC curve.# Let's train random forest model from sklearn.ensemble import RandomForestClassifier forest_clf = RandomForestClassifier(random_state=42) y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method='predict_proba') # The roc_curve() function expects labels and scores, # but instead of scores you can give it class probabilities y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest) plot_roc_curve(fpr_forest, tpr_forest, "Random Forest") print('ROC AUC score:', roc_auc_score(y_train_5, y_scores_forest))ROC AUC score: 0.9983436731328145You now know how to train binary classifiers, choose the appropriatemetric for your task, evaluate your classifiers using cross-validation,select the precision/recall trade-off that fits your needs, and use ROCcurves and ROC AUC scores to compare various models. Multiclass Classification_Multinomial classifier_ distinguishes between more than two classes. Even though some algorithms only perform binary classifications, there are ways to still use them for multiple classes classification. One way is to create a system that classifies 10 different type images by taking one class at a time and classifying it against the rest. This is called _one-versus-rest_ (OvR) strategy. Another strategy is to train a binary classifier for every pair of classes: distinguish between 0 and 1 class, then between 0 and 2, then between 0 and 3 and so on. This is called _one-versus-one_ (OvO) strategy. For $N$ classes you need to train $\frac{N\times (N-1)}{2}$ classifiers.Scikit-Learn detects when you try to use a binary classification algorithm for a multiclass classification task, and it automatically runs OvR or OvO, depending on the algorithm:from sklearn.svm import SVC svm_clf = SVC() svm_clf.fit(X_train, y_train) svm_clf.predict([some_digit])This code trains the SVC on the training set using the original target classes from 0 to 9 ( y_train ), instead of the 5-versus-the-rest target classes ( y_train_5 )# This will return 10 scores per instance some_digit_scores = svm_clf.decision_function([some_digit]) some_digit_scores # Highest score is the one corresponding to class 5 np.argmax(some_digit_scores) svm_clf.classes_ svm_clf.classes_[5]**WARNING**: When a classifier is trained, it stores the list of target classes in its `classes_` attribute, ordered by value.If you want to force Scikit-Learn to use _one-versus-one_ or _one-versus-the-rest_, you can use the `OneVsOneClassifier` or `OneVsRestClassifier` classes# Classify using OvR from sklearn.multiclass import OneVsRestClassifier ovr_clf = OneVsRestClassifier(SVC()) ovr_clf.fit(X_train, y_train) ovr_clf.predict([some_digit]) len(ovr_clf.estimators_) # Trying SGD Classifier. No need for OvR or OvO as # SGD directly classifies into multiple classes. sgd_clf.fit(X_train, y_train) sgd_clf.predict([some_digit]) sgd_clf.decision_function([some_digit])You can see that the classifier is fairly confident about its prediction: almost all scores are largely negative, while class 5 has a score of ...You can evaluate this classifier using `cros_val_score()`.cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy")We get 86% on all test folds. If we used a random classifier we would get 10%. 
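Part of the reason there is still room for improvement is that gradient-descent-based classifiers are sensitive to the scale of their input features, and the raw pixel values span a wide range (a quick check, assuming `X_train` is the slice created earlier):
print(float(X_train.values.min()), float(X_train.values.max()))   # expected: 0.0 255.0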
We still can get a better score by scaling the inputs:from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train.astype(np.float64)) cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')Error AnalysisOne way to improve the selected model is to analyze the types of errors it makes.# Let's look at the confusion matrix y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3) conf_mx = confusion_matrix(y_train, y_train_pred) print(conf_mx) # Let's look at the image of the confusion matrix plt.matshow(conf_mx, cmap=plt.cm.gray) plt.show()The 5s look slightly darker than the other digits, which could mean that there are fewer images of 5s in the dataset or that the classifier does not perform as well on 5s as on other digits. In fact, you can verify that both are the case. Let’s focus the plot on the errors. First, you need to divide each value in the confusion matrix by the number of images in the corresponding class so that you can compare error rates instead of absolute numbers of errors:row_sums = conf_mx.sum(axis=1, keepdims=True) norm_conf_mx = conf_mx / row_sums np.fill_diagonal(norm_conf_mx, 0) plt.matshow(norm_conf_mx, cmap=plt.cm.gray) plt.show()Analyzing individual errors can also be a good way to gain insights on what your classifier is doing and why it is failing, but it is more difficult and time-consuming. For example, let's plot 3s and 5s:def plot_digits(instances, images_per_row=10, **options): """ Plots digits in a grid - correctly & incorrectly guessed """ size = 28 images_per_row = min(len(instances), images_per_row) images = [instance.reshape(size,size) for instance in instances.values] n_rows = (len(instances) - 1) // images_per_row + 1 row_images = [] n_empty = n_rows * images_per_row - len(instances) images.append(np.zeros((size, size * n_empty))) for row in range(n_rows): rimages = images[row * images_per_row : (row + 1) * images_per_row] row_images.append(np.concatenate(rimages, axis=1)) image = np.concatenate(row_images, axis=0) plt.imshow(image, cmap = mpl.cm.binary, **options) plt.axis("off") cl_a, cl_b = 3, 5 X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)] X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)] X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)] X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)] plt.figure(figsize=(8,8)) plt.subplot(221); plot_digits(X_aa[:25], images_per_row=5) plt.subplot(222); plot_digits(X_ab[:25], images_per_row=5) plt.subplot(223); plot_digits(X_ba[:25], images_per_row=5) plt.subplot(224); plot_digits(X_bb[:25], images_per_row=5) plt.show()Most misclassified images seem like obvious errors to us, and it’s hard to understand why the classifier made the mistakes it did. The reason is that we used a simple `SGDClassifier`, which is a linear model. All it does is assign a weight per class to each pixel, and when it sees a new image it just sums up the weighted pixel intensities to get a score for each class. So since 3s and 5s differ only by a few pixels, this model will easily confuse them. The main difference between 3s and 5s is the position of the small line that joins the top line to the bottom arc. If you draw a 3 with the junction slightly shifted to the left, the classifier might classify it as a 5, and vice versa. In other words, this classifier is quite sensitive to image shifting and rotation.
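You can get a rough feel for this sensitivity with a small experiment (a hedged sketch: `np.roll` shifts `some_digit` two pixels to the right with wrap-around, and `sgd_clf` is the multiclass model fitted a few cells above, so it returns one score per class):
shifted_digit = np.roll(some_digit.reshape(28, 28), 2, axis=1).reshape(-1)
print(sgd_clf.decision_function([some_digit]))      # per-class scores for the original image
print(sgd_clf.decision_function([shifted_digit]))   # the scores usually change noticeably after the shift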
So one way to reduce the 3/5 confusion would be to preprocess the images to ensure that they are well centered and not too rotated. This will probably help reduce other errors as well. Multilabel ClassificationSay the classifier has been trained to recognize three faces, Alice, Bob, and Charlie. Then when the classifier is shown a picture of Alice and Charlie, it should output [1, 0, 1] (meaning “Alice yes, Bob no, Charlie yes”). Such a classification system that outputs multiple binary tags is called _a multilabel classification_ system.from sklearn.neighbors import KNeighborsClassifier y_train_large = (y_train >= 7) y_train_odd = (y_train % 2 == 1) y_multilabel = np.c_[y_train_large, y_train_odd] knn_clf = KNeighborsClassifier() knn_clf.fit(X_train, y_multilabel)This code creates a y_multilabel array containing two target labels for each digit image: the first indicates whether or not the digit is large (7, 8,or 9), and the second indicates whether or not it is odd.# Prediction with Multilabel classifier knn_clf.predict([some_digit])And it gets it right! The digit 5 is indeed not large ( False ) and odd ( True ).One approach to evaluate multilable classifier is to measure $F_1$ score for each individual label, then compare the average score.y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3) f1_score(y_multilabel, y_train_knn_pred, average='macro')If for example we have many more pictures of Alice then of Bob or Charlie, you may want to give more weight to the classifier's score on picture of Alice. Simple option is to give each label a weight equal to its support. This can be done by setting `average="weighted"`. Multioutput ClassificationMulticlass or multioutput classification can have more than two possible values.Let's first build a system that removes noise from images. 
The classifier's output is multilabel (one label per pixel) and each label can have multiple values (pixel intensity ranges from 0 to 255).noise = np.random.randint(0, 100, (len(X_train), 784)) X_train_mod = X_train + noise noise = np.random.randint(0, 100, (len(X_test), 784)) X_test_mod = X_test + noise y_train_mod = X_train y_test_mod = X_test def plot_digit(data): image = data.reshape(28, 28) plt.imshow(image, cmap = mpl.cm.binary, interpolation="nearest") plt.axis("off") # Let's take a look at the example some_index = 0 plt.subplot(121); plot_digit(X_test_mod.iloc[some_index].values) plt.subplot(122); plot_digit(y_test_mod.iloc[some_index].values) plt.show() # Let's run the classifier knn_clf.fit(X_train_mod, y_train_mod) clean_digit = knn_clf.predict([X_test_mod.iloc[some_index].values]) plot_digit(clean_digit)Lets get the sun and moon coordinatessun_ra, sun_dec = [], [] moon_ra, moon_dec = [], [] for year in range(2020, 2025): for month in range(1,13): if month == 2: days = 28 else: days = 30 for day in range(1,days+1): tt = Time(datetime(year, month, day, 12, 0, 0), scale='utc') _sun = get_sun(tt) sun_ra.append(_sun.ra.to(u.deg).value) sun_dec.append(_sun.dec.to(u.deg).value) _moon = get_moon(tt) moon_ra.append(_moon.ra.to(u.deg).value) moon_dec.append(_moon.dec.to(u.deg).value) sun_ra = np.array(sun_ra) sun_dec = np.array(sun_dec) moon_ra = np.array(moon_ra) moon_dec = np.array(moon_dec) fig = plt.figure(figsize=(10,5)) sub = fig.add_subplot(111) sub.scatter(((tiles['RA'][in_desi] - 80) % 360) + 80, tiles['DEC'][in_desi], s=5, c='k') sub.scatter(((sun_ra - 80) % 360) + 80, sun_dec, c='C1', s=50) sub.scatter(((moon_ra - 80) % 360) + 80, moon_dec, c='C0', s=50) sub.set_xlabel('RA', fontsize=25) sub.set_xlim(80, 440) sub.set_ylabel('Dec', fontsize=25) sub.set_ylim(-25., 90.) sub.set_title('DESI footprint', fontsize=30) is_bgs = in_desi & (tiles['PROGRAM'] == 'BRIGHT') fig = plt.figure(figsize=(10,5)) sub = fig.add_subplot(111) sub.scatter(((tiles['RA'][is_bgs] - 80) % 360) + 80, tiles['DEC'][is_bgs], s=5, c='k') sub.scatter(((sun_ra - 80) % 360) + 80, sun_dec, c='C1', s=1) sub.scatter(((moon_ra - 80) % 360) + 80, moon_dec, c='C0', s=1) sub.set_xlabel('RA', fontsize=25) sub.set_xlim(80, 440) sub.set_ylabel('Dec', fontsize=25) sub.set_ylim(-25., 90.) sub.set_title('BGS footprint: %i tiles' % np.sum(is_bgs), fontsize=30)Now lets convert to ecliptic coordinatessun_coord = SkyCoord(ra=sun_ra * u.deg, dec=sun_dec * u.deg, frame='icrs') moon_coord = SkyCoord(ra=moon_ra * u.deg, dec=moon_dec * u.deg, frame='icrs') tile_coord = SkyCoord(ra=tiles['RA'] * u.deg, dec=tiles['DEC'] * u.deg, frame='icrs') fig = plt.figure(figsize=(10,5)) sub = fig.add_subplot(111) sub.scatter(((tile_coord.barycentrictrueecliptic.lon.value[is_bgs]-80) % 360)+80, tile_coord.barycentrictrueecliptic.lat.value[is_bgs], s=5, c='k') sub.scatter(((sun_coord.barycentrictrueecliptic.lon.value-80) % 360)+80, sun_coord.barycentrictrueecliptic.lat, s=1, c='C1') sub.scatter(((moon_coord.barycentrictrueecliptic.lon.value-80) % 360)+80, moon_coord.barycentrictrueecliptic.lat, s=1, c='C0') sub.set_xlabel('Longitude', fontsize=25) sub.set_xlim(80, 440) sub.set_ylabel('Latitude', fontsize=25) sub.set_ylim(-45., 100.) sub.set_title('BGS footprint: %i tiles, $%.f deg^2$' % (np.sum(is_bgs), 14000), fontsize=30) is_ngc = (tile_coord.galactic.b.value >= 0.) is_sgc = (tile_coord.galactic.b.value < 0.) 
np.sum(is_bgs & is_ngc & (tile_coord.barycentrictrueecliptic.lat.to(u.deg).value < 8.1))/np.sum(is_bgs) * 14000 foot13000 = ((is_bgs & is_ngc) | (is_bgs & is_sgc & (np.abs(tile_coord.barycentrictrueecliptic.lat.to(u.deg).value) > 6.5))) foot12000 = ((is_bgs & is_ngc & (tile_coord.barycentrictrueecliptic.lat.to(u.deg).value > 8.1)) | (is_bgs & is_sgc)) foot11000 = ((is_bgs & is_ngc & (tile_coord.barycentrictrueecliptic.lat.to(u.deg).value > 8.1)) | (is_bgs & is_sgc & (np.abs(tile_coord.barycentrictrueecliptic.lat.to(u.deg).value) > 6.5))) foot10000 = ((is_bgs & is_ngc & (tile_coord.barycentrictrueecliptic.lat.to(u.deg).value > 12.6)) | (is_bgs & is_sgc & (np.abs(tile_coord.barycentrictrueecliptic.lat.to(u.deg).value) > 9.7))) for _foot, lbl in zip([foot13000, foot12000, foot11000, foot10000], [13000, 12000, 11000, 10000]): foot = is_bgs & _foot fig = plt.figure(figsize=(10,5)) sub = fig.add_subplot(111) sub.scatter(((tiles['RA'][foot] - 80) % 360) + 80, tiles['DEC'][foot], s=5, c='k') sub.scatter(((sun_ra - 80) % 360) + 80, sun_dec, c='C1', s=1) sub.scatter(((moon_ra - 80) % 360) + 80, moon_dec, c='C0', s=1) sub.set_xlabel('RA', fontsize=25) sub.set_xlim(80, 440) sub.set_ylabel('Dec', fontsize=25) sub.set_ylim(-25., 90.) sub.set_title('%i tiles, $%.f deg^2$' % (np.sum(foot), 14000 * float(np.sum(foot))/float(np.sum(is_bgs))), fontsize=30)Early stopping callback stop if objective function is below a certain thresholdfrom mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): return [p['x'] ** 2 for p in p_list] def early_stop(results): ''' stop if best objective is below 2 results: dict (same keys as dict returned by tuner.minimize/maximize) ''' return results['best_objective'] <= 2 config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() resultsstop if objective function does not improve for n iterationsfrom mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): return [p['x'] ** 2 for p in p_list] def early_stop(results): ''' stop if best objective does not improve for 2 iterations results: dict (same keys as dict returned by tuner.minimize/maximize) ''' current_best = results['best_objective'] patience_window = results['objective_values'][-3:] return min(patience_window) > current_best config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() resultsstop if objective function does not improve for n secsimport time import numpy as np from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): time.sleep(0.5) return [p['x'] ** 2 for p in p_list] class context: previous_best = -1.0 previous_best_time = None min_improvement_secs = 0.1 def early_stop(results): ''' stop if objective does not improve for 0.1 seconds ''' current_best = results['best_objective'] current_time = time.time() if current_best == context.previous_best and \ (current_time - context.previous_best_time > context.min_improvement_secs): print("no improvement in %f seconds: stopping early." 
% context.min_improvement_secs) return True context.previous_best = current_best context.previous_best_time = current_time return False config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() resultsMini challenge: create a pop density map for PHLphp_array, clipped_transform = msk.mask(tif_file, [mapping(geom) for geom in php.geometry.tolist()], crop=True) fig, ax = plt.subplots(facecolor='#FCF6F5FF') fig.set_size_inches(21, 14) ax.imshow(php_array[0], norm=norm, cmap=our_cmap) ax.axis('off') ax.set_title('Population Density Heatmap of The Philippines', {'fontsize':15}) #map the data to clors #imaj is now in RGBA (512x512x4) imaj = our_cmap(norm(php_array[0])) plt.show() #plt.imsave('./resources/php_popdensity.png', imaj, dpi=2000) php_array[0] php_array[0].shapeArray.reshape(-1,1)x =np.arange(0,100) y =np.arange(0,100) x lr = LinearRegression() x.shape lr.fit(x,y) x = x.reshape(-1,1) x.shape lr.fit(x,y) plt.scatter(x,y , color = 'r') plt.plot(x, lr.predict(x), color = 'b') plt.title('Linear regression done') plt.xlabel('x') plt.ylabel('y') plt.show()Contents 1. Business opportunity 2. Data collection 3. Data understanding 4. Initialisation 5. Exploratory data analysis 6. Visualizing the decision to read books without buying 7. Hypothesis testing 8. Conclusion 1. Business opportunity: A book cafe I love books and coffee. Eventually I want to open a book cafe. I want to provide reading experience at an hourly basis. People can come and read books at a very reasonable rate for 4-6 hours or more. They would be offered a complimentary beverage during their reading experience. They can purchase other soft beverages and bakery items at a chargeable basis. In this project, I did a very rough survey to gauge the need for a book cafe. Later I plan to undertake a more robust survey described in the section "Future Research" 2. Data collection Target population: People aged 20-40 who read books Sampling frame: My contacts Sampling technique: Convenience Sampling size: I was confident that atleast 50% people would respond positively to reading books without buying them. At 10% sampling error and 95% confidence level we obtained the minimum required sample size of 96 Data collection was kept anonymous because I did not want them to be conscious of what they were writing as their preferences that they might be judged. I needed the total information from a group of readers, and not tailor made suggestions for each person. So the required data was collected without intruding their privacy. I sent the [Google form](https://forms.gle/zvAFR9VowshJW3VEA) to all my contacts through different communication channels. I received 120 responses till 20-03-2021 3. Data understanding The following data was captured: | Data field | Name |Data type | Description | | :- | -: | :-: | :-: | | Timestamp | Timestamp|Numeric-continuous | Timetamp of Google form submission | | Which format do you prefer to read from? | Format|Categorical-nominal | Preferred book format | | Choice of pairing beverage? | Beverage| Categorical-nominal | Preferred beverage while reading | | Do you ever listen to music while reading? | Music|Categorical-nominal | Surrounded with sound while reading? | | How do you want to read paperbacks/hardcovers ? | Target| Categorical-nominal | Target variable: Do people want to read without buying | | How many books have you read during the last six months? 
| Frequency| Numeric-discrete | Frequency of reading books | | Do you want to connect with fellow book readers? | Connect| Categorical-nominal | Do book readers want to socialise at a book cafe?| | What genres do you enjoy reading? | Variety|Categorical-nominal | Variety of reader expected | | Count of genres read | VarCount | Numeric-discrete| Calculated field based on the no. of genres read| 4. Initialisationfrom initcodex import prepare, postprocess, plot1, plot2, plot3, plot4, plot5,plot6,modprocess, modeller # Codebase from copy import deepcopy import pandas as pd from sklearn.tree import DecisionTreeClassifier from sklearn import tree import matplotlib.pyplot as plt from sklearn.preprocessing import OneHotEncoder %matplotlib inline if __name__ == "__main__": dataf, format_count = prepare() # Accepts the input and improves column formatting for readability dmod = deepcopy(dataf) # Deepcopies the input dataframe, to be used for modelling purposes later dframe=modprocess(dmod) # Prepares the dataframe for feeding into a model dataf.sample(5)5. Exploratory data analysis Visualizationsplot1(dataf, format_count) #Book formatInference: 1. Maximum readers read from paperbacks or hard cover books. This supports our idea that people would prefer reading from phyical books. Hence people might come to our book cafe to read books.2. Maximum no. of genres are read by ebook readers. However we do not plan on providing an ebook reading service to our customers. 3. The range of books read by paperback and hard cover book readers are same. We need to probe further to find out the total set of different genres read by them. 4. People read more paperbacks than hard cover books, this is as expected. From here on all our analysis is on physical book readersdataf, genres, pphc, d, bv = postprocess(dataf) #Filters only physical book readers plot2(dataf, genres) #VarietyObservations:1. The no. of readers for fiction is almost double than any other category2. There are 12 genres which are read by more than 10 people in our survey 3. People who read more, read more widelyplot3(pphc, d) #Socializing quotientObservations:1. A high percentage(60%) of readers want to read books without buying them2. Above 85% of readers are open to socialising with other book readers. 48% readers want to meet with fellow book readersplot4(bv) #Beverage preferencesObservations:1. Maximum readers prefer coffee 2. Most other readers do not prefer any beverage or prefer teaHence providing a reading experience in a cafe is a good idea. We need to understand how many of people who do not drink coffee/tea, would want to meet fellow book readersplot5(dataf) #Non drinkersObservations:People who do not drink are interested to meet others. Hence, they would come to meet new and fellow book readers at the book cafe.dmod.Beverage.replace({'I do not drink but I know things':'No drink required','None':'No drink required','No drink necessary':'No drink required','I drink but not with books':'No drink required','Depends upon mood and time of day':'No drink required','There is no connection between books and beverage':'No drink required'},inplace=True) dmod.Connect.replace({'Yes! no. 
Well maybe....':'Maybe'},inplace=True) dmod.Music.replace({'Yes, like my life':'Yes to music','Nope':'No to music'},inplace=True) dk = dmod[["Beverage","Connect","Music"]].groupby(['Connect','Music','Beverage'])[["Beverage"]].count() #dk.columns=["Count"] dk.sort_values(["Connect","Music"], ascending=[False,True], inplace=True) dk = pd.concat([dk.iloc[0:7,:],dk.iloc[13:20,:]])Observations:1. Maximum people who drink coffee while reading want to connect with fellow book readers2. Most of the people who want to connect drink cofee3. Most people do not want to listen to music 6. Visualizing the decision to read books without buyingX,y = modeller(dmod) d=DecisionTreeClassifier(max_depth=5) d.fit(X,y) plt.figure(figsize=(30,30)) tree_drawn=tree.plot_tree(d, filled=True, feature_names=X.columns.tolist(), fontsize=12) plt.show() # Want to read books without buying ['No', 'Yes'] feats=d.feature_importances_.tolist() feat_imp = pd.Series(feats, index=X.columns) feat_impIndex 258 is a good tester. Clear rotational signal, ambiguous peak mode.j = janet.boot(uni, index=258) j.run(period_range = (1., 13.))Already have data downloaded for Gaia ID 1923544057484996224. If you want to check for new data, run `janet.update()`. ### Running Simple Astropy Lomb-Scargle on Sector 16 on star 1923544057484996224 ### ### Completed Simple Astropy Lomb-Scargle for Sector 16 on star 1923544057484996224 ### ### Saved results ### ### Running Wavelet Estimation for Sector 16 on star 1923544057484996224 ###Analyse de la *Bibliothèque* du pseudo-Apollodore ObjectifCe travail est lié à [ce projet](https://louislesueur.github.io/gods/). Le but est d'utiliser des outils de *Natural Language Processing* issus de la bibliothèque **CLTK** pour extraire les noms propres du texte et identifier les relations entre les personnages de façon automatisée.import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import AgglomerativeClustering from cltk.tag import ner from cltk.stem.lemma import LemmaReplacer from cltk.corpus.utils.formatter import tlg_plaintext_cleanupPour identifier les noms propres, on va utiliser un algorithme de CLTK qui permet d'identifier les entités nommées dans le texte, en utilisant une base de donnée pré-existante. On va définir une classe pour stocker efficacement ces entités.class Entity: def __init__(self, word): self.word = word self.occurences = [] def add_ocurence(self, occ): self.occurences.append(occ) def __str__(self): return self.word def __gt__(self, other): return self.word > other.word def __eq__(self, other): return self.word == other.word def get_table(self): return [self.word, *self.occurences]Quelques définitions+ Un texte $T$ est une suite de mots.+ Un mot est une suite finie de lettres+ Une lettre est un élément d'un alphabet $\mathcal{A}$.+ Une entité nommée est une expression linguistique le plus souvent associée à un nom propre.+ On note $o_T(m)$ l'ensemble des positions du mot $m$ dans le texte $T$ Distances entre entitésEn première approximation, on va supposer que deux entités "proches" dans le texte ont de bonnes chances d'être en relation. 
On va donc définir quelques distances pour définir cette proximité:+ En voyant les différentes positions d'une entité dans le texte comme un nuage de points en 1d, on peut définir une distance de type Chamfer: $d_{CH}(e_1,e_2) = \sum_{i \in o_T(e_1)} \min_{j \in o_T(e_2)} |i-j| + \sum_{i \in o_T(e_2)} \min_{j \in o_T(e_1)} |i-j|$+ On considérera aussi la moyenne entre tous les écarts: $d_{M}(e_1,e_2) = \frac{1}{N} \sum_{i \in o_T(e_2), j \in o_T(e_1)} |i-j|$+ Enfin, on peut aussi regarder le plus petit écart entre les différentes occurences: $d_{\min}(e_1,e_2)=\min_{i \in o_T(e_2), j \in o_T(e_1)} |i-j|$def chamfer(ent1, ent2): S1 = np.array(ent1.occurences) S2 = np.array(ent2.occurences) return np.sum([np.min([np.linalg.norm(x-y) for y in S2]) for x in S1]) + np.sum([np.min([np.linalg.norm(x-y) for y in S1]) for x in S2]) def meandist(ent1, ent2): S1 = np.array(ent1.occurences) S2 = np.array(ent2.occurences) return np.mean([[np.linalg.norm(x-y) for y in S2] for x in S1]) def mindist(ent1, ent2): S1 = np.array(ent1.occurences) S2 = np.array(ent2.occurences) return np.min([[np.linalg.norm(x-y) for y in S2] for x in S1])Avec ces différentes distances, on va construire des matrices de distances. Celles-ci étant symétriques, on peut ne calculer que la moitié des coefficients et symétriser après.def symmetrize(a): return a + a.T - np.diag(a.diagonal())Extraction des données de la *Bibliothèque* (n'exécuter les cellules que si nécessaire !) Le texte a été récupéré sur [Perseus](perseus.tufts.edu).data = "" with open('Apollodore_text', 'r') as file: for l in file.readlines(): data += l.strip() file.close()On élimine la ponctuation et les fioritures issues de la mise en forme de Perseus, et on applique l'algorithme de NER (*Name Entity Recognition*) de cltk.data = tlg_plaintext_cleanup(data, rm_punctuation=True, rm_periods=False) out = ner.tag_ner('greek',data, output_type=list)Le Grec ancient est une langue à cas. Donc une même entité peut être représentée par deux mots différents. 
On doit donc lemmatiser pour revenir au nominatif de chaque nom et s'assurer que les entités trouvées sont bien distinctes.lemmatizer = LemmaReplacer('greek') entities = [] for i in range(len(out)): if len(out[i]) == 2: ent = Entity(lemmatizer.lemmatize(out[i][0])[0]) add = True for j in range(len(entities)): if ent == entities[j]: entities[j].add_ocurence(i) add = False if add: ent.add_ocurence(i) entities.append(ent)Comme l'exécution peut prendre du temps, on sauvegarde les entités et leurs occurences dans un fichier texte.TABLE_ENTITIES = [ent.get_table() for ent in entities] np.savetxt('entities', TABLE_ENTITIES, fmt='%s')Enfin, on génère les matrices de distance et on les enregistre aussi dans des fichiers textes.CHAMFER = np.zeros((len(entities), len(entities))) MEAN = np.zeros((len(entities), len(entities))) MIN = np.zeros((len(entities), len(entities))) for i in range(len(entities)): for j in range(len(entities)): if i >= j: CHAMFER[i,j] = chamfer(entities[i], entities[j]) MEAN[i,j] = meandist(entities[i], entities[j]) MIN[i,j] = mindist(entities[i], entities[j]) CHAMFER = symmetrize(CHAMFER) MEAN = symmetrize(MEAN) MIN = symmetrize(MIN) np.savetxt('distance_matrices/chamfer',CHAMFER, fmt='%i') np.savetxt('distance_matrices/mean',MEAN, fmt='%1.3f') np.savetxt('distance_matrices/min',MIN, fmt='%1.3f')Analyse des données Généralités Récupération des données:ENTITIES = [] with open('entities', 'r') as file: for l in file.readlines(): clear = l[1:-2].replace("'", "").split(',') ENTITIES.append([clear[0], [int(i) for i in clear[1:]]]) CHAMFER = np.loadtxt('distance_matrices/chamfer', dtype="int") MEAN = np.loadtxt('distance_matrices/mean', dtype="float") MIN = np.loadtxt('distance_matrices/min', dtype="float")On peut commencer par regarder la distribution des entités dans le texte:plt.figure() plt.title("Distribution des entités") plt.xlabel("Entités") plt.ylabel("Nombre d'occurences") plt.plot(np.sort([len(ent[1]) for ent in ENTITIES])) plt.show()La distribution a donc forme exponentielle, et on a:print(f"10 entités les moins citées: {[ENTITIES[idx][0] for idx in np.argsort([len(ent[1]) for ent in ENTITIES])[:10]]}") print(f"10 entités les plus citées: {[ENTITIES[idx][0] for idx in np.argsort([len(ent[1]) for ent in ENTITIES])[-10:]]}")10 entités les moins citées: ['ἄντεια', 'τμῶλος', 'λυδός', 'ἰάρδανος', 'τιρύνθιος', 'κευθώνυμος', 'λακωνικός', 'ἐφέζομαι', 'ταίναρος', 'πυρήνη'] 10 entités les plus citées: ['ἀπόλλων', 'ἑρμῆς', 'ἀπόλάω1', 'μίνως', 'ἄργος', 'θῆβαι', 'Διὸς', 'Ζεὺς', 'ἀθήνη', 'ποσειδῶν']Sans surprise, les entités les plus cités sont des noms propres de divinités, les moins citées sont celles de personnages très anecdotiques, et de mots qui ont mal été identifiés par l'algorithme. Néanmoins, on remarque que Διὸς est présent dans les plus citées. 
Or, il provient clairement de l'irrégularité de la déclinaison de Zeus:| Cas | Déclinaison ||-----------|-------------------||N | ὁ Ζεύς ||V |Ζεῦ ||A | τὸν Δῐ́ᾰ / Ζῆνᾰ ||G |τοῦ Δῐός / Ζηνός ||D |τῷ Δῐῐ́ / Ζηνῐ́ |C'est un problème qu'il faudra régler à l'avenir Distances Pour une entité quelconque, on peut afficher la distance de cette entité par rapport à toutes les autres, en normalisant pour pouvoir comparer:fig = plt.figure(figsize=(15,5)) fig.add_subplot(1, 3, 1) plt.title(f"distance de {ENTITIES[33][0]} par rapport aux autres") plt.plot(np.sort(CHAMFER[33])/np.max(CHAMFER[33]), label='chamfer') plt.legend() fig.add_subplot(1, 3, 2) plt.title(f"distance de {ENTITIES[33][0]} par rapport aux autres") plt.plot(np.sort(MEAN[33])/np.max(MEAN[33]), label='mean') plt.legend() fig.add_subplot(1, 3, 3) plt.title(f"distance de {ENTITIES[33][0]} par rapport aux autres") plt.plot(np.sort(MIN[33])/np.max(MIN[33]), label='min') plt.legend() plt.show()On voit que $d_{\min}$ a de plus fortes variations, et ça sera d'ailleurs celle-ci qui donnera les meilleurs résultats pour le clustering. Clustering pour la min distance On va maintenant effectuer le clustering. Les résultats avec la distance de Chamfer et la mean distance étant mauvais, on se focalisera sur ceux donnés par la min distance. Après plusieurs tests, un regroupement hiérarchique donne des résultats satisfaisants.agg = AgglomerativeClustering(affinity="precomputed", linkage='complete', n_clusters=100) Y = agg.fit_predict(MIN) for i in range(np.max(Y)+1): print(f"cluster {i} ({len(np.where(Y==i)[0])} élements):") print([ENTITIES[idx][0] for idx in np.where(Y==i)[0]][:10])cluster 0 (16 élements): ['ἄδραστος', 'μελάμπους', 'πηρόω', 'ταλαός', 'παρθενοπαῖος', 'πρῶ/ναξ', 'Μηκιστεὺς', 'ἀριστόμαχος', 'ἀμφιθέη', 'Αἰγιαλεὺς'] cluster 1 (19 élements): ['ὀρέστης', 'ὄξυλος', 'ἀνδραίμων', 'λοκρίς', 'αἰγίμιος', 'τισαμενός', 'πελοποννήσιος', 'πελοποννήσιοι', 'κλεόδαιος', 'τήμενος'] cluster 2 (34 élements): ['ἄγριος', 'φθῖος', 'μεσσήνη', 'εὐρύθεμις', 'κλεοβοία', 'ἀλθαία', 'λήδη', 'ὑπερμνήστρη', 'ἴφικλος', 'πλήξιππος'] cluster 3 (24 élements): ['τρίτων', 'φρύγιος', 'γανυμήδης', 'καλλιρρόη', 'βατεία', 'δάρδανος', 'σαμοθράικη', 'σκάμανδρος', 'ἰδαῖος', 'δαρδάνιος'] cluster 4 (15 élements): ['κόρινθος', 'τάλος', 'σύλευς', 'ἀθηναῖος', 'μινώταυρος', 'ἀλκίππη', 'μητίων', 'αἰγληίς', 'ὀρθαίη', 'γέραιστος'] cluster 5 (15 élements): ['Ἰνδοὺς', 'σάτυρος', 'θράκη', 'ἀκταίων', 'βαλίος', 'Αἴγυπτόν', 'Πρωτεὺς', 'κύβελα', 'Ῥέας', 'βάκχη'] cluster 6 (15 élements): ['λῆμνος', 'θράικη', 'εὔνηος', 'νεβροφόνος', 'δολίων', 'κύζικος', 'Πελασγικὸν', 'ὑλάω', 'θειοδάμας', 'ἀφεταί'] cluste[...]USING: TRY, EXCEPT, & FINALLY### Create a list sales = [6212, 717, 8216, 9218, 4520, 7856, 4312]TRY SOMETHING THAT DOESN'T WORK# Try to add a list of integers and a string, if you get an error print out end program: try: sales + 'A string of characters.' except Exception as e: print('Oh No! We have an Error:', e) finally: print('End Program')Oh No! We have an Error: can only concatenate list (not "str") to list End ProgramTRY SOMETHING THAT WORKS# Try to print sales, print out the result & end program, if you get an error, print out error & end program: try: print('Wow! It''s Worked!', sales) # If you get an error, set the error as 'e', except Exception as e: print('Error:', e) finally: print('End program')Wow! Its Worked! 
[6212, 717, 8216, 9218, 4520, 7856, 4312] End programLibrariesimport numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as pltData# read in speedup data df_speedup = pd.read_csv('./speedup_data.csv') print(df_speedup.columns)Index(['executors', 'cores', 'rows per sec', 'addBatch', 'getBatch', 'getOffset', 'queryPlanning', 'triggerExecution', 'walCommit', 'total'], dtype='object')Speedup# select target column ('rows per sec' or 'total') y_target = 'rows per sec' # prepare data for core plot df_core = pd.DataFrame() df_core['cores'] = np.arange(1,5) for i in range(8): df_temp = df_speedup[df_speedup['executors']==i+1] df_temp.reset_index(inplace=True,drop=True) df_core[i+1] = df_temp[y_target] # Plot of speedup for different number of cores fig, ax = plt.subplots(figsize=(15,9)) x = df_core['cores'] serial = df_core[1][0] for i in range(8): y = df_core[i+1] speedup = [] for j in range(4): if y_target == 'rows per sec': speedup.append(y[j]/serial) else: speedup.append(serial/y[j]) # Plot the results plt.plot(x, speedup, '-o',label='{} worker instances'.format(i+1)) plt.xlabel('number of cores') plt.ylabel('speedup') plt.xticks(x) plt.legend() plt.title('Graph of speedup for different number of cores') plt.savefig('speedup_cores.png') plt.show()Feasibility# variables sampling_frequency = 30 # Hz number_of_sensors = 6 update_interval = 10 # prepare data for feasibility study df_feasibility = pd.DataFrame() df_feasibility['worker instances'] = df_speedup['executors'] df_feasibility['cores'] = df_speedup['cores'] df_feasibility['number of students'] = np.floor(df_speedup['rows per sec']/(sampling_frequency*number_of_sensors*update_interval)) # cast type as integers df_feasibility = df_feasibility.astype('int32') # save feasibility data df_feasibility.to_csv('./df_feasibility.csv') # plot distribution of number of students supported fig, ax = plt.subplots(figsize=(14,7)) # Plot the results sns.barplot(x='worker instances', y='number of students', hue='cores',data=df_feasibility) plt.xlabel('number of worker instances') plt.ylabel('number of students supported') ax.get_legend().remove() fig.legend(title='number of cores') plt.title('Distribution of number of students supported') plt.savefig('feasibility_worker_instances.png') plt.show()INFO8010: Homework 1In this first homework, you will familiarize yourself with the basics of differentiable programming and the PyTorch framework. At the end of this assignment, you should be able to build and train your own multi-layer perceptron. Although this assignment is optional, we **strongly** advise you to do it as you are expected to be comfortable with PyTorch at the end of this course.In the following, we provide some code blocks that are already running, along with explanations. You are asked to complete the code blocks in which the ` your code` comment is present. Sometimes, you will also have to motivate why you programmed certain things or guide us quickly through the results you obtained. If this is required you will see the following instruction:> your discussionPlease note that you will **not** have to handle in any sort of written report by the end of the assignment. We do however expect you to submit the notebook **with the solutions** to the exercises. 0. ImportsOur first step is very easy: we simply import the required libraries.import matplotlib.pyplot as plt import numpy as np import torch1. Tensors and basic operationsTensors are one of the main ingredients when it comes to modern deep learning frameworks. 
Almost all deep learning computations can be expressed as tensor operations which make computation fast and efficient, especially on graphics processing units (GPUs). We will now see how to manipulate tensors within the PyTorch framework.There are many valid definitions for tensors, but to keep it simple you can see them as multi-dimensional arrays. Zero-dimentional tensors are scalars, one-dimentional tensors are vectors, two-dimentional tensors are matrices, etc.In PyTorch, a tensor is an instance of the class `torch.Tensor`. PyTorch tensors are **very** similar to NumPy arrays.Almost any operations you could imagine can be performed on tensors (see [API](https://pytorch.org/docs/stable/tensors.html)). 1.1 TensorsThere are many ways to [create tensors](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor), for instance:# we can create a tensor from lists t1 = torch.tensor([[1, 2], [-7, 9]]) print(t1) # or from numpy arrays t2 = torch.from_numpy(np.array([[1., 8.], [0., 3.]])) print(t2) # or we can create a 2x3x4 tensor filled with zeros t3 = torch.zeros(2, 3, 4) print(t3)As an array may contain different types of data (e.g. float, int, boolean), so can a tensor. Let's check what types of data are in `t1`, `t2` and `t3`:print(t1.dtype, t2.dtype, t3.dtype, sep='\n')As expected, PyTorch uses an integer data type to store integer values and a floating-point data type to store real values. However, unlike NumPy, PyTorch uses a **single-precision** floating-point data type (`float32`) by default.You can convert tensors from one type to another:print(t1.float(), t2.long(), t3.double(), sep='\n')Another important property of a tensor is its shape. You can use the `.shape` property or `.size()` method to check the dimensions of a tensor. Both return a `torch.Size` object that can be manipulated with its own operations. Most of the time, you will just use `torch.Size` objects as tuples or lists (e.g. to check the size of a tensor along one of its dimension).# the full shape print(t1.shape) # the length of the tensor along its first dimension print(t2.shape[0]) # the shape of the two last dimensions print(t3.shape[-2:])1.2 OperationsWe now consider simple tensor operations on 1D (vectors) and 2D (matrices) tensors. Lets instantiate two vectors with 5 elements and two matrices of respectively 3x5 and 5x5 elements.v1 = torch.ones(5) print(v1) v2 = torch.randn(5) # standard gaussian print(v2) m1 = torch.rand(3, 5) # [0, 1) uniform print(m1) m2 = torch.eye(5) # identity matrix print(m2)The most basic operations are the addition, subtraction, multiplication and division. As in NumPy, but unlike MATLAB, the symbols `+, -, *, /` all perform **element-wise** operations.# add v1 and v2 v_sum = v1 + v2 print(v_sum) # subtract v1 from v2 v_sub = v1 - v2 print(v_sub) # multiply the elements of v1 and v2 v_mul = v1 * v2 print(v_mul) # divide the elements of v1 by v2 v_div = v1 / v2 print(v_div)When trying to perform an element-wise operation between tensors of different shapes, the tensors are [broadcast](https://numpy.org/doc/stable/user/basics.broadcasting.html) together, if possible. 
Notably, this allows to add or multiply tensors by scalars.# add a scalar print(v1 + 1) # multiply by scalar print(v2 * 2) # broadcast substraction between vector and matrix print(v2 - m1)Another basic operation with tensors is the inner product, denoted by the symbol `@`.# scalar product between v1 and v2 print(v1 @ v2) # matrix-vector product between m1 and v2 print(m1 @ v2) # matrix-matrix product between m1 and m2 print(m1 @ m2)AggregationCommon opperations are to aggregate the values of a tensor along a dimension into a single value. For instance, computing the sum of the rows of a matrix . A lot of aggregation methods such as `.sum`, `.prod`, `.mean`, `.max`, ... are available.# sum of rows of m1 print(m1.sum(dim=1)) # mean of columns of m2 print(m2.mean(dim=0)) # prod of the elements of v2 print(v2.prod())IndexingSometimes you would like to extract a sub-tensor from the tensor. This operation is called *slicing*.# extract elements 0 (included) to 2 (not included) print(v1[0:2]) # extract all elements but the last print(v2[:-1])You may also extract a subset of the elements by passing a list of indices.# extract the first, fourth and fifth elements v2[[0, 3, 4]]Matrices can also be sliced/indexed.m1[:4, 2:-1]Squeeze and UnsqueezeSqueezing removes dimensions of size 1 from tensors. Unsqueeze adds a dimension of size 1.# before unsqueeze print(m1.shape) # after unsqueeze m1 = m1.unsqueeze(1) print(m1.shape) # after squeeze m1 = m1.squeeze(1) print(m1.shape)ViewSometimes, tensors don't have the correct shape. For example, you might want to process 3x32x32 images as vectors of 3072 elements. The `.view` method returns a new tensor with the same data, but of a different shape.images = torch.rand(10, 3, 32, 32) print(images.shape) images_as_vectors = images.view(10, -1) print(images_as_vectors.shape)However, both tensors share the same underlying data, meaning that modifying one in-place will also modify the other.images[0, :] = torch.zeros_like(images[0]) images_as_vectors[0]It should be noted that slicing, indexing and squeezing operations actually create views of the tensor instead of making copies of the data. For a complete list of view operations in `torch`, see the [documentation](https://pytorch.org/docs/stable/tensor_view.html). Other primitivesA **lot** of useful primitives are available in `torch`. Notable examples are [exp](https://pytorch.org/docs/stable/generated/torch.exp.html), [log](https://pytorch.org/docs/stable/generated/torch.log.html), [sqrt](https://pytorch.org/docs/stable/generated/torch.sqrt.html), [ceil](https://pytorch.org/docs/stable/generated/torch.ceil.html), [clamp](https://pytorch.org/docs/stable/generated/torch.expand.html), [sort](https://pytorch.org/docs/stable/generated/torch.sort.html), [argmax](https://pytorch.org/docs/stable/generated/torch.argmax.html), [stack](https://pytorch.org/docs/stable/generated/torch.stack.html), [cat](https://pytorch.org/docs/stable/generated/torch.cat.html), [where](https://pytorch.org/docs/stable/generated/torch.where.html) and so on. Always take a look at the [documentation](https://pytorch.org/docs/stable/index.html) before implementing something. And don't forget: Google is your best friend! 1.3 Try it yourself! Create a tensor `samples` with $10^{5}$ i.i.d. 
normally (mean 5 and standard deviation 2) distributed values.samples = # your codeExtract the first 42 elements of `samples`.# your codeExtract the last 666 elements of `samples`.# your codeExtract all the elements of `samples` that have an odd index.# your codeCreate a view of shape 10x1000x10 of `samples`.# your codeCompute the mean of `samples` using a `for` loop:%%timeit # your codeCompute the mean and standard deviation of `samples` with the appropriate PyTorch operators:%%timeit # your codeWhat do you observe when you compare the running time of the two code snippets?> your discussion Compute the absolute-value ($L_1$) and Euclidean ($L_2$) norms of `samples`, without loops.# your codeExtract the first and last 13 elements of `samples` as two distinct vectors and compute their [outer product](https://en.wikipedia.org/wiki/Outer_product) matrix, without loops.# your codeExtract the diagonal of this matrix.# your codeTrace the curve defined by the coordinates$$x(t) = 16 \sin(t)^3$$$$y(t) = 13 \cos(t) - 5 \cos(2t) - 2 \cos(3t) - \cos(4t)$$for $t \in [0, 2\pi[$.You can use `np.pi` for the value of $\pi$ and the `matplotlib` package to plot the curve.# your codeYou can now send the result to your crush ;) 2. The autograd packageThe autograd package is what really makes PyTorch different from other algebraic language/libraries such as MATLAB or NumPy. Whenever you make operations in PyTorch, it will create a computation graph that can later be used to compute derivatives of the output quantities with respect to the input or other intermediate computation steps. The official documentation says> `torch.autograd` provides classes and functions implementing automatic differentiation of arbitrary scalar valued functions. It requires minimal changes to the existing code - you only need to declare tensors for which gradients should be computed with the `requires_grad=True` keyword.For more information about autograd you can check this short [tutorial](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html) and the [documentation](https://pytorch.org/docs/stable/autograd.html). 2.1 A guided tourLet's see how autograd may be used to compute the derivative(s) of the following function.def f(x): return x**2 + 2 * x - 0.5 * torch.sin(x * 3) def f_prime(x): # f' return 2 * x + 2 - 1.5 * torch.cos(x * 3) def f_pprime(x): # f'' return 2 + 4.5 * torch.sin(x * 3) x = torch.randn(1, requires_grad=True) # without requires_grad, the computation graph is not created print(x) y = f(x) print(y) dy = torch.autograd.grad(y, x, create_graph=True)[0] # create_graph is necessary for higher-order derivative print(dy, f_prime(x)) ddy = torch.autograd.grad(dy, x)[0] print(ddy, f_pprime(x))As expected, autograd is able to compute gradients automatically. We can exploit this to perform **gradient descent** with parametric approximators. 
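Before fitting a whole function, here is a minimal sketch of a single gradient-descent step on one scalar parameter (added for illustration, it is not part of the original notebook, and the toy objective $(w - 3)^2$ is an arbitrary choice):

# one gradient-descent step on a scalar parameter, using autograd for the gradient
w = torch.tensor(0.0, requires_grad=True)  # parameter to optimize
lr = 0.1                                   # learning rate
loss = (w - 3.0) ** 2                      # toy objective, minimized at w = 3
grad = torch.autograd.grad(loss, w)[0]     # d(loss)/dw = 2 * (w - 3), here -6
with torch.no_grad():
    w -= lr * grad                         # w moves from 0.0 to 0.6, towards 3
print(w)

The same two ingredients, a differentiable loss and `torch.autograd.grad`, are all that the next cells repeat in a loop to fit a more expressive approximator.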
Let's approximate the function `f` using a polynomial.# learning set x = torch.linspace(-3, 3, 1000) y = f(x) # parametric approximator (polynomial of degree 3) def approximator(w, x): return torch.stack((torch.ones_like(x), x, x**2, x**3), dim=-1) @ w # initialize parameters at random w = torch.randn(4, requires_grad=True)Let's take a look at what happens before trainingplt.title('Before training') plt.plot(x, y, label='Ground truth') plt.plot(x, approximator(w, x).detach(), label='Approximation') plt.legend() plt.show()We perform gradient descent on the mean squared error (MSE) loss function in order to fit the learning set.lr = .001 # learning rate for i in range(1000): y_pred = approximator(w, x) mse = torch.mean((y - y_pred)**2) # mean squared error grad = torch.autograd.grad(mse, w)[0] with torch.no_grad(): w -= lr * grad # gradient descent update plt.title('After training') plt.plot(x, y, label='Ground truth') plt.plot(x, approximator(w, x).detach(), label='Approximation') plt.legend() plt.show()If the approximator became larger, calling the `torch.autograd.grad` method with all parameter tensors would become quite cumbersome. Fortunately, PyTorch provides the `.backward()` method to acumulate (in the `.grad` property) the gradient values in **all** tensors that are in the computation graph. Later on in this notebook we will see how this, together with `Optimizer` objects, makes gradient descent very simple.x = torch.rand(5) w = torch.rand(5, requires_grad=True) y = w @ x y.backward() print(x, w.grad, sep='\n')Importantly, the construction of the computation graphs is expensive and memory consuming. If some computations do not use the gradients, like evaluating an approximator or updating the parameters, one can disable the creation of the computation graph with the `torch.no_grad()` context.w = torch.rand(100, 100, requires_grad=True) x = torch.rand(100) %timeit w @ x with torch.no_grad(): %timeit w @ x2.2 Try it yourself!We would like you to perform the same kind of curve fitting that we did a few cells above, but this time we want to use the `.backward()` method instead of `torch.autograd.grad`. Remember that PyTorch will keep **accumulating** the gradient values in the `.grad` property of each tensor, i.e. the values are not replaced but added together after each backward pass. Fortunately, you can reset the values of the gradient to 0 with the `.zero_()` in-place method (e.g. `w.grad.zero_()`). To make your life easier, you can start from the learning loop three cells above, but be careful to re-initialize the parameters at random before training and to not use the `torch.autograd.grad` function.# your code3. The `torch.nn` and `torch.optim` modulesYou should now be able to define your own neural network and train it with tensorial operations and the autograd package. However this would require you to explicitely define every operation in the neural network and to keep track of all the parameters for performing gradient descent. Fortunately, PyTorch provides the `torch.nn` and `torch.optim` packages which implement everything you need to define and train a neural network efficiently.import torch.nn as nn import torch.nn.functional as F import torch.optim as optim`nn.functional`The `torch.nn.functional` (imported as `F` in this notebook) module implements many predefined functions which should simplify your life for building your own neural networks. The complete list can be accessed [here](https://pytorch.org/docs/stable/nn.functional.html). 
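As a quick illustration (added here, not part of the original notebook), these are plain functions that act directly on tensors, so no module object needs to be instantiated:

x = torch.randn(2, 3)
print(F.relu(x))                           # element-wise ReLU
print(F.softmax(x, dim=-1))                # each row sums to 1
print(F.mse_loss(x, torch.zeros_like(x)))  # mean squared error against zeros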
Let's play with some activation functions.x = torch.linspace(-5, 5, 100) plt.plot(x, F.relu(x), label='ReLU') plt.plot(x, F.elu(x), label='ELU') plt.plot(x, F.silu(x), label='SiLU') plt.legend() plt.show()`nn.Module`PyTorch provides a very important base class named `nn.Module`. This class is used to build complex neural networks. In fact any class which inherits from it will automatically keep track of its parameters. To define your own `nn.Module` subclass you only need to implement the `__init__` constructor and the `forward` function. Let's see an example:# we create a class 'SimpleParametricModel' which inherits from nn.Module class SimpleParametricModel(nn.Module): def __init__(self): # nn.Module's constructor has to be called BEFORE adding any sub-modules super().__init__() # we add a linear layer to our module, which is itself a module self.linear = nn.Linear(in_features=3, out_features=1, bias=True) # we add an activation function to our module self.activation = nn.ReLU() # we define the forward pass def forward(self, x): x = self.linear(x) x = self.activation(x) return xWe can now create an instance of the `SimpleParametricModel` class and have a look at it.model = SimpleParametricModel() print(model)We observe that the two components are correctly registered. We can also inspect the parameters of our model with the `.parameters()` or `.state_dict()` methods.for p in model.parameters(): print(p) model.state_dict()As expected, the parameters of the linear layer are considered as parameters of the model itself.PyTorch implements many components that are commonly used in neural networks such as convolutions, linear layers, normalizations and activations. Most of them are available as ready-to-use modules in `torch.nn` (imported as `nn` in this notebook).Let's build a simple multi-layer perceptron (MLP) with two hidden layers and a ReLU activation function.class TwoLayerMLP(nn.Module): def __init__(self, input_features, output_features, hidden_features=16): super().__init__() self.l1 = nn.Linear(input_features, hidden_features) self.l2 = nn.Linear(hidden_features, hidden_features) self.l3 = nn.Linear(hidden_features, output_features) self.activation = nn.ReLU() def forward(self, x): h1 = self.activation(self.l1(x)) h2 = self.activation(self.l2(h1)) y = self.l3(h2) return y # instantiate the network to have 3 input features and 1 output feature (and 16 hidden features) network = TwoLayerMLP(3, 1) print(network) # we usually give batches of values to networks to take advantage of parallelism # here the batch size is 32 x = torch.rand(8, 3) # call network.forward under the hood y = network(x) print(x, y, sep='\n')`nn.Sequential`Writing the forward pass can become cumbersome and dirty if you have a large number of sequential layers. For that kind of networks, you can use `nn.Sequential` as parent instead of `nn.Module`, which calls the layers sequentially within its forward pass.class ElegantTwoLayerMLP(nn.Sequential): def __init__(self, input_features, output_features, hidden_features=16): super().__init__( nn.Linear(input_features, hidden_features), nn.ReLU(), nn.Linear(hidden_features, hidden_features), nn.ReLU(), nn.Linear(hidden_features, output_features), ) # instantiate the network to have 1 input feature and 1 output feature network = ElegantTwoLayerMLP(1, 1) print(network)OptimizersYou should now be able to define any kind of neural network you like. However if you want to optimize it you would still need to iterate through all the parameters to perform a gradient descent step. 
Fortunately, `torch.nn.optim` (imported as `optim`) implements classes that handle that for you! For example, let's say you would like to learn the function $f(x) = x^2$ with the previous neural network, using a MSE loss (often called the criterion) and stochastic gradient descent (SGD):def f(x): return x**2 # we use the MSE loss as criterion criterion = nn.MSELoss() # we create an instance of the SGD class that will make the updates for us optimizer = optim.SGD(params=network.parameters(), lr=.01) # Let's do some learning steps with randomly generated x values: for i in range(1000): x = torch.randn(100, 1) y = f(x) y_pred = network(x) # we set all gradient values to 0 optimizer.zero_grad() # we compute the loss loss = criterion(y_pred, y) # we perform the backward pass to accumulate the gradients loss.backward() # we update the parameters optimizer.step()Let's check the results.x = torch.linspace(-3, 3, 100).unsqueeze(1) with torch.no_grad(): y = network(x) plt.plot(x, f(x), label='$f(x)$') plt.plot(x, y, label='Approximation') plt.legend() plt.show()4. Training our first neural network classifierWe now have all the necessary knowledge to build and train our very first neural network on a simple binary classification task. To do this we will start by importing a toy example dataset from the `sklearn` library, and then create and use the resulting splits for training.from sklearn.datasets import make_moons from sklearn.model_selection import train_test_split X, Y = make_moons(500, noise=0.1) # create artificial data X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=73) # create splits plt.scatter(X_train[:,0], X_train[:,1], c=Y_train) # visualize the training data plt.show()When we train a neural network in PyTorch, no matter how complex the model is, we always go through a training loop. In this loop we feed the data to the model and get its predictions. We then compare the predictions of the network to the ground truth and adjust the parameters of the model by performing gradient descent. We have already seen all the components that are necessary for going through this process, so the only thing that remains to be done is to put all the pieces of this notebook together and train our **first awesome neural network!**During the training stage we would like to keep track whether our model will improve over the different iterations. It is therefore good practice to monitor whether the loss we are minimizing decreases over time, and whether the overall performance of the model increases the more training iterations we perform. Remember that one of the key components of PyTorch are tensors, and we might not always have the data coming in this specific format. It is therefore necessary to convert it if needed.Once the data is in an appropriate format it can be given to the model, and we can obtain its predictions. This is what we usually call the **forward pass**. Once we obtain our predictions, we can compare how close they are to what we would like the network to predict: to do this we feed our predictions together with the true labels through the loss function which we are minimizing. At its early training stages the network will perform poorly, but it will improve as its weights/parameters are updated by gradient descent. 
We can do this very easily by obtaining the gradients of the parameters with respect to the loss function we are minimizing (**backward pass**) and adjusting these weights with the optimizer we defined previously.Once all of this is done we can measure the performance of our model, which in this case will be reflected by its accuracy in classifying the synthetic dataset we created beforehand.net = nn.Sequential( nn.Linear(X.shape[-1], 32), nn.ReLU(), nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, 1), nn.Sigmoid(), ) criterion = nn.BCELoss() optimizer = optim.Adam(net.parameters(), lr=.01) train_loss = [] # where we keep track of the loss train_accuracy = [] # where we keep track of the accuracy of the model iters = 128 # number of training iterations X_train_t = torch.from_numpy(X_train).float() Y_train_t = torch.from_numpy(Y_train).float() for i in range(iters): y_pred = net(X_train_t) # forward pass y_pred = y_pred.squeeze(-1) # transform the 1-element vectors into scalars optimizer.zero_grad() # reset the gradients to 0 loss = criterion(y_pred, Y_train_t) # compute the loss loss.backward() # obtain the gradients with respect to the loss optimizer.step() # perform one step of gradient descent with torch.no_grad(): y_pred_class = y_pred > 0.5 # we assign a label (0 or 1) based on the network's prediction accuracy = (Y_train_t == y_pred_class).float().mean() # compute accuracy train_accuracy.append(accuracy.item()) train_loss.append(loss.item()) fig, axs = plt.subplots(2, 1) axs[0].set_title('Training Loss') axs[0].plot(train_loss) axs[0].set_xlabel('Iterations') axs[0].set_ylabel('Loss (Binary Cross Entropy)') axs[1].set_title('Training Accuracy') axs[1].plot(train_accuracy) axs[1].set_xlabel('Iterations') axs[1].set_ylabel('Accuracy') plt.tight_layout() plt.show() def plot_decision_boundary(X, y, model, steps=1000, cmap='Paired'): """ Function to plot the decision boundary and data points of a model. Data points are colored based on their actual label. """ cmap = plt.get_cmap(cmap) # Define region of interest by data limits xmin, xmax = X[:,0].min(), X[:,0].max() ymin, ymax = X[:,1].min(), X[:,1].max() steps = 1000 x_span = np.linspace(xmin - .5, xmax + .5, steps) y_span = np.linspace(ymin - .5, ymax + .5, steps) xx, yy = np.meshgrid(x_span, y_span) # Make predictions across region of interest with torch.no_grad(): grid = np.stack([xx, yy], axis=-1) grid = torch.from_numpy(grid).float() pred = model(grid).squeeze(-1) # Plot decision boundary in region of interest fig, ax = plt.subplots() ax.contourf(xx, yy, pred, cmap=cmap, alpha=0.5) # Get predicted labels on training data and plot train_labels = model(torch.from_numpy(X).float()) ax.scatter(X[:,0], X[:,1], c=y.ravel(), cmap=cmap, lw=0) return fig fig = plot_decision_boundary(X_test, Y_test, net, cmap = 'RdBu')4.1 Discuss the bugs After actively following the theoretical lectures of professor Louppe, our favourite Italian student remembered that a common loss function one can use when dealing with classification problems is the Cross-Entropy loss. However after implementing it in PyTorch, Al was disappointed because his code was not working.What do you think are the causes of the bug(s)? Maybe, first check the documentation of [`torch.nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html). 
Briefly discuss your ideas and **bonus** can you come up with a solution?net = nn.Sequential( nn.Linear(X.shape[-1], 32), nn.ELU(), nn.Linear(32, 32), nn.ELU(), nn.Linear(32, 1), ) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(net.parameters(), lr=0.01) train_loss = [] train_accuracy = [] iters = 420 X_train_t = torch.from_numpy(X_train).float() Y_train_t = torch.from_numpy(Y_train).float() for i in range(iters): y_pred = net(X_train_t) y_pred = y_pred.squeeze(-1) loss = criterion(y_pred, Y_train_t) loss.backward() optimizer.step()> your discussion# your code (bonus)4.2 Try it yourself! Now try to play around with the neural network yourself: you are free to experiment with whatever you think is best exploring. Your goal is to come up with three modifications to the provided network architecture and to compare the performance of your modified models. As potential modifications you could experiment with changing the depth or width of the model, modifying the activation function, learning rate, optimizer, etc.Report your results with some appropriate plots and with a brief presentation of what you observe.# your codeInput Tables# Input table: service_territory_eia861 columns_serv_terr = [ 'utility_id_eia', 'utility_name_eia', 'report_date','county', 'state' ] data_serv_terr = [ (3989, 'City of Colorado Springs - CO', '2017-01-01', 'El Paso', 'CO'), (3989, 'City of Colorado Springs - CO', '2017-01-01', 'Teller', 'CO'), (3989, 'City of Colorado Springs - (CO)', '2018-01-01', 'El Paso', 'CO'), (3989, 'City of Colorado Springs - (CO)', '2018-01-01', 'Teller', 'CO') ] service_territory_eia861 = pd.DataFrame( data_serv_terr, columns=columns_serv_terr ) # Input table: sales_eia861 columns_serv_terr = [ 'utility_id_eia', 'utility_name_eia', 'report_date','county', 'state', 'sales' ] data_serv_terr = [ (3989, 'City of Colorado Springs - (CO)', '2017-01-01', 'El Paso', 'CO', 127682), (3989, 'City of Colorado Springs - (CO)', '2017-01-01', 'Teller', 'CO', 733947), (3989, 'City of Colorado Springs - (CO)', '2018-01-01', 'El Paso', 'CO', 87729), (3989, 'City of Colorado Springs - (CO)', '2018-01-01', 'Teller', 'CO', 992734), # CO spring incursion on the bubble town (3989, 'City of Colorado Springs - (CO)', '2018-01-01', 'Boulder', 'CO', 66666), ] service_territory_eia861 = pd.DataFrame( data_serv_terr, columns=columns_serv_terr ) # Input table: generation_eia923 columns_gen = [ 'plant_id_eia', 'generator_id', 'report_date', 'net_generation_mwh','prime_mover_code', 'topping_bottoming_code', 'balancing_authority_code_eia' ] data_gen = [ (3, '1', '2018-01-01', 10738., 'ST', 'T', 'SOCO'), (3, '1', '2018-02-01', -348., 'ST', 'T', 'SOCO'), (3, '1', '2018-03-01', -414., 'ST', 'T', 'SOCO'), (3, '1', '2018-04-01', -411., 'ST', 'T', 'SOCO'), (3, '1', '2018-05-01', pd.NA, 'CT', 'T', 'SOCO'), (3, '1', '2018-06-01', -607., 'ST', 'Top', 'SOCO'), (3, '1', '2018-07-01', 5022., 'ST', 'Top', 'SOCO'), (3, '1', '2018-08-01', -689., 'ST', 'T', 'SOCO'), (3, '1', '2018-09-01', 6718., 'ST', 'T', 'SOCO'), (3, '1', '2018-10-01', 3877., 'ST', 'T', 'SOCO'), (3, '1', '2018-11-01', pd.NA, 'ST', 'T', 'SOCO'), (3, '1', '2018-12-01', -494., 'ST', 'T', 'SOCO')] generation_eia923 = pd.DataFrame( data_gen, columns=columns_gen ) # Input table: generators_eia860 columns_gens = [ 'plant_id_eia', 'generator_id', 'report_date', 'capacity_mw','prime_mover_code', 'utility_id_eia', 'utility_name_eia', 'topping_bottoming_code', 'state' # we talked about renaming this to be state_plant ] data_gens = [ (3, '1', '2018-01-01', 153.1, 
'ST', 195, 'Alabama Power Co', 'T', 'AL'), (3, '1', '2017-01-01', 153.1, 'ST', 195, 'Alabama Power Co', 'T', 'AL'), (3, '2', '2017-01-01', 50, 'ST', 195, 'Alabama Power Co', 'B', 'ALL'), (3, '2', '2018-01-01', 50, 'ST', 195, 'Alabama Power Co', 'B', 'AL'), ] generators_eia860 = pd.DataFrame( data_gens, columns=columns_gens ) # Input table: boiler_generator_assn_eia860 columns_bga = [ 'plant_id_eia', 'generator_id', 'report_date', 'boiler_id', ] data_bga = [ (3, '1', '2018-01-01', '1ST',), (3, '1', '2017-01-01', '1ST',), (3, '2', '2017-01-01', '2ST',), (3, '2', '2018-01-01', '2ST',), # oooo new plant/gen (4, 'a', '2017-01-01', 'a1',), (4, 'a', '2017-01-01', 'a2',), (4, 'a', '2017-01-01', 'a2',), (4, 'b', '2017-01-01', 'b1',) ] boiler_generator_assn_eia860 = pd.DataFrame( data_bga, columns=columns_bga )Output tables (post-harvest)# Output table: plant_entity_eia860 # Inputs: generators_eia860 + generation_eia923 # PK columns: 'plant_id_eia' # Harvested column: 'prime_mover_code' idx_cols_gen_entity = [ 'plant_id_eia', ] columns_gen_entity = idx_cols_gen_entity + ['state', 'balancing_authority_code_eia'] data_gen_entity = [ (3, 'AL','SOCO'), (4, pd.NA, pd.NA,), ] generator_entity_eia860 = pd.DataFrame( data_gen_entity, columns=columns_gen_entity ) # Output table: generator_entity_eia860 # Inputs: generators_eia860 + generation_eia923 # PK columns: 'plant_id_eia', 'generator_id' # Harvested column: 'prime_mover_code' idx_cols_gen_entity = [ 'plant_id_eia', 'generator_id', ] columns_gen_entity = idx_cols_gen_entity + ['prime_mover_code', 'topping_bottoming_code'] data_gen_entity = [ (3, '1', 'ST', 'T'), (3, '2', 'ST', 'B'), (4, 'a', pd.NA, pd.NA,), ] generator_entity_eia860 = pd.DataFrame( data_gen_entity, columns=columns_gen_entity ) # Output table: generators_eia860 (annual generator table) # Inputs: generators_eia860 # PK cols: 'plant_id_eia', 'generator_id', 'report_date', # Harvested cols: 'capacity_mw' idx_cols_gen_annual = [ 'plant_id_eia', 'generator_id', 'report_date', ] columns_gens = idx_cols_gen_annual + ['capacity_mw'] data_gens = [ (3, '1', '2018-01-01', 153.1), (3, '1', '2017-01-01', 153.1), (3, '2', '2018-01-01', 50), (3, '2', '2017-01-01', 50), (4, 'a', '2017-01-01', pd.NA,), (4, 'b', '2017-01-01', pd.NA,), ] generators_eia860 = pd.DataFrame( data_gens, columns=columns_gens ) # Output: utility_entity_eia table # Inputs: utility_ent_eia, sales_eia861, generators_eia860 # Idx_cols: 'utility_id_eia', columns_util_ent = [ 'utility_id_eia', 'utility_name_eia', ] data_util_ent = [ (3989, 'City of Colorado Springs - (CO)'), (195, 'Alabama Power Co') ] utility_ent_eia = pd.DataFrame( data_util_ent, columns=columns_util_ent ) # Association table: grab all instances of a set of primary keys # Note: I believe the only difference w/ these association tables is # that we basically just want to 'harvest' the PKs # Output: utility_assn_eia table # Inputs: service_territory_eia861, sales_eia861 # PKs: 'utility_id_eia', 'report_date', 'county', 'state' columns_util_ent = [ 'utility_id_eia', 'report_date', 'county', 'state' ] data_util_ent = [ (3989, '2017-01-01', 'El Paso', 'CO'), (3989, '2017-01-01', 'Teller', 'CO'), (3989, '2018-01-01', 'El Paso', 'CO'), (3989, '2018-01-01', 'Teller', 'CO'), (3989, '2018-01-01', 'Boulder', 'CO') ] utility_entity_eia = pd.DataFrame( data_util_ent, columns=columns_util_ent ) # Output: generation table (data table, not entity table) # Inputs: generation_eia923 (w/ cols removed via harvesting) columns_gen = [ 'plant_id_eia', 'generator_id', 'report_date', 
'net_generation_mwh', ] data_gen = [ (3, '1', '2018-01-01', 10738.), (3, '1', '2018-02-01', -348.), (3, '1', '2018-03-01', -414.), (3, '1', '2018-04-01', -411.), (3, '1', '2018-05-01', pd.NA), (3, '1', '2018-06-01', -607.), (3, '1', '2018-07-01', 5022.), (3, '1', '2018-08-01', -689.), (3, '1', '2018-09-01', 6718.), (3, '1', '2018-10-01', 3877.), (3, '1', '2018-11-01', pd.NA), (3, '1', '2018-12-01', -494.)] generation_eia923 = pd.DataFrame( data_gen, columns=columns_gen ) # Output table: sales_eia861 (data table, not entity table) # Inputs: sales_eia861 (w/ cols removed via harvesting) # FK: 'utility_id_eia', 'report_date','county', 'state' columns_sales = [ 'utility_id_eia', 'report_date','county', 'state', 'sales' ] data_sales = [ (3989, '2017-01-01', '', 'CO', 127682), (3989, '2017-01-01', 'Teller', 'CO', 733947), (3989, '2018-01-01', '', 'CO', 87729), (3989, '2018-01-01', 'Teller', 'CO', 992734), # CO spring incursion on the bubble town (3989, '2018-01-01', 'Boulder', 'CO', 66666), ] sales_eia861 = pd.DataFrame( data_sales, columns=columns_sales ) # Output: boiler_generator_assn_eia860 # Input: boiler_generator_assn_eia860 columns_bga = [ 'plant_id_eia', 'generator_id', 'report_date', 'boiler_id', ] data_bga = [ (3, '1', '2018-01-01', '1ST',), (3, '1', '2017-01-01', '1ST',), (3, '2', '2017-01-01', '2ST',), (3, '2', '2018-01-01', '2ST',), (4, 'a', '2017-01-01', 'a1',), (4, 'a', '2017-01-01', 'a2',), (4, 'a', '2017-01-01', 'a2',), (4, 'b', '2017-01-01', 'b1',) ] boiler_generator_assn_eia860 = pd.DataFrame( data_bga, columns=columns_bga )Project 2: Analyzing IMDb Data_Author: (DC)_--- For project two, you will complete a series of exercises exploring movie rating data from IMDb.For these exercises, you will be conducting basic exploratory data analysis on IMDB's movie data, looking to answer such questions as:What is the average rating per genre?How many different actors are in a movie?This process will help you practice your data analysis skills while becoming comfortable with Pandas. 
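As a sketch of the two guiding questions (added here, not one of the graded answers; it assumes `imdb_1000.csv` has `genre`, `star_rating` and `actors_list` columns, the last one storing the cast as a Python-style list string):

import ast
import pandas as pd

movies = pd.read_csv('./data/imdb_1000.csv')

# average star rating per genre
print(movies.groupby('genre').star_rating.mean())

# number of actors credited per movie (actors_list is parsed from its string form)
print(movies.actors_list.apply(lambda s: len(ast.literal_eval(s))).head())

The exercises below walk through these and related questions step by step.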
Basic levelimport pandas as pd import matplotlib.pyplot as plt %matplotlib inlineRead in 'imdb_1000.csv' and store it in a DataFrame named movies.movies = pd.read_csv('./data/imdb_1000.csv') movies.head()Check the number of rows and columns.# Answer: #create data frame: df = pd.DataFrame(movies, index = None) #check rows: rows = len(df.axes[0]) #print rows: print(rows)Check the data type of each column.# Answer: dataTypeSeries = movies.dtypes #print data type: print(dataTypeSeries)Calculate the average movie duration.# Answer: movies.duration.mean() #answer: = 120.97957099080695Sort the DataFrame by duration to find the shortest and longest movies.# Answer: movies.sort('duration').head(1) # movies.sort('duration').tail(1)Create a histogram of duration, choosing an "appropriate" number of bins.# Answer: movies.duration.plot(kind='hist', bins=20)Use a box plot to display that same data.# Answer: movies.duration.plot(kind='box')Intermediate level Count how many movies have each of the content ratings.# Answer: movies.content_rating.value_counts()Use a visualization to display that same data, including a title and x and y labels.# Answer: movies.content_rating.value_counts().plot(kind='bar', title='Top 1000 Movies by Content Rating') plt.xlabel('Content Rating') plt.ylabel('Number of Movies')Convert the following content ratings to "UNRATED": NOT RATED, APPROVED, PASSED, GP.# Answer: movies.content_rating.replace(['NOT RATED', 'APPROVED', 'PASSED', 'GP'], 'UNRATED', inplace=True)Convert the following content ratings to "NC-17": X, TV-MA.# Answer: movies.content_rating.replace(['X', 'TV-MA'], 'NC-17', inplace=True)Count the number of missing values in each column.# Answer: movies.isnull().sum()If there are missing values: examine them, then fill them in with "reasonable" values.# Answer: #identifying misisng values movies[movies.content_rating.isnull()] #adding fill movies.content_rating.fillna('UNRATED', inplace=True)Calculate the average star rating for movies 2 hours or longer, and compare that with the average star rating for movies shorter than 2 hours.# Answer: #average star rating movies[movies.duration >= 120].star_rating.mean() #comparison movies[movies.duration < 120].star_rating.mean()Use a visualization to detect whether there is a relationship between duration and star rating.# Answer: movies.plot(kind='scatter', x='star_rating', y='duration', alpha=0.2)Calculate the average duration for each genre.# Answer: movies = pd.read_csv('./data/imdb_1000.csv') movies.groupby('genre').duration.mean() #results: genre Action 126.485294 Adventure 134.840000 Animation 96.596774 Biography 131.844156 Comedy 107.602564 Crime 122.298387 Drama 126.539568 Family 107.500000 Fantasy 112.000000 Film-Noir 97.333333 History 66.000000 Horror 102.517241 Mystery 115.625000 Sci-Fi 109.000000 Thriller 114.200000 Western 136.666667 Name: duration, dtype: float64Advanced level Visualize the relationship between content rating and duration.# Answer: #content rating: movies.boxplot(column='duration', by='content_rating') #duration: movies.duration.hist(by=movies.content_rating, sharex=True)Determine the top rated movie (by star rating) for each genre.# Answer: movies.sort('star_rating', ascending=False).groupby('genre').title.first() movies.groupby('genre').title.first()Check if there are multiple movies with the same title, and if so, determine if they are actually duplicates.# Answer: dupe_titles = movies[movies.title.duplicated()].title movies[movies.title.isin(dupe_titles)]Calculate the average star rating for 
each genre, but only include genres with at least 10 movies Option 1: manually create a list of relevant genres, then filter using that list# Answer: movies.genre.value_counts() top_genres = ['Drama', 'Comedy', 'Action', 'Crime', 'Biography', 'Adventure', 'Animation', 'Horror', 'Mystery'] movies[movies.genre.isin(top_genres)].groupby('genre').star_rating.mean()Option 2: automatically create a list of relevant genres by saving the value_counts and then filtering# Answer: genre_counts = movies.genre.value_counts() top_genres = genre_counts[genre_counts >= 10].index movies[movies.genre.isin(top_genres)].groupby('genre').star_rating.mean()Option 3: calculate the average star rating for all genres, then filter using a boolean Series# Answer: movies.groupby('genre').star_rating.mean()[movies.genre.value_counts() >= 10]Option 4: aggregate by count and mean, then filter using the count# Answer: genre_ratings = movies.groupby('genre').star_rating.agg(['count', 'mean']) genre_ratings[genre_ratings['count'] >= 10]Load GBIS inversion result into HDF5 format%matplotlib inline import os import numpy as np import pyproj import utm from matplotlib import pyplot as plt from mintpy.utils import readfile from mintpy import load_gbis, view model_dir = os.path.expanduser('~/Papers/2021_Kirishima/figs_src/model') os.chdir(model_dir) print('Go to directory', model_dir)Go to directory /Users/yunjunz/Papers/2021_Kirishima/figs_src/modelload GBIS result into HDF5load_gbis.main(['Shinmoe2008post/invert_1_2_C/invert_1_2_C.mat']) load_gbis.main(['Shinmoe2017pre/invert_1_2_C/invert_1_2_C.mat']) load_gbis.main(['Iwo2017pre/invert_1_2_C/invert_1_2_C.mat']) load_gbis.main(['Iwo2017post/invert_1_2_C_C2/invert_1_2_C_C2.mat'])read mat file: /Users/yunjunz/Papers/2021_Kirishima/figs_src/model/Shinmoe2008post/invert_1_2_C/invert_1_2_C.mat number of output HDF5 file: 2 creating figure in size of [12, 6] ------------------------------ read mask from file: /Users/yunjunz/Papers/2021_Kirishima/figs_src/model/data/KirishimaAlosAT424_20080929_20100705.mat delete exsited file: /Users/yunjunz/Papers/2021_Kirishima/figs_src/model/Shinmoe2008post/invert_1_2_C/KirishimaAlosAT424_20080929_20100705.h5 create HDF5 file: /Users/yunjunz/Papers/2021_Kirishima/figs_src/model/Shinmoe2008post/invert_1_2_C/KirishimaAlosAT424_20080929_20100705.h5 with w mode create dataset /hgt of float32 in size of (361, 361) with compression=None create dataset /data of float32 in size of (361, 361) with compression=None create dataset /model of float32 in size of (361, 361) with compression=None create dataset /residual of float32 in size of (361, 361) with compression=None finishe[...]Backup 1: Plot observations with height below / above the free surface - Iwo-yamafname1 = os.path.join(model_dir, 'Iwo2017pre/invert_1_2_C/KirishimaAlos2AT131_20150106_20171010.h5') fname2 = os.path.join(model_dir, 'Iwo2017pre/invert_1_2_C/KirishimaAlos2DT23_20150209_20170918.h5') # Figure 2 bounding box in X/Y S, N, W, E = 31.939, 31.954, 130.843, 130.861 ref_lat, ref_lon = 31.947, 130.853 ref_x, ref_y = utm.from_latlon(ref_lat, ref_lon)[:2] pts = [[N, W], [N, E], [S, E], [S, W]] xs = [] ys = [] for pt in pts: x, y = utm.from_latlon(pt[0], pt[1])[:2] xs.append(x - ref_x) ys.append(y - ref_y) print('X min/max: {:.1f} / {:.1f}'.format(min(xs), max(xs))) print('Y min/max: {:.1f} / {:.1f}'.format(min(ys), max(ys))) # view options cmd = f'view.py {fname1} data --noreference --noverbose --figtitle Iwo-yama --nocbar --notick ' cmd += ' --sub-lat {} {} --sub-lon {} {} '.format(S, N, W, 
E) # Fig. 2 #cmd += ' --sub-lat 31.935 31.96 --sub-lon 130.832 130.870 ' # GBIS bounding box atr, inps = view.prep_slice(cmd)[1:] extent = (inps.geo_box[0], inps.geo_box[2], inps.geo_box[3], inps.geo_box[1]) dem = readfile.read(fname1, datasetName='hgt', box=inps.pix_box)[0] # plot fig, axs = plt.subplots(nrows=1, ncols=2, figsize=[12, 5]) for i, (ax, fname, label) in enumerate(zip(axs, [fname1, fname2], ['asc', 'desc'])): data = readfile.read(fname, datasetName='data', box=inps.pix_box)[0] view.plot_slice(ax, data, atr, inps) ax.imshow(dem < 1300, vmin=0, vmax=1, cmap='gray_r', origin='upper', extent=extent, alpha=0.2, zorder=2) ax.set_title(label, fontsize=16) plt.show()X min/max: -958.5 / 771.5 Y min/max: -903.1 / 789.0Backup 2: Plot observations below / above the free surface - Shinmoe-dakefname1 = os.path.join(model_dir, 'Shinmoe2008post/invert_1_2_C/KirishimaAlosAT424_20080929_20100705.h5') fname2 = os.path.join(model_dir, 'Shinmoe2008post/invert_1_2_C/KirishimaAlosDT73_20081012_20100302.h5') # Figure 2 bounding box in X/Y S, N, W, E = 31.8975, 31.925, 130.868, 130.902 ref_lat, ref_lon = 31.91, 130.88 ref_x, ref_y = utm.from_latlon(ref_lat, ref_lon)[:2] pts = [[N, W], [N, E], [S, E], [S, W]] xs = [] ys = [] for pt in pts: x, y = utm.from_latlon(pt[0], pt[1])[:2] xs.append(x - ref_x) ys.append(y - ref_y) print('X min/max: {:.1f} / {:.1f}'.format(min(xs), max(xs))) print('Y min/max: {:.1f} / {:.1f}'.format(min(ys), max(ys))) # view options cmd = f'view.py {fname1} data --noreference --noverbose --figtitle Shinmoe-dake --nocbar --notick ' cmd += ' --sub-lat 31.8975 31.925 --sub-lon 130.868 130.902 ' # Fig. 2 #cmd += ' --sub-lat 31.935 31.96 --sub-lon 130.832 130.870 ' # GBIS bounding box atr, inps = view.prep_slice(cmd)[1:] extent = (inps.geo_box[0], inps.geo_box[2], inps.geo_box[3], inps.geo_box[1]) dem = readfile.read(fname1, datasetName='hgt', box=inps.pix_box)[0] # plot fig, axs = plt.subplots(nrows=1, ncols=2, figsize=[12, 5]) for i, (ax, fname, label) in enumerate(zip(axs, [fname1, fname2], ['asc', 'desc'])): data = readfile.read(fname, datasetName='data', box=inps.pix_box)[0] view.plot_slice(ax, data, atr, inps) ax.imshow(dem < 1100, vmin=0, vmax=1, cmap='gray_r', origin='upper', extent=extent, alpha=0.3, zorder=2) ax.set_title(label, fontsize=16) plt.show()X min/max: -1163.5 / 2104.9 Y min/max: -1405.5 / 1699.3Super Piano 4 Google TransformerXL Huge thanks and all the credit for this colab go out to on whose repo and code it is based: https://github.com/AniketRajpoot/DeepMusicGeneration Setup and Prep#@title Import Modules import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import music21 import os #import midifile # pre_process import numpy as np from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard from enum import Enum import matplotlib.pyplot as plt from typing import * import math import time import pickle #import modules #import XL_class #@title Define Main Constants #specifying data paths path = '/content' BPB = 4 # beats per bar TIMESIG = f'{BPB}/4' # default time signature PIANO_RANGE = (21, 108) NOTE_RANGE = (1,127) VALTSEP = -1 # separator value for numpy encoding VALTCONT = -2 # numpy value for TCONT - needed for compressing chord array SAMPLE_FREQ = 4 NOTE_SIZE = 128 DUR_SIZE = (10*BPB*SAMPLE_FREQ)+1 # Max length - 8 bars. Or 16 beats/quarternotes MAX_NOTE_DUR = (8*BPB*SAMPLE_FREQ) #tokenizing BOS = 'xxbos' PAD = 'xxpad' EOS = 'xxeos' #MASK = 'xxmask' # Used for BERT masked language modeling. 
#CSEQ = 'xxcseq' # Used for Seq2Seq translation - denotes start of chord sequence #MSEQ = 'xxmseq' # Used for Seq2Seq translation - denotes start of melody sequence #S2SCLS = 'xxs2scls' # deprecated #NSCLS = 'xxnscls' # deprecated SEP = 'xxsep' IN = 'xxni' #null instrument SPECIAL_TOKS = [BOS, PAD, EOS, SEP,IN] NOTE_TOKS = [f'n{i}' for i in range(NOTE_SIZE)] DUR_TOKS = [f'd{i}' for i in range(DUR_SIZE)] NOTE_START, NOTE_END = NOTE_TOKS[0], NOTE_TOKS[-1] DUR_START, DUR_END = DUR_TOKS[0], DUR_TOKS[-1] MTEMPO_SIZE = 10 MTEMPO_OFF = 'mt0' MTEMPO_TOKS = [f'mt{i}' for i in range(MTEMPO_SIZE)] SEQType = Enum('SEQType', 'Mask, Sentence, Melody, Chords, Empty') ACCEP_INS = dict() ACCEP_INS['Piano'] = 0 ACCEP_INS['Acoustic Bass'] = 1 ACCEP_INS['Acoustic Guitar'] = 2 ACCEP_INS['Violin'] = 3 ACCEP_INS['Flute'] = 4 ACCEP_INS['Contrabass'] = 5 ACCEP_INS['Trumpet'] = 6 #@title Check GPU gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ') print('and then re-execute this cell.') else: print(gpu_info) #@title Check Memory from psutil import virtual_memory ram_gb = virtual_memory().total / 1e9 print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb)) if ram_gb < 20: print('To enable a high-RAM runtime, select the Runtime > "Change runtime type"') print('menu, and then select High-RAM in the Runtime shape dropdown. Then, ') print('re-execute this cell.') else: print('You are using a high-RAM runtime!') #@title Functions 1 from enum import Enum import music21 PIANO_TYPES = list(range(24)) + list(range(80, 96)) # Piano, Synths PLUCK_TYPES = list(range(24, 40)) + list(range(104, 112)) # Guitar, Bass, Ethnic BRIGHT_TYPES = list(range(40, 56)) + list(range(56, 80)) PIANO_RANGE = (21, 109) # https://en.wikipedia.org/wiki/Scientific_pitch_notation #Using enums in python class Track(Enum): PIANO = 0 # discrete instruments - keyboard, woodwinds PLUCK = 1 # continuous instruments with pitch bend: violin, trombone, synths BRIGHT = 2 PERC = 3 UNDEF = 4 ype2inst = { # use print_music21_instruments() to see supported types Track.PIANO: 0, # Piano Track.PLUCK: 24, # Guitar Track.BRIGHT: 40, # Violin Track.PERC: 114, # Steel Drum } # INFO_TYPES = set(['TIME_SIGNATURE', 'KEY_SIGNATURE']) INFO_TYPES = set(['TIME_SIGNATURE', 'KEY_SIGNATURE', 'SET_TEMPO']) def file2mf(fp): mf = music21.midi.MidiFile() if isinstance(fp, bytes): mf.readstr(fp) else: mf.open(fp) mf.read() mf.close() return mf def mf2stream(mf): return music21.midi.translate.midiFileToStream(mf) def is_empty_midi(fp): if fp is None: return False mf = file2mf(fp) return not any([t.hasNotes() for t in mf.tracks]) def num_piano_tracks(fp): music_file = file2mf(fp) note_tracks = [t for t in music_file.tracks if t.hasNotes() and get_track_type(t) == Track.PIANO] return len(note_tracks) def is_channel(t, c_val): return any([c == c_val for c in t.getChannels()]) def track_sort(t): # sort by 1. variation of pitch, 2. 
number of notes return len(unique_track_notes(t)), len(t.events) def is_piano_note(pitch): return (pitch >= PIANO_RANGE[0]) and (pitch < PIANO_RANGE[1]) def unique_track_notes(t): return { e.pitch for e in t.events if e.pitch is not None } def compress_midi_file(fp, cutoff=6, min_variation=3, supported_types=set([Track.PIANO, Track.PLUCK, Track.BRIGHT])): music_file = file2mf(fp) info_tracks = [t for t in music_file.tracks if not t.hasNotes()] note_tracks = [t for t in music_file.tracks if t.hasNotes()] if len(note_tracks) > cutoff: note_tracks = sorted(note_tracks, key=track_sort, reverse=True) supported_tracks = [] for idx,t in enumerate(note_tracks): if len(supported_tracks) >= cutoff: break track_type = get_track_type(t) if track_type not in supported_types: continue pitch_set = unique_track_notes(t) if (len(pitch_set) < min_variation): continue # must have more than x unique notes if not all(map(is_piano_note, pitch_set)): continue # must not contain midi notes outside of piano range # if track_type == Track.UNDEF: print('Could not designate track:', fp, t) change_track_instrument(t, type2inst[track_type]) supported_tracks.append(t) if not supported_tracks: return None music_file.tracks = info_tracks + supported_tracks return music_file def get_track_type(t): if is_channel(t, 10): return Track.PERC i = get_track_instrument(t) if i in PIANO_TYPES: return Track.PIANO if i in PLUCK_TYPES: return Track.PLUCK if i in BRIGHT_TYPES: return Track.BRIGHT return Track.UNDEF def get_track_instrument(t): for idx,e in enumerate(t.events): if e.type == 'PROGRAM_CHANGE': return e.data return None def change_track_instrument(t, value): for idx,e in enumerate(t.events): if e.type == 'PROGRAM_CHANGE': e.data = value def print_music21_instruments(): for i in range(200): try: print(i, music21.instrument.instrumentFromMidiProgram(i)) except: pass #@title Functions 2 def file2stream(fp): if isinstance(fp, music21.midi.MidiFile): return music21.midi.translate.midiFileToStream(fp) return music21.converter.parse(fp) def npenc2stream(arr,rev_uniq_ins,bpm=120): "Converts numpy encoding to music21 stream" chordarr = npenc2chordarr(np.array(arr)) # 1. return chordarr2stream(chordarr,rev_uniq_ins,bpm=bpm) # 2. # 2. 
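# (Added note, not in the original notebook) Encoding direction of the pipeline:
#   stream2chordarr below turns a music21 stream into a (timestep x part x pitch)
#   array holding note durations (VALTCONT marks a held note), and chordarr2npenc
#   flattens that array into [note, duration, instrument] rows, inserting
#   [VALTSEP, wait, -2] rows for the gaps between time steps.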
def stream2chordarr(s, note_size=NOTE_SIZE, sample_freq=SAMPLE_FREQ, max_note_dur=MAX_NOTE_DUR): "Converts music21.Stream to 1-hot numpy array" # assuming 4/4 time # note x instrument x pitch # FYI: midi middle C value=60 # (AS) TODO: need to order by instruments most played and filter out percussion or include the channel highest_time = max(s.flat.getElementsByClass('Note').highestTime, s.flat.getElementsByClass('Chord').highestTime) maxTimeStep = round(highest_time * sample_freq)+1 score_arr = np.zeros((maxTimeStep, len(s.parts), NOTE_SIZE)) def note_data(pitch, note): return (pitch.midi, int(round(note.offset*sample_freq)), int(round(note.duration.quarterLength*sample_freq))) ins=dict() for idx,part in enumerate(s.parts): notes=[] iterate = False for elem in part.flat: if isinstance(elem,music21.instrument.Instrument): if elem.instrumentName in ACCEP_INS.keys(): ins[idx] = elem.instrumentName iterate = True else : break if isinstance(elem, music21.note.Note): notes.append(note_data(elem.pitch, elem)) if isinstance(elem, music21.chord.Chord): for p in elem.pitches: notes.append(note_data(p, elem)) # sort notes by offset (1), duration (2) so that hits are not overwritten and longer notes have priority notes_sorted = sorted(notes, key=lambda x: (x[1], x[2])) if(iterate == True): for n in notes_sorted: if n is None: continue pitch,offset,duration = n if max_note_dur is not None and duration > max_note_dur: duration = max_note_dur score_arr[offset,idx, pitch] = duration score_arr[offset+1:offset+duration, idx, pitch] = VALTCONT # Continue holding not return score_arr,ins def chordarr2npenc(chordarr, skip_last_rest=True): # combine instruments result = [] wait_count = 0 for idx,timestep in enumerate(chordarr): flat_time = timestep2npenc(timestep) if len(flat_time) == 0: wait_count += 1 else: # pitch, octave, duration, instrument if wait_count > 0: result.append([VALTSEP, wait_count,-2]) result.extend(flat_time) wait_count = 1 if wait_count > 0 and not skip_last_rest: result.append([VALTSEP, wait_count,-2]) return np.array(result,dtype = int) # return np.array(result, dtype=int).reshape(-1, 2) # reshaping. Just in case result is empty # Note: not worrying about overlaps - as notes will still play. just look tied # http://web.mit.edu/music21/doc/moduleReference/moduleStream.html#music21.stream.Stream.getOverlaps def timestep2npenc(timestep, note_range=NOTE_RANGE, enc_type='full'): # inst x pitch notes = [] for i,n in zip(*timestep.nonzero()): d = timestep[i,n] if d < 0: continue # only supporting short duration encoding for now if n < note_range[0] or n >= note_range[1]: continue # must be within midi range notes.append([n,d,i]) notes = sorted(notes, key=lambda x: x[0], reverse=True) # sort by note (highest to lowest) if enc_type is None: # note, duration return [n[:2] for n in notes] if enc_type == 'parts': # note, duration, part return [n for n in notes] if enc_type == 'full': # note_class, duration , instrument return [[n, d, i] for n,d,i in notes] ###################Decoding Phase########################################################## # 1. 
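# (Added note, not in the original notebook) Decoding reverses the encoding above:
#   npenc2chordarr (step 1) rebuilds the (timestep x instrument x pitch) duration
#   array from the [note, duration, instrument] rows, advancing time on VALTSEP
#   rows, and chordarr2stream / partarr2stream (step 2) turn that array back into
#   a music21 Score with one Part per instrument, as wired together by npenc2stream.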
def npenc2chordarr(npenc,note_size=NOTE_SIZE): num_instruments = 1 if npenc.shape[1] <= 2 else npenc.max(axis=0)[-1] max_len = npenc_len(npenc) # score_arr = (steps, inst, note) score_arr = np.zeros((max_len, num_instruments + 1, note_size)) idx = 0 for step in npenc: n,d,i = (step.tolist()+[0])[:3] # or n,d,i if n < VALTSEP: continue # special token if n == VALTSEP: idx += d continue score_arr[idx,i,n] = d return score_arr def npenc_len(npenc): duration = 0 for t in npenc: if t[0] == VALTSEP: duration += t[1] return duration + 1 # 2. def chordarr2stream(arr,rev_uniq_ins,sample_freq=SAMPLE_FREQ, bpm=120): duration = music21.duration.Duration(1. / sample_freq) stream = music21.stream.Score() stream.append(music21.meter.TimeSignature(TIMESIG)) stream.append(music21.tempo.MetronomeMark(number=bpm)) stream.append(music21.key.KeySignature(0)) for inst in range(arr.shape[1]): p = partarr2stream(arr[:,inst,:],inst,rev_uniq_ins,duration) stream.append(p) stream = stream.transpose(0) return stream # 2b. def partarr2stream(partarr,inst,rev_uniq_ins,duration): "convert instrument part to music21 chords" # part = music21.stream.Part() # part.append(music21.instrument.Piano()) # part_append_duration_notes(partarr, duration, part) # notes already have duration calculated l = len(rev_uniq_ins) inst = inst%l part = music21.stream.Part() if(rev_uniq_ins[inst] == 'Piano'): part.append(music21.instrument.Piano()) elif(rev_uniq_ins[inst] == 'Trumpet'): part.append(music21.instrument.Trumpet()) #elif(rev_uniq_ins[inst] == 'Flute'): #part.append(music21.instrument.Flute) elif(rev_uniq_ins[inst] == 'Trumpet'): part.append(music21.instrument.Trumpet()) elif(rev_uniq_ins[inst] == 'Violin'): part.append(music21.instrument.Violin()) elif(rev_uniq_ins[inst] == 'Acoustic Bass'): part.append(music21.instrument.AcousticBass()) elif(rev_uniq_ins[inst] == 'Cello'): part.append(music21.instrument.Contrabass()) elif(rev_uniq_ins[inst] == 'Acoustic Guitar'): part.append(music21.instrument.AcousticGuitar()) else: part.append(music21.instrument.Piano()) part_append_duration_notes(partarr, duration, part) return part def part_append_duration_notes(partarr, duration, stream): "convert instrument part to music21 chords" for tidx,t in enumerate(partarr): note_idxs = np.where(t > 0)[0] # filter out any negative values (continuous mode) if len(note_idxs) == 0: continue notes = [] for nidx in note_idxs: note = music21.note.Note(nidx) note.duration = music21.duration.Duration(partarr[tidx,nidx]*duration.quarterLength) notes.append(note) for g in group_notes_by_duration(notes): if len(g) == 1: stream.insert(tidx*duration.quarterLength, g[0]) else: chord = music21.chord.Chord(g) stream.insert(tidx*duration.quarterLength, chord) return stream from itertools import groupby # combining notes with different durations into a single chord may overwrite conflicting durations. 
Example: aylictal/still-waters-run-deep def group_notes_by_duration(notes): "separate notes into chord groups" keyfunc = lambda n: n.duration.quarterLength notes = sorted(notes, key=keyfunc) return [list(g) for k,g in groupby(notes, keyfunc)] # Midi -> npenc Conversion helpers def is_valid_npenc(npenc, note_range=PIANO_RANGE, max_dur=DUR_SIZE, min_notes=32, input_path=None, verbose=True): if len(npenc) < min_notes: if verbose: print('Sequence too short:', len(npenc), input_path) return False if (npenc[:,1] >= max_dur).any(): if verbose: print(f'npenc exceeds max {max_dur} duration:', npenc[:,1].max(), input_path) return False # https://en.wikipedia.org/wiki/Scientific_pitch_notation - 88 key range - 21 = A0, 108 = C8 if ((npenc[...,0] > VALTSEP) & ((npenc[...,0] < note_range[0]) | (npenc[...,0] >= note_range[1]))).any(): print(f'npenc out of piano note range {note_range}:', input_path) return False return True # seperates overlapping notes to different tracks def remove_overlaps(stream, separate_chords=True): if not separate_chords: return stream.flat.makeVoices().voicesToParts() return separate_melody_chord(stream) # seperates notes and chords to different tracks def separate_melody_chord(stream): new_stream = music21.stream.Score() if stream.timeSignature: new_stream.append(stream.timeSignature) new_stream.append(stream.metronomeMarkBoundaries()[0][-1]) if stream.keySignature: new_stream.append(stream.keySignature) melody_part = music21.stream.Part(stream.flat.getElementsByClass('Note')) melody_part.insert(0, stream.getInstrument()) chord_part = music21.stream.Part(stream.flat.getElementsByClass('Chord')) chord_part.insert(0, stream.getInstrument()) new_stream.append(melody_part) new_stream.append(chord_part) return new_stream # processing functions for sanitizing data def compress_chordarr(chordarr): return shorten_chordarr_rests(trim_chordarr_rests(chordarr)) def trim_chordarr_rests(arr, max_rests=4, sample_freq=SAMPLE_FREQ): # max rests is in quarter notes # max 1 bar between song start and end start_idx = 0 max_sample = max_rests*sample_freq for idx,t in enumerate(arr): if (t != 0).any(): break start_idx = idx+1 end_idx = 0 for idx,t in enumerate(reversed(arr)): if (t != 0).any(): break end_idx = idx+1 start_idx = start_idx - start_idx % max_sample end_idx = end_idx - end_idx % max_sample # if start_idx > 0 or end_idx > 0: print('Trimming rests. 
Start, end:', start_idx, len(arr)-end_idx, end_idx) return arr[start_idx:(len(arr)-end_idx)] def shorten_chordarr_rests(arr, max_rests=8, sample_freq=SAMPLE_FREQ): # max rests is in quarter notes # max 2 bar pause rest_count = 0 result = [] max_sample = max_rests*sample_freq for timestep in arr: if (timestep==0).all(): rest_count += 1 else: if rest_count > max_sample: # old_count = rest_count rest_count = (rest_count % sample_freq) + max_sample # print(f'Compressing rests: {old_count} -> {rest_count}') for i in range(rest_count): result.append(np.zeros(timestep.shape)) rest_count = 0 result.append(timestep) for i in range(rest_count): result.append(np.zeros(timestep.shape)) return np.array(result) # sequence 2 sequence convenience functions def stream2npenc_parts(stream, sort_pitch=True): chordarr = stream2chordarr(stream) _,num_parts,_ = chordarr.shape parts = [part_enc(chordarr, i) for i in range(num_parts)] return sorted(parts, key=avg_pitch, reverse=True) if sort_pitch else parts def chordarr_combine_parts(parts): max_ts = max([p.shape[0] for p in parts]) parts_padded = [pad_part_to(p, max_ts) for p in parts] chordarr_comb = np.concatenate(parts_padded, axis=1) return chordarr_comb def pad_part_to(p, target_size): pad_width = ((0,target_size-p.shape[0]),(0,0),(0,0)) return np.pad(p, pad_width, 'constant') def part_enc(chordarr, part): partarr = chordarr[:,part:part+1,:] npenc = chordarr2npenc(partarr) return npenc def avg_tempo(t, sep_idx=VALTSEP): avg = t[t[:, 0] == sep_idx][:, 1].sum()/t.shape[0] avg = int(round(avg/SAMPLE_FREQ)) return 'mt'+str(min(avg, MTEMPO_SIZE-1)) def avg_pitch(t, sep_idx=VALTSEP): return t[t[:, 0] > sep_idx][:, 0].mean() #@title Functions 3 def embedding_lookup(lookup_table, x): return tf.compat.v1.nn.embedding_lookup(lookup_table, x) def normal_embedding_lookup(x, n_token, d_embed, d_proj, initializer, proj_initializer, scope='normal_embed', **kwargs): emb_scale = d_proj ** 0.5 with tf.compat.v1.variable_scope(scope): lookup_table = tf.compat.v1.get_variable('lookup_table', [n_token, d_embed], initializer=initializer) y = embedding_lookup(lookup_table, x) if d_proj != d_embed: proj_W = tf.compat.v1.get_variable('proj_W', [d_embed, d_proj], initializer=proj_initializer) y = tf.einsum('ibe,ed->ibd', y, proj_W) else: proj_W = None ret_params = [lookup_table, proj_W] y *= emb_scale return y, ret_params def normal_softmax(hidden, target, n_token, params, scope='normal_softmax', **kwargs): def _logit(x, W, b, proj): y = x if proj is not None: y = tf.einsum('ibd,ed->ibe', y, proj) return tf.einsum('ibd,nd->ibn', y, W) + b params_W, params_projs = params[0], params[1] with tf.compat.v1.variable_scope(scope): softmax_b = tf.compat.v1.get_variable('bias', [n_token], initializer=tf.zeros_initializer()) output = _logit(hidden, params_W, softmax_b, params_projs) nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) return nll, output def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) if bsz is not None: return tf.tile(pos_emb[:, None, :], [1, bsz, 1]) else: return pos_emb[:, None, :] def positionwise_FF(inp, d_model, d_inner, dropout, kernel_initializer, scope='ff', is_training=True): output = inp with tf.compat.v1.variable_scope(scope): output = tf.keras.layers.Dense(d_inner, activation=tf.nn.relu, kernel_initializer=kernel_initializer, name='layer_1')(inp) output = tf.keras.layers.Dropout(dropout, 
name='drop_1')(output, training=is_training) output = tf.keras.layers.Dense(d_model, activation=tf.nn.relu, kernel_initializer=kernel_initializer, name='layer_2')(output) output = tf.keras.layers.Dropout(dropout, name='drop_2')(output, training=is_training) output = tf.keras.layers.LayerNormalization(axis=-1)(output + inp) return output def _create_mask(qlen, mlen, same_length=False): attn_mask = tf.ones([qlen, qlen]) mask_u = tf.linalg.band_part(attn_mask, 0, -1) mask_dia = tf.linalg.band_part(attn_mask, 0, 0) attn_mask_pad = tf.zeros([qlen, mlen]) ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) if same_length: mask_l = tf.matrix_band_part(attn_mask, -1, 0) ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) return ret def _cache_mem(curr_out, prev_mem, mem_len=None): if mem_len is None or prev_mem is None: new_mem = curr_out elif mem_len == 0: return prev_mem else: new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:] return tf.stop_gradient(new_mem) def rel_shift(x): x_size = tf.shape(x) x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]]) x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]]) x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) x = tf.reshape(x, x_size) return x def rel_multihead_attn(w, r, r_w_bias, r_r_bias, attn_mask, mems, d_model, n_head, d_head, dropout, dropatt, is_training, kernel_initializer, scope='rel_attn'): scale = 1 / (d_head ** 0.5) with tf.compat.v1.variable_scope(scope): qlen = tf.shape(w)[0] rlen = tf.shape(r)[0] bsz = tf.shape(w)[1] cat = tf.concat([mems, w], 0) if mems is not None and mems.shape.ndims > 1 else w w_heads = tf.keras.layers.Dense(3 * n_head * d_head, use_bias=False, kernel_initializer=kernel_initializer, name='qkv')(cat) r_head_k = tf.keras.layers.Dense(n_head * d_head, use_bias=False, kernel_initializer=kernel_initializer, name='r')(r) w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, -1) w_head_q = w_head_q[-qlen:] klen = tf.shape(w_head_k)[0] w_head_q = tf.reshape(w_head_q, [qlen, bsz, n_head, d_head]) w_head_k = tf.reshape(w_head_k, [klen, bsz, n_head, d_head]) w_head_v = tf.reshape(w_head_v, [klen, bsz, n_head, d_head]) r_head_k = tf.reshape(r_head_k, [rlen, n_head, d_head]) rw_head_q = w_head_q + r_w_bias rr_head_q = w_head_q + r_r_bias AC = tf.einsum('ibnd,jbnd->ijbn', rw_head_q, w_head_k) BD = tf.einsum('ibnd,jnd->ijbn', rr_head_q, r_head_k) BD = rel_shift(BD) attn_score = (AC + BD) * scale attn_mask_t = attn_mask[:, :, None, None] attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t attn_prob = tf.nn.softmax(attn_score, 1) attn_prob = tf.keras.layers.Dropout(dropatt)(attn_prob, training=is_training) attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, w_head_v) size_t = tf.shape(attn_vec) attn_vec = tf.reshape(attn_vec, [size_t[0], size_t[1], n_head * d_head]) attn_out = tf.keras.layers.Dense(d_model, use_bias=False, kernel_initializer=kernel_initializer, name='o')(attn_vec) attn_out = tf.keras.layers.Dropout(dropout)(attn_out, training=is_training) output = tf.keras.layers.LayerNormalization(axis=-1)(attn_out + w) return output def transformer(dec_inp, target, mems, n_token, n_layer, d_model, d_embed, n_head, d_head, d_inner, dropout, dropatt, initializer, is_training, proj_initializer=None, mem_len=None, cutoffs=[], div_val=1, tie_projs=[], same_length=False, clamp_len=-1, input_perms=None, target_perms=None, head_target=None, untie_r=False, proj_same_dim=True, scope='transformer'): """ cutoffs: a list of python int. Cutoffs for adaptive softmax. tie_projs: a list of python bools. 
Whether to tie the projections. perms: a list of tensors. Each tensor should of size [len, bsz, bin_size]. Only used in the adaptive setting. """ new_mems = [] with tf.compat.v1.variable_scope(scope, reuse= tf.compat.v1.AUTO_REUSE): if untie_r: r_w_bias = tf.compat.v1.get_variable('r_w_bias', [n_layer, n_head, d_head], initializer=initializer) r_r_bias = tf.compat.v1.get_variable('r_r_bias', [n_layer, n_head, d_head], initializer=initializer) else: r_w_bias = tf.compat.v1.get_variable('r_w_bias', [n_head, d_head], initializer=initializer) r_r_bias = tf.compat.v1.get_variable('r_r_bias', [n_head, d_head], initializer=initializer) qlen = tf.shape(dec_inp)[0] mlen = tf.shape(mems[0])[0] if mems is not None else 0 klen = qlen + mlen if proj_initializer is None: proj_initializer = initializer embeddings, shared_params = normal_embedding_lookup( x=dec_inp, n_token=n_token, d_embed=d_embed, d_proj=d_model, initializer=initializer, proj_initializer=proj_initializer) attn_mask = _create_mask(qlen, mlen, same_length) pos_seq = tf.range(klen - 1, -1, -1.0) if clamp_len > 0: pos_seq = tf.minimum(pos_seq, clamp_len) inv_freq = 1 / (10000 ** (tf.range(0, d_model, 2.0) / d_model)) pos_emb = positional_embedding(pos_seq, inv_freq) output = tf.keras.layers.Dropout(rate=dropout)(embeddings, training=is_training) pos_emb = tf.keras.layers.Dropout(rate=dropout)(pos_emb, training=is_training) if mems is None: mems = [None] * n_layer for i in range(n_layer): # cache new mems new_mems.append(_cache_mem(output, mems[i], mem_len)) with tf.compat.v1.variable_scope('layer_{}'.format(i)): output = rel_multihead_attn( w=output, r=pos_emb, r_w_bias=r_w_bias if not untie_r else r_w_bias[i], r_r_bias=r_r_bias if not untie_r else r_r_bias[i], attn_mask=attn_mask, mems=mems[i], d_model=d_model, n_head=n_head, d_head=d_head, dropout=dropout, dropatt=dropatt, is_training=is_training, kernel_initializer=initializer) output = positionwise_FF( inp=output, d_model=d_model, d_inner=d_inner, dropout=dropout, kernel_initializer=initializer, is_training=is_training) # apply Dropout output = tf.keras.layers.Dropout(dropout)(output, training=is_training) loss, logits = normal_softmax( hidden=output, target=target, n_token=n_token, params=shared_params) return loss, logits, new_mems #@title Functions 4 class TransformerXL(object): ######################################## # initialize ######################################## def __init__(self, vocab_size, checkpoint=None, is_training=False, training_seqs=None): # load dictionary self.event2word = vocab_size # model settings self.x_len = 512 #input sequence length self.mem_len = 512 # self.n_layer = 6 self.d_embed = 768 self.d_model = 768 self.dropout = 0.1 ## self.n_head = 12 self.d_head = self.d_model // self.n_head self.d_ff = 3072 self.n_token = (self.event2word) self.learning_rate = 1e-4 ## self.group_size = 3 self.entry_len = self.group_size * self.x_len # mode self.is_training = is_training self.training_seqs = training_seqs self.checkpoint = checkpoint if self.is_training: # train from scratch or finetune self.batch_size = 8 else: # inference self.batch_size = 1 # load model self.load_model() ######################################## # load model ######################################## def load_model(self): tf.compat.v1.disable_eager_execution() # placeholders ---> train self.x = tf.compat.v1.placeholder(tf.int32, shape=[self.batch_size, None]) self.y = tf.compat.v1.placeholder(tf.int32, shape=[self.batch_size, None]) self.mems_i = [tf.compat.v1.placeholder(tf.float32, 
[self.mem_len, self.batch_size, self.d_model]) for _ in range(self.n_layer)] # placeholders ---> test self.x_t = tf.compat.v1.placeholder(tf.int32, shape=[1, None]) self.y_t = tf.compat.v1.placeholder(tf.int32, shape=[1, None]) self.mems_it = [tf.compat.v1.placeholder(tf.float32, [self.mem_len, 1, self.d_model]) for _ in range(self.n_layer)] # model self.global_step = tf.compat.v1.train.get_or_create_global_step() # initialize parameters initializer = tf.compat.v1.initializers.random_normal(stddev=0.02, seed=None) proj_initializer = tf.compat.v1.initializers.random_normal(stddev=0.01, seed=None) with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()): xx = tf.transpose(self.x, [1, 0]) yy = tf.transpose(self.y, [1, 0]) loss, self.logits, self.new_mem = transformer( dec_inp=xx, target=yy, mems=self.mems_i, n_token=self.n_token, n_layer=self.n_layer, d_model=self.d_model, d_embed=self.d_embed, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_ff, dropout=self.dropout, dropatt=self.dropout, initializer=initializer, proj_initializer=proj_initializer, is_training=self.is_training, mem_len=self.mem_len, cutoffs=[], div_val=-1, tie_projs=[], same_length=False, clamp_len=-1, input_perms=None, target_perms=None, head_target=None, untie_r=False, proj_same_dim=True) self.avg_loss = tf.reduce_mean(loss) # vars all_vars = tf.compat.v1.trainable_variables() print ('num parameters:', np.sum([np.prod(v.get_shape().as_list()) for v in all_vars])) grads = tf.gradients(self.avg_loss, all_vars) grads_and_vars = list(zip(grads, all_vars)) # gradient clipping def ClipIfNotNone(grad): if grad is None: return grad return tf.clip_by_norm(grad, 100.) grads_and_vars = [(ClipIfNotNone(grad), var) for grad, var in grads_and_vars] all_trainable_vars = tf.reduce_sum([tf.reduce_prod(v.shape) for v in tf.compat.v1.trainable_variables()]) # optimizer #warmup_steps = 0 # increase the learning rate linearly #if warmup_steps > 0: # warmup_lr = tf.compat.v1.to_float(self.global_step) / tf.compat.v1.to_float(warmup_steps) \ # * self.learning_rate #else: # warmup_lr = 0.0 decay_lr = tf.compat.v1.train.cosine_decay( self.learning_rate, global_step=self.global_step, decay_steps=200000, alpha=0.004) #lr_decay_warmup = tf.where(self.global_step < warmup_steps, # warmup_lr, decay_lr) #decay_lr = tf.compat.v1.train.cosine_decay_warmup( ## # self.learning_rate, # global_step=self.global_step, # decay_steps=200000, # warmup_steps=16000, # alpha=0.004 #) #try: #self.optimizer = tfa.optimizers.LAMB(learning_rate=decay_lr) #print('LAMBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB') #except: #self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=decay_lr) #print('ADAMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM') #pass self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=decay_lr) self.train_op = self.optimizer.apply_gradients(grads_and_vars, self.global_step) # saver self.saver = tf.compat.v1.train.Saver(max_to_keep=100) config = tf.compat.v1.ConfigProto(allow_soft_placement=True) config.gpu_options.allow_growth = True self.sess = tf.compat.v1.Session(config=config) # load pre-trained checkpoint or note if self.checkpoint: self.saver.restore(self.sess, self.checkpoint) else: self.sess.run(tf.compat.v1.global_variables_initializer()) ######################################## # train ######################################## def train(self, training_data, output_checkpoint_folder): # check output folder if not os.path.exists(output_checkpoint_folder): os.mkdir(output_checkpoint_folder) # shuffle index = np.arange(len(training_data)) 
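# NOTE: training_data is assumed (from xl_data, defined later in this notebook) to be an
# array of shape (num_entries, group_size, 2, seq_len): each entry holds group_size
# consecutive (input, target) pairs that share the Transformer-XL memory within a group.
# Shuffling the entry index below therefore reorders whole groups, not individual pairs.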
np.random.shuffle(index) training_data = training_data[index] num_batches = len(training_data) // self.batch_size st = time.time() for e in range(1000): total_loss = [] for i in range(num_batches): segments = training_data[self.batch_size*i:self.batch_size*(i+1)] batch_m = [np.zeros((self.mem_len, self.batch_size, self.d_model), dtype=np.float32) for _ in range(self.n_layer)] for j in range(self.group_size): batch_x = segments[:, j, 0, :] batch_y = segments[:, j, 1, :] # prepare feed dict feed_dict = {self.x: batch_x, self.y: batch_y} for m, m_np in zip(self.mems_i, batch_m): feed_dict[m] = m_np # run _, gs_, loss_, new_mem_ = self.sess.run([self.train_op, self.global_step, self.avg_loss, self.new_mem], feed_dict=feed_dict) batch_m = new_mem_ total_loss.append(loss_) # print ('Current lr: {}'.format(self.sess.run(self.optimizer._lr))) print('>>> Epoch: {}, Step: {}, Loss: {:.5f}, Time: {:.2f}'.format(e, gs_, loss_, time.time()-st)) print('i : ',i,' j : ',j) if not i % 500: self.saver.save(self.sess, '{}/model-{:03d}-{:.3f}'.format(output_checkpoint_folder, e, np.mean(total_loss))) print ('[epoch {} avg loss] {:.5f}'.format(e, np.mean(total_loss))) if not e % 6: self.saver.save(self.sess, '{}/model-{:03d}-{:.3f}'.format(output_checkpoint_folder, e, np.mean(total_loss))) # stop if np.mean(total_loss) <= 0.0001: break ######################################## # search strategy: temperature (re-shape) ######################################## def temperature(self, logits, temperature): probs = np.exp(logits / temperature) / np.sum(np.exp(logits / temperature)) return probs ######################################## # search strategy: nucleus (truncate) ######################################## def nucleus(self, probs, p): probs /= sum(probs) sorted_probs = np.sort(probs)[::-1] sorted_index = np.argsort(probs)[::-1] cusum_sorted_probs = np.cumsum(sorted_probs) after_threshold = cusum_sorted_probs > p if sum(after_threshold) > 0: last_index = np.where(after_threshold)[0][-1] candi_index = sorted_index[:last_index] else: candi_index = sorted_index[:3] # just assign a value candi_probs = [probs[i] for i in candi_index] candi_probs /= sum(candi_probs) word = np.random.choice(candi_index, size=1, p=candi_probs)[0] return word ######################################## # evaluate (for batch size = 1) ######################################## def evaluate(self, notes, num_notes, k, strategies, use_structure=False, init_mem = None): batch_size = 1 # initialize mem if init_mem is None: batch_m = [np.zeros((self.mem_len, batch_size, self.d_model), dtype=np.float32) for _ in range(self.n_layer)] print('new memmmmm') else: batch_m = init_mem initial_flag = True fail = 0 i = 0 while i < num_notes: if fail>200: print('Fail : ',fail) #continue # prepare input if initial_flag: temp_x = np.zeros((batch_size, len(notes[0]))) for b in range(batch_size): for z, t in enumerate(notes[b]): temp_x[b][z] = t initial_flag = False else: temp_x = np.zeros((batch_size, 1)) for b in range(batch_size): temp_x[b][0] = notes[b][-1] # prepare feed dict # inside a feed dict # placeholder : data # put input into feed_dict feed_dict = {self.x: temp_x} # put memeory into feed_dict for m, m_np in zip(self.mems_i, batch_m): feed_dict[m] = m_np # model (prediction) _logits, _new_mem = self.sess.run([self.logits, self.new_mem], feed_dict=feed_dict) #print('mem : ',_new_mem,' shape : ',len(_new_mem)) #print('shape : ',_logits.shape) logits = _logits[-1, 0] # temperature or not if k == 0: ran = float((np.random.randint(14,16))/10) else: ran = 
float((np.random.randint(7,10))/10) probs = self.temperature(logits=logits, temperature=ran) # sampling # note : the generated tokenized event #ran_n = float((np.random.randint(90,98))/100) note = self.nucleus(probs=probs, p=0.90) if note not in tokenizer.index_word: continue if (tokenizer.index_word[int(notes[0][-1])])[0] == 'n' and (tokenizer.index_word[int(note)])[0] != 'd': print((tokenizer.index_word[int(notes[0][-1])]),' : ', tokenizer.index_word[int(note)]) fail += 1 continue if (tokenizer.index_word[int(notes[0][-1])])[0] == 'd' and ((tokenizer.index_word[int(note)])[0] != 'i' and (tokenizer.index_word[int(note)]) != 'xxni'): fail += 1 print((tokenizer.index_word[int(notes[0][-1])]),' : ',tokenizer.index_word[int(note)]) continue if ((tokenizer.index_word[int(notes[0][-1])])[0] == 'i' or tokenizer.index_word[int(notes[0][-1])] == 'xxni') and ((tokenizer.index_word[int(note)])[0] != 'n' and (tokenizer.index_word[int(note)]) != 'xxsep'): fail += 1 print((tokenizer.index_word[int(notes[0][-1])]),' : ',tokenizer.index_word[int(note)]) continue if (tokenizer.index_word[int(notes[0][-1])]) == 'xxsep' and ((tokenizer.index_word[int(note)])[0] != 'd' and (tokenizer.index_word[int(note)])[0] != 'n'): fail += 1 print((tokenizer.index_word[int(notes[0][-1])]),' : ',tokenizer.index_word[int(note)]) continue # add new event to record sequence notes = np.append(notes[0], note) notes = np.reshape(notes, (1, len(notes))) #print('notes : ',notes.shape) # re-new mem batch_m = _new_mem fail = 0 i += 1 return notes[0] ######################################## # predict (for batch size = 1) ######################################## def predict(self, notes, num_notes, k, strategies, use_structure=False): prediction = self.evaluate(notes, num_notes, k, strategies, use_structure) predicted_sentence = [] for i in prediction: # print('helllllo',int(i)) i = int(i) if i < len(tokenizer.word_index) and i>0: predicted_sentence.append(tokenizer.index_word[i]) return predicted_sentence #@title Functions 5 def get_all_midi_dir(root_dir): all_midi = [] for dirName, _, fileList in os.walk(root_dir): for fname in fileList: if '.mid' in fname: all_midi.append(dirName + '/' + fname) return all_midi def get_data(notes_chords, sequence_length): # sequence_length = 100 notes_input = [] notes_output = [] shift = 1 for i in range(0, len(notes_chords) - sequence_length, 1): temp_input = '' temp_output = '' for j in range(i,i + sequence_length): temp_input += notes_chords[j] + ' ' notes_input.append(temp_input) for j in range(i+shift,i + sequence_length+shift): temp_output += notes_chords[j] + ' ' notes_output.append(temp_output) n_patterns = len(notes_input) # notes_normalized_input = np.reshape(notes_input, (n_patterns, sequence_length)) # notes_normalized_input = notes_normalized_input / float(n_vocab) #notes_output = np.array(notes_output) return (notes_input, notes_output) ######################################## # Prepare data ######################################## def xl_data(input_, output, group_size): training_data = [] pairs = [] for i in range(0, len(input_)): x, y = input_[i], output[i] pairs.append([x, y]) pairs = np.array(pairs) # put pairs into training data by groups for i in range(0, len(pairs) - group_size + 1, group_size): segment = pairs[i:i+group_size] assert len(segment) == group_size training_data.append(segment) training_data = np.array(training_data) return training_data #@title Functions 6 def check_valid_ins(ins): count = 0 ls = list(set(val for val in ins.values())) for i in ls: if i == 'Piano': 
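# Each recognized instrument family checked below (piano, bass, guitar, violin, flute) adds
# one to the count; the MIDI file is treated as valid as soon as at least two families appear.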
count+= 1 elif i == 'Acoustic Bass' or i == 'Electric Bass': count += 1 elif i == 'Acoustic Guitar' or i == 'Electric Guitar': count += 1 elif i == 'Violin': count += 1 elif i == 'Flute': count += 1 if(count>=2): return True return False #@title Functions 7 #required listsx chordarr_list = [] npenc_list = [] ins_list = [] #@title Process MIDIs #read multiple files i = 0 overall = 0 for file_name in get_all_midi_dir('/content/midis'): if overall>90: break print('Now loading, ',i,' : ',overall,': \n',file_name) try: mf = file2mf(file_name) except: continue pass try: stream =mf2stream(mf) except: continue pass i += 1 overall += 1 chordarr,ins = stream2chordarr(stream) if (not(check_valid_ins(ins))): print('Discarding File :\n', file_name) try: print('1') except: print('2') pass i -= 1 continue ins_list.append(ins) chordarr_list.append(chordarr) #@title Save Chords and Instruments Lists with open('/content/chord_list', 'wb') as filepath: pickle.dump(chordarr_list, filepath) with open('/content/ins_list', 'wb') as filepath: pickle.dump(ins_list, filepath) #@title Load Chord and Instruments Lists with open('/content/chord_list_2018', 'rb') as filepath: chordarr_list = pickle.load(filepath) with open('/content/ins_list_2018', 'rb') as filepath: ins_list = pickle.load(filepath) #@title Process Chords and Instruments into Final List print('Processing Now') #making uniq list for transformation res = list(set(val for dic in ins_list for val in dic.values())) uniq_ins = dict() for i in range(len(res)): uniq_ins[res[i]] = i INS_TOKS = [f'i{i}' for i in range(len(uniq_ins))] rev_uniq_ins = {value : key for (key, value) in uniq_ins.items()} for c in range(len(chordarr_list)): npenc = chordarr2npenc(chordarr_list[c]) for i in npenc: if(i[2] == -2): i[2] = -2 else: i[2] = uniq_ins[ins_list[c][i[2]]] npenc_list.append(npenc) #the final list or sequence final_list = [] for npenc in npenc_list: final_list.append(BOS) final_list.append(PAD) for i in range(len(npenc)): if(npenc[i][0] == -1): x = SEP else: x = 'n' + str(npenc[i][0]) if npenc[i][1] > 16: npenc[i][1] = 8 y = 'd' + str(npenc[i][1]) if(npenc[i][2] == -2): z = IN else: z = 'i' + str(npenc[i][2]) final_list.append(x) final_list.append(y) final_list.append(z) final_list.append(PAD) final_list.append(EOS) unique_notes = list(set(final_list)) n_vocab = len(set(unique_notes)) tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='') tokenizer.fit_on_texts(final_list) #@title Save Final List with open('/content/final_list_single', 'wb') as filepath: pickle.dump(final_list, filepath) #@title Load Final List with open('/content/final_list_single', 'rb') as filepath: final_list = pickle.load(filepath) final = [] count = 129 word_index = dict() index_word = dict() for item in final_list: if item in word_index: final.append(word_index[item]) continue if item[0] == 'n': final.append(int(item[1:])) word_index[item] = final[-1] index_word[final[-1]] = item else : final.append(int(count)) word_index[item] = final[-1] index_word[final[-1]] = item count += 1 #@title Define and Initialize Sequence Length sequence_length = 512 network_input,network_output = get_data(final_list,sequence_length) print(network_input.shape) print(network_output.shape) #@title Initialize Vocabulary MAX_LENGTH = n_vocab network_in = tokenizer.texts_to_sequences(network_input) network_in = tf.keras.preprocessing.sequence.pad_sequences(network_in, padding='post') network_out = tokenizer.texts_to_sequences(network_output) network_out = 
tf.keras.preprocessing.sequence.pad_sequences(network_out, padding='post') VOCAB_SIZE = len(tokenizer.word_index)+1 print(VOCAB_SIZE) #@title Initialize and Declare the Model # decoder inputs use the previous target as input # remove START_TOKEN from targets group_size = 3 data = xl_data(network_in, network_out, group_size) network_in = [] network_out = [] train_len = int(len(data)*0.7) training_data = data[:train_len] val_data = data[train_len:] # declare model model = TransformerXL( vocab_size=VOCAB_SIZE, checkpoint=None, is_training=True) model.summary() VOCAB_SIZE training_data.shape tokenizer.word_indexTrain#@title Train the Model # train model.train(training_data, output_checkpoint_folder='/content')Generate#@title Declare Prediction Model (Run only once) # Predict model_p = TransformerXL( #vocab_size=107, vocab_size=VOCAB_SIZE, checkpoint='/content/model-000-0.279', is_training=False) #@title Generate Output notes = [] num = np.random.randint(len(val_data)-(2*sequence_length)) #num = 160 num_seq = 1 lenxx = 512 for i in range(num_seq): notes.append(val_data[num+int(i*sequence_length/3), i, 0, :]) notes = np.array(notes)[0][-lenxx:] notes = notes.flatten() notes = np.reshape(notes, (1, len(notes))) notes.shape len(notes[0]) network_input[train_len + (num*3)] flag = False final_output = [] #oooh = [] lens_in = len(notes[0]) shift = 1 num_times = 2 num_notes = 1576 #num_notes = sequence_length-lenxx k = 3 print('num_notes : ',num_notes) final_output = [] for i in range(1): print("########################################################################## : ",i) print('lens_in : ',lens_in) output = model_p.predict(notes, num_notes, k, strategies=['temperature', 'nucleus'], use_structure=True) lens = len(output) notes_temp = [] count = 0 print(enumerate(output)) for index, j in enumerate(output): if index >= (lens_in)-shift: print(' j : ',j,' index : ',index) if i == 0 and flag == False: final_output.append(j) flag = True count = 0 notes_temp.append(tokenizer.word_index[j]) elif (final_output[-1])[0] == 'n' and (j)[0] == 'd': final_output.append(j) count = 0 notes_temp.append(tokenizer.word_index[j]) elif (final_output[-1])[0] == 'd' and ((j)[0] == 'i' or (j) == 'xxni'): final_output.append(j) count = 0 notes_temp.append(tokenizer.word_index[j]) elif ((final_output[-1])[0] == 'i' or (final_output[-1]) == 'xxni') and ((j)[0] == 'n' or (j) == 'xxsep'): final_output.append(j) count = 0 notes_temp.append(tokenizer.word_index[j]) elif (final_output[-1]) == 'xxsep' and ((j)[0] == 'd' or (j)[0] == 'n') and count <= 1: final_output.append(j) count += 1 notes_temp.append(tokenizer.word_index[j]) notes = np.array(notes_temp)[-lenxx:] lens_in = len(notes) notes = np.reshape(notes, (1, len(notes))) print('\nlast : ', final_output[-1]) print('\nlen : ', lens_in) print("The iteration output : ",final_output,'\n') store_ = final_output temps = network_input[train_len + (num*3)] + network_input[train_len + (num*3) + 640] + network_input[train_len + (num*3) + 1280] #temps = network_input[train_len + (num*3)] notes_in = [] temp = '' for sentence in temps: for i in sentence: if i != ' ': temp += i else: notes_in.append(temp) temp = '' len(notes_in) #final_output = notes_in + store_ #final_output = store_ final_output = notes_in print(len(final_output)) npenc_out = [] pred = [] for i in final_output: if i != 'xxeos' and i != 'xxpad' and i != 'xxbos': pred.append(i) while(pred[0][0] == 'd' or pred[0][0] == 'i' or pred[0] == 'xxni'): pred = pred[1:] print(pred) npenc_out = [] x = 0 y = len(pred) for i in 
range(x,y,3): x = i if not( x+1=3: print(i) i[1] = 2 if not(i[0] != -1 and i[2] == -2): npenc_out_1.append(i) s = npenc2stream(npenc_out_1,rev_uniq_ins,120) s.write('midi', fp='/content/output1.mid') #npenc_out[:]Machine Learning OverviewMachine learning is the ability of computers to take a dataset of objects and learn patterns about them. This dataset is structured as a table, where each row is a vector representing some object by encoding their properties as the values of the vector. The columns represent **features** - properties that all the objects share.There are, broadly speaking, two kinds of machine learning. **Supervised learning** has an extra column at the end of the dataset, and the program learns to predict the value of this based on the input features for some new object. If the output value is continuous, it is **regression**, otherwise it is **classification**. **Unsupervised learning** seeks to find patterns within the data by, for example, clustering.![Machine Learning Overview](img/machine-learning-overview.png) Supervised LearningOne of the most critical concepts in supervised learning is the dataset. This represents the knowledge about the set of objects in question that you wish the machine to learn. It is essentially a table where the rows represent objects, and the columns represent the properties. 'Training' is essentially the creation of an object called a model, which can take a row missing the last column, and predict what its value will be by examining the data in the dataset. For example...import pandas as pd iris_dataset = pd.read_csv("../data/iris.csv") iris_dataset.head()Here a dataset has been loaded from CSV into a pandas dataframe. Each row represents a flower, on which four measurements have been taken, and each flower belongs to one of three classes. A supervised learning model would take this dataset of 150 flowers and train such that any other flower for which the relevant measurements were known could have its class predicted. This would obviously be a classification problem, not regression.A very simple model would take just two features and map them to one of two classes. The dataset can be reduced to this form asd follows:simple_iris = iris_dataset.iloc[0:100, [0, 2, 4]] simple_iris.head() simple_iris.tail()Because this is just two dimensions, it can be easily visualised as a scatter plot.import sys sys.path.append("..") import numerus.learning as ml ml.plot_dataset(simple_iris)The data can be seen to be **linearly separable** - there is a line that can be drawn between them that would separate them perfectly.One of the simplest classifiers for supervised learning is the perceptron. Perceptrons have a weights vector which they dot with an input vector to get some level of activation. If the activation is above some threshold, one class is predicted - otherwise the other is predicted. 
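As a rough illustration of this decision rule, here is a minimal NumPy sketch with made-up weights and inputs (not the numerus.learning implementation used below):

import numpy as np

def perceptron_predict(weights, threshold, x):
    # Dot the weights with the input vector; predict class 1 if the activation
    # clears the threshold, otherwise class 0.
    activation = np.dot(weights, x)
    return 1 if activation > threshold else 0

# Illustrative values only: two features, as in the reduced iris dataset above.
weights = np.array([0.4, -0.7])
threshold = -0.1
print(perceptron_predict(weights, threshold, np.array([5.1, 1.4])))  # -> 1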
Training a perceptron means giving the model training inputs until it has values for the weights and threshold that effectively separate the classes.The data must be split into training and test data, and then a perceptron created from the training data.train_simple_iris, test_simple_iris = ml.split_data(simple_iris) ml.plot_dataset(train_simple_iris, title="Training Data") perceptron = ml.Perceptron(train_simple_iris) print(perceptron)import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt %matplotlib inline import numpy as np (X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data() len(X_test) len(X_train) X_train.shape plt.matshow(X_train[0]) y_train[0] X_train = X_train / 255 X_test = X_test / 255 X_train_flattened = X_train.reshape(len(X_train), 28*28) X_test_flattened = X_test.reshape(len(X_test), 28*28) X_train_flattened.shape X_train_flattened model = keras.Sequential([ keras.layers.Dense(10, input_shape = (784,), activation='sigmoid') ]) model.compile( optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'] ) model.fit(X_train_flattened, y_train, epochs = 5) model.evaluate(X_test_flattened, y_test) plt.matshow(X_test[0]) y_predicted = model.predict(X_test_flattened) y_predicted[0] np.argmax(y_predicted[0]) y_predicted_labels = [np.argmax(i) for i in y_predicted] cm = tf.math.confusion_matrix(labels=y_test, predictions=y_predicted_labels) cm import seaborn as sn plt.figure(figsize=(10, 7)) sn.heatmap(cm, annot = True, fmt = 'd') plt.xlabel('Predicted') plt.ylabel('Truth') model = keras.Sequential([ keras.layers.Flatten(input_shape =(28, 28)), keras.layers.Dense(100, activation='relu'), keras.layers.Dense(10, activation='sigmoid') ]) model.compile( optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'] ) model.fit(X_train, y_train, epochs = 5) model.evaluate(X_test, y_test) y_predicted = model.predict(X_test) y_predicted_labels = [np.argmax(i) for i in y_predicted] cm = tf.math.confusion_matrix(labels=y_test, predictions=y_predicted_labels) import seaborn as sn plt.figure(figsize=(10, 7)) sn.heatmap(cm, annot = True, fmt = 'd') plt.xlabel('Predicted') plt.ylabel('Truth')**Desafio 007****Python 3 - 1º Mundo**Descrição: Criar um programa que leia as duas notas de um aluno, calcule e mostre a sua média.Link: https://www.youtube.com/watch?v=_QfISzy0IKs&list=PLHz_AreHm4dm6wYOIW20Nyg12TAjmMGT-&index=8nome = input('Escreva o nome do aluno: ') n1 = float(input('Nota 1: ')) n2 = float(input('Nota 2: ')) print(f'A média do aluno {nome} é de {((n1+n2)/2)}.')Hierarchical Probabilistic U-Net Copyright 2019 DeepMind Technologies LimitedLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License. 
Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepmind/deepmind-research/blob/master/hierarchical_probabilistic_unet/HPU_Net.ipynb)If you haven't already opened this notebook in colab, you can click the button above to open it there.!pip install tensorflow==1.14.0 !pip install tensorflow-probability==0.7.0 !pip install dm-sonnet==1.35 from glob import glob import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import numpy as np import os import tensorflow as tf import sonnet as sntClone github repo!git clone https://github.com/deepmind/deepmind-research.git deepmind_research %cd deepmind_research/hierarchical_probabilistic_unet from model import HierarchicalProbUNetLoad LIDC test data from Google Cloud Storage!gsutil cp gs://hpunet-data/lidc_crops/test.tar.gz /tmp/ !tar xvfz /tmp/test.tar.gz -C /tmp/Load pretrained weights from Google Cloud Storage!gsutil cp gs://hpunet-data/model_checkpoint/checkpoint.tar.gz /tmp/ !tar xvfz /tmp/checkpoint.tar.gz -C /tmp/ #@title Utility to function to batch-up the data. test_img_dir = '/tmp/test' def make_batch(patient_ixs): """Assembles a batch of the first image for each specified patient. Args: patient_ixs: A list of integers specifying the patients to choose. Returns: A tuple holding the batched image of shape (b, 128, 128, 1) and the batched segmentations of shape (b, 128, 128, 4). """ img, seg = [], [] patient_dirs = glob(os.path.join(test_img_dir, 'images', '*')) for i in patient_ixs: # get the first image for the patient img_path = glob(os.path.join(patient_dirs[i], '*'))[0] image = matplotlib.image.imread(img_path) image = image[np.newaxis, ..., np.newaxis] img.append(image) # get the corresponding ground truth labels gt_base_path = img_path.replace('images', 'gt') labels = [] for l in range(4): gt_path = gt_base_path.replace('.png', '_l{}.png'.format(l)) label = matplotlib.image.imread(gt_path) labels.append(label[np.newaxis, ..., np.newaxis]) labels = np.concatenate(labels, axis=-1) seg.append(labels) img_batch = np.concatenate(img, axis=0) seg_batch = np.concatenate(seg, axis=0) # The images and segmentations have a spatial shape of 180 x 180. Crop to # spatial shape 128 x 128. return img_batch[:, 26:-26, 26:-26], seg_batch[:, 26:-26, 26:-26]Run a pre-trained model on the LIDC Test Set.First construct a graph for the model. 
Here we set-up different sampling configurations to examine the sampling behavior when different latent scales are fixed to their means.---tf.reset_default_graph() hpu_net = HierarchicalProbUNet(name='model/HPUNet') _GRADER_TO_RECONSTRUCT = 3 _NUM_SAMPLES = 16 _INSTANCE_INDICES = range(10) _NUM_CLASSES = 2 img, seg = make_batch(patient_ixs=_INSTANCE_INDICES) img_placeholder = tf.placeholder(shape=img.shape, dtype=tf.float32, name='img') seg_placeholder = tf.placeholder(shape=seg.shape, dtype=tf.float32, name='seg') seg_one_hot = tf.one_hot(tf.cast(seg[..., _GRADER_TO_RECONSTRUCT], tf.uint8), depth=_NUM_CLASSES) reconstruction = hpu_net.reconstruct(seg_one_hot, img_placeholder) sample_full = hpu_net.sample(img_placeholder) sample_local = hpu_net.sample(img_placeholder, mean=[1, 1, 1, 0]) sample_global = hpu_net.sample(img_placeholder, mean=[0, 1, 1, 1])Restore the model from a checkpoint, run a reconstruction and the different sampling configurations.saver = tf.train.Saver() with tf.train.MonitoredTrainingSession() as sess: # restore checkpoint saver.restore(sess, '/tmp/checkpoint/tf_graph_data.ckpt') # sample from the full hierarchy rec, s = sess.run([reconstruction, sample_full], feed_dict={img_placeholder: img, seg_placeholder: seg}) samples = [s[..., np.newaxis]] if _NUM_SAMPLES > 1: for _ in range(_NUM_SAMPLES - 1): s = sess.run(sample_full, feed_dict={img_placeholder: img}) samples.append(s[..., np.newaxis]) samples = np.concatenate(samples, axis=-1) # sample from global latent only samples_global = [] for _ in range(_NUM_SAMPLES): s = sess.run(sample_global, feed_dict={img_placeholder: img}) samples_global.append(s[..., np.newaxis]) samples_global = np.concatenate(samples_global, axis=-1) # sample from most local latents only samples_local = [] for _ in range(_NUM_SAMPLES): s = sess.run(sample_local, feed_dict={img_placeholder: img}) samples_local.append(s[..., np.newaxis]) samples_local = np.concatenate(samples_local, axis=-1)Plot Examples.Plot batches of images, the corresponding 4 segmentation masks, a posterior reconstruction of one of the masks as well as a number of samples.#@title Utility functions for plotting. def to_rgb(arr, cmap={0: (0, 0, 0), 1: (255, 255, 255)}): """ Transform an integer-labeled segmentation map using an rgb color-map. :param arr: img_arr w/o a color-channel :param cmap: dictionary mapping from integer class labels to rgb values :return: """ new_arr = np.zeros(shape=(arr.shape)+(3, )) for c in cmap.keys(): ixs = np.where(arr == c) new_arr[ixs] = [cmap[c][i] / 255. for i in range(3)] return new_arr def make_plot(img, seg, rec, samples, fs=6): """ Make a grid plot of a batch of images, set of ground truth segmentations and corresponding reconstructions and samples. Each example is displayed in a column of the plot. :img: Batch of images, array of shape (b, h, w, 1). :seg: Batch of segmentations, array of shape (b, h, w, num_graders). :samples: Batch of samples, array of shape (b, h, w, num_classes, num_samples). :fs: Font size, integer. 
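:rec: Batch of posterior reconstructions, array of shape (b, h, w, num_classes).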
""" num_samples = samples.shape[-1] num_graders = seg.shape[-1] bs = img.shape[0] num_rows = 1 + num_graders + 1 + num_samples f = plt.figure(figsize=(bs * fs, num_rows * fs)) outer = gridspec.GridSpec(6, 1, wspace=0.1, hspace=0.1, height_ratios=[1, num_graders, 1, num_samples, 1, 1]) img_spec = gridspec.GridSpecFromSubplotSpec( 1, bs, subplot_spec=outer[0], wspace=0.0, hspace=0.0) grader_spec = gridspec.GridSpecFromSubplotSpec( num_graders, bs, subplot_spec=outer[1], wspace=0.0, hspace=0.0) rec_spec = gridspec.GridSpecFromSubplotSpec( 1, bs, subplot_spec=outer[2], wspace=0.0, hspace=0.0) sample_spec = gridspec.GridSpecFromSubplotSpec( num_samples, bs, subplot_spec=outer[3], wspace=0.0, hspace=0.0) grader_std_spec = gridspec.GridSpecFromSubplotSpec( 1, bs, subplot_spec=outer[4], wspace=0.0, hspace=0.0) sample_std_spec = gridspec.GridSpecFromSubplotSpec( 1, bs, subplot_spec=outer[5], wspace=0.0, hspace=0.0) for j in range(bs): # image ax = plt.subplot(img_spec[0, j]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.imshow(np.concatenate([img[j] for _ in range(3)], axis=-1)) if j == 0: ax.annotate('CT scan', (-0.2, 0.5), xycoords='axes fraction', va='center', rotation=90) # ground-truth for i in range(num_graders): ax = plt.subplot(grader_spec[i, j]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.imshow(to_rgb(seg[j, ..., i])) if j == 0 and i == 1: ax.annotate('Graders', (-0.2, 0.0), xycoords='axes fraction', va='center', rotation=90) # reconstruction ax = plt.subplot(rec_spec[0, j]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.imshow(to_rgb(np.argmax(rec[j], axis=-1))) if j == 0: ax.annotate('Reconstruction', (-0.2, 0.5), xycoords='axes fraction', va='center', rotation=90) # samples if num_samples % 2 != 0: xy = (-0.2, 0.5) else: xy = (-0.2, 1.0) for i in range(num_samples): ax = plt.subplot(sample_spec[i, j]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.imshow(to_rgb(np.argmax(samples[j, ..., i], axis=-1))) if j == 0 and i == num_samples // 2: ax.annotate('Samples', xy, xycoords='axes fraction', va='center', rotation=90) # grader standard deviation ax = plt.subplot(grader_std_spec[0, j]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) std = np.std(seg[j], axis=-1) plt.imshow(std, cmap=plt.get_cmap('jet')) if j == 0: ax.annotate('Grader', (-0.4, 0.5), xycoords='axes fraction', va='center', rotation=90) ax.annotate('Std. Dev.', (-0.2, 0.5), xycoords='axes fraction', va='center', rotation=90) # sample standard deviation ax = plt.subplot(sample_std_spec[0, j]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) samples_argmax = np.argmax(samples[j], axis=-2) std = np.std(samples_argmax, axis=-1) plt.imshow(std, cmap=plt.get_cmap('jet')) if j == 0: ax.annotate('Samples', (-0.4, 0.5), xycoords='axes fraction', va='center', rotation=90) ax.annotate('Std. 
Dev.', (-0.2, 0.5), xycoords='axes fraction', va='center', rotation=90)Plot samples from the full hierarchy.When sampling from the full hierarchy of the prior, none of the latent scales is constrained to their respective mean(s).make_plot(img, seg, rec, samples, fs=2)Plot samples from the global latent onlyWhen sampling from the global latent, all but the most global latent scale are constrained to their means.make_plot(img, seg, rec, samples_global, fs=2)Plot samples from the local latents onlyWhen sampling from the local latents, all but the most local latent scales are constrained to their means.make_plot(img, seg, rec, samples_local, fs=2)**Numpy Assignment** **Write a function so that the columns of the output matrix are powers of the input vector.** **The order of the powers is determined by the increasing boolean argument. Specifically, when increasing is False, the i-th output column is the input vector raised element-wise to the power of N - i - 1.** **Hint:** *Such a matrix with a geometric progression in each row is named for .*import numpy as np def alexaxndreTheophile(inputVector, n, increasing): if increasing: outMatrix = np.matrix([x**i for x in inputVector for i in range(n)]).reshape(inputVector.size, n) else: outMatrix = np.matrix([x**(n-i-1) for x in inputVector for i in range(n)]).reshape(inputVector.size, n) return outMatrix inputVector = np.array([1,2,3,4,5]) outMatrix = alexaxndreTheophile(inputVector, inputVector.size, increasing = True) print("When Increasing = True: \n",outMatrix) outMatrix = alexaxndreTheophile(inputVector, inputVector.size, increasing = False) print("\nWhen Increasing = False: \n",outMatrix)When Increasing = True: [[ 1 1 1 1 1] [ 1 2 4 8 16] [ 1 3 9 27 81] [ 1 4 16 64 256] [ 1 5 25 125 625]] When Increasing = False: [[ 1 1 1 1 1] [ 16 8 4 2 1] [ 81 27 9 3 1] [256 64 16 4 1] [625 125 25 5 1]]**Problem Statement** **Given a sequence of n values x1, x2, ..., xn and a window size k>0, the k-th moving average of the given sequence is defined as follows:** **The moving average sequence has n-k+1 elements as shown below.** **The moving averages with k=4 of a ten-value sequence (n=10) is shown below:** input | 10 20 30 40 50 60 70 80 90 100 --- | --- y1 | 25 = (10+20+30+40)/4 y2 | 35 = (20+30+40+50)/4 y3 | 45 = (30+40+50+60)/4 y4 | 55 = (40+50+60+70)/4 y5 | 65 = (50+60+70+80)/4 y6 | 75 = (60+70+80+90)/4 y7 | 85 = (70+80+90+100)/4 Thus, the moving average sequence has n-k+1=10-4+1=7 values. 
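As a quick check of the table above, the same k=4 moving average can be reproduced with NumPy (np.convolve with a box kernel, the same approach used in the solution below):

import numpy as np

x = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
k = 4
# 'valid' mode keeps only windows that fit entirely inside the sequence: n - k + 1 = 7 values.
print(np.convolve(x, np.ones(k) / k, mode='valid'))  # [25. 35. 45. 55. 65. 75. 85.]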
**Question:** *Write a function to find moving average in an array over a window:* *Test it over [3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150] and window of 3.*def movingAverage(inputVector, windowLength): c = 1 movingAvg = np.convolve(inputVector, np.ones(windowLength), 'valid') / windowLength for i in movingAvg: print("y{0} = {1:.2f}".format(c, i)) c += 1 inputVector = np.array([3, 5, 7, 2, 8, 10, 11, 65, 72, 81, 99, 100, 150]) windowLength = 3 movingAverage(inputVector, windowLength)y1 = 5.00 y2 = 4.67 y3 = 5.67 y4 = 6.67 y5 = 9.67 y6 = 28.67 y7 = 49.33 y8 = 72.67 y9 = 84.00 y10 = 93.33 y11 = 116.33Algorithm - Linear Regression !def hypothesis(theta,x): return theta[0] + theta[1]*x def error(X,Y, theta): total_error = 0 m = X.shape[0] for i in range(m): total_error += (Y[i] - hypothesis(theta, X[i]))**2 return 0.5*total_error def gradient(Y,X,theta): grad = np.array([0.0,0.0]) m = X.shape[0] for i in range(m): grad[0] += -1*(Y[i] - hypothesis(theta,X[i])) grad[1] += -1*(Y[i] - hypothesis(theta,X[i]))*X[i] return grad def gradientDescent(X,Y,learning_rate, maxItr): grad = np.array([0.0, 0.0]) theta = np.array([0.0,0.0]) e = [] for i in range(maxItr): grad = gradient(Y,X,theta) ce = error(X,Y,theta) theta[0] = theta[0] - learning_rate*grad[0] theta[1] = theta[1] - learning_rate*grad[1] e.append(ce) print(ce) return theta, e theta,e = gradientDescent(X,Y,learning_rate=0.001,maxItr=100) print(theta[0], theta[1]) plt.scatter(X,Y) plt.plot(X,hypothesis(theta,X), color='r') plt.show() plt.plot(e) plt.show() print(e[50]) print(e[99]) ### Convergence CriteriaCamera Task#import torchvision.transforms as transforms TASK = 'object_behavior' CATEGORIES = ['background1','redlight','greenlight','bottle'] DATASETS = ['A'] import torch import torchvision #device = torch.device('cuda') output_dim = len(CATEGORIES) #number of categories # RESNET 18 model_class = torchvision.models.resnet18(pretrained=False) model_class.fc = torch.nn.Linear(512, output_dim) model_class = model_class.cuda().eval().half() #model_class = model_class.to(device) model_class.load_state_dict(torch.load('classification_behavior_model.pth')) from torch2trt import torch2trt data = torch.zeros((1, 3, 224, 224)).cuda().half() model_trt = torch2trt(model_class, [data], fp16_mode=True) torch.save(model_trt.state_dict(), 'trt_classification_behavior_model.pth')Predict Loan Eligibility for Dream Housing Finance company Dream Housing Finance company deals in all kinds of home loans. They have presence across all urban, semi urban and rural areas. Customer first applies for home loan and after that company validates the customer eligibility for loan.Company wants to automate the loan eligibility process (real time) based on customer detail provided while filling online application form. These details are Gender, Marital Status, Education, Number of Dependents, Income, Loan Amount, Credit History and others. To automate this process, they have provided a dataset to identify the customers segments that are eligible for loan amount so that they can specifically target these customers. 
**Data Dictionary**

Train file: CSV containing the customers for whom loan eligibility is known as 'Loan_Status'. [Embedded screenshot of the train-file data dictionary omitted.]

Test file: CSV containing the customer information for whom loan eligibility is to be predicted. [Embedded screenshot of the test-file data dictionary omitted.]
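As a minimal starting point for this problem, one might load the two CSVs and form a trivial baseline. This is only a sketch: the file names are assumptions for illustration, and 'Loan_Status' is the target column named in the problem statement.

import pandas as pd

# Hypothetical file names -- replace with the actual train/test CSV paths.
train = pd.read_csv('train.csv')   # includes the 'Loan_Status' target
test = pd.read_csv('test.csv')     # same features, target to be predicted

print(train.shape, test.shape)
print(train['Loan_Status'].value_counts(normalize=True))

# Trivial baseline: predict the majority class for every test applicant.
majority_class = train['Loan_Status'].mode()[0]
baseline_predictions = pd.Series([majority_class] * len(test), name='Loan_Status')
print(baseline_predictions.head())

Such a baseline only sets a floor; the features called out in the problem statement, such as credit history and income, are the natural inputs for an actual classifier.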
bDlZvPQp67t4ucPO/2hjG4wZgw3sUuDlNlquh/Vd0TG1/ASGGaB8vSwl+AP/r0zAdd4AuzpRAOVpfBE60/l9t/NunL8D1/n5gEdHzVBhchX8PMWp5uEoHrwZuxALeuzJF19D+awGvlSkV4+8jnNRr0VKXQwhxg8aaFV9uY9N1y5Rtqv/TqF0O4y8A2pfgRxfofVx+QR+3MYwD/dUauDEuG2B8a6Mel9n4JCD+aqNWUZ3Xtdd2dtzG8NHDqq0H/SMM/1xgeLk8SrXLL/BaI3TeaFefpi7OMFGWjUvBGOfSWHECz3eA2zNj4JSMT8k40Px+tPcrjET0fBQGV7X6CXDfwVdlUKmh/WmbL4MZZpVcDQrhD8reniFaX/2tDdzeabeFGrj5tEU4P5/hYduKvSZzH3c/PXzJBEs1tD97sNHFXRocBXAbwMR0fqZj4A9t+u83mPSAUfvDilt/NbSvnb1eYSSi56X4tuDpFbwW0G2Y8hMkmbysouWiWyVRmdvkm8ww+yX/HedL9AOlPu4UcV6HIb8rzfeIcmPK5XDpeR3J/kplSPuV5uyk5arrq7+apXkX/o5ud1JZtcuPcDBC501xu8jmZRUs96aDUVJmbq5RfhuJtuUikNtbP4DedkKtrOYglPIcV+UR7bKNy8drdT5T8K2D0bu6+cfccRtffDsNjsLBHR78K+OV7GBWl67ALzU+LcxXwHSn53AyPzSJiHIIA33ypAcBxJ+WJxbyzMAR6E3SPxe+LS2zEF4LwvblNSbCAYQTmLacJ1oH+ifdbrQdfXpa76Q+gSMAR0zSfyfrxNMePWEbyl3W3/x3un6yTel4CDERjlKeLbxH87FQ1n30hNOys8ebdirbJcxtK53r20r7Vs+/oa0/esKG3AZ0+W1k4dvZNpu242QdeZtqv4mmZdtscZ+tto0rfVAshNezC46HafzQxfvYsoWd21cmwtPKWPjOcpt6vw8cw/hUpi5E9BLlhErF65QvSP6SkQbHliHoSQfLaoOrzJeUMsDHdckN9EQ8sEsDuf53so62f2n9lS8GqR5y3fQvT3mgVgI67Qv70RO2VvakZwhmaafyO5EUqKTnJCfoT8/jBsFVURsRevs0lRdtc9nn9L9Fps0q/WSXbTzuT8rxMPRB/ZivHCsKyxBCBF5mnhJcSfW2/UVOcGUKKonoNdgkuCq+LTj1pcv2yydpoOQ5mJ7iU5/S2YnjczTfVl9srX6SOy+cGbJljus4AfAwC5f1ao3Q+Rbfqvmh3Y5oeVgI7Xh9akQPD2hF198yoX2fwoEv3Spq4CZ5yky5PWQvnyZMP1s+ZJHTRioTt1mTXbZxzMcY6vmWx/WVCe3bCeH/AM5XjUenUv4V78cT0ZZWvOdqhuHfWgZJnIclSwfdJ1VD+7IBrPXeodXC2QPQqqNu2mL9BEAXY0PehfJ00bUTJUJPx5i9PVfzRe7Nr46Iyn7AjO/UOSDDzLmO8rBko+rPWU4bqcx8hgcsn1iU7bKNZ4K0leqot7YcX+aG+uVI86/aphdohJj9BOy3ppGBiEi18iWio/aRmpA6/YrOvY3m7zUkg+yorT6mrP7iB0a/skNtNQFZCP/iLPsU0FpGUgAZ4Gt7BOdafRoyrX+a4L9Mfg0Hd+gqr6tIlhuh05jhXH7K6fQKXquLMyXhOIA/CKOEWYzQuU7mRXXBfQdHfJHo3nQblta279CFg/NTAGjgyrfRbaiJ2cFAPV/Ztl4QkBW1kS2Mvo/T8oJvHYzkV0wAy4Bol20cDZxrT+cF35IEf1Nyew31d+bxo6zw7xnqv5d9tjl5AtEkesp5GWASERUovL8Y5yqoibTZ3AZ1fpLfYUgEzuRjaLkdBnrZ2Y8jJoZtZZJ/lW3H+xDnajg9W6u7MNffND0vJ0pL9F/S83T0/JnldE/PGaOdk7vEwvfERGgPdBjarDrfnExu+4tsWy5IwDa1kUybVtoLhBNkt5nmXPWc5bLSdo1J8rts41rZjr8iB82Qi7hkyPnU8zB7et21upnqb8q5Chz2RaJXKidUKmTFKyosy4Jh8sszdWE1HuA97iBHbOrCxY309mh6Dl5e24/eFP/gLwwv1tzSE7XxoB+9nb3y+pcW/W8Ns2vB/kz0Cm3yvcD/uHknDEm+RC/K07XxxqfoDen7+S9oosBq+OeCgRURlbbn4Ep/WWH2s7MBderCanSB3HyP9S1f0vgBeL+//9KEKLL8/w0zuZOblriXNh49qfwFH578v6BJ/h/I/V01I6Ln6HXfFiTSsO0TEZGMtwWJiIiI9ozBFREREVGFGFwRERERVYjBFREREVGFfsubYVnWU9aD6GCw7RMR0TZygys+MUWvEZ8WJCIi2SY/uHlbkIiIiKhCDK6IiIiIKsTgioiIiKhCDK6IiIiIKsTgioiIiKhCDK6IiIiIKrRWcBUOmrAufIS7qs3UhWW5CKRJQd+C1Q9yV9mNAK5lwZI+7vSJq0AHZ+dtMdP+Q/gXFpqDnfW4teynLxIRPT+WMLzUx/yunwCudYYuACcQuDmtuCZTF1ajC8DBRNygUXHxmwgHTRy1Tw6mPrR7ue+5mvtovulgBBve4xDt44o3fIDtn4iINnv/YfkrV9Mxuj0HDoDujx38ej29gQic6stdQ9BXr5oBAFp11PdRGToo4d9DoOfAxgjDv3dwJWnv7T+Ay6tSRESVKBlchfA/P8B7f4PzHoDbcTYIee6mLs5u910JOkwBvrZP8PHTFZotYPR9vLtb43sS9KOr0kREtL1ywdV8jCGaOD8GGu892OhibMhBSnIywkHTmKu0an6WKeckmpbmQ0m/tuVyLUtdL5rnIpDzqeJ1g74V35Lp4swqyiuJt90P4vwY0z7k1y8zz5BfYyx77qNp2CcAyrJ6vhpVZDpGt3eOBmpoXzvA/RDjeXax6tt/1FbVZdR8wGwbN80rardReWe3AG7PCuqk90X2BSKiXMJAnzzpQThB8tdCeC0ItDyxENo0RB/bX6TrAbbwHlfNj4sJHAE4YqKVmSyfTEv/DhwBxHV79IQtlbXw7bSs6N/J9uPyA0fZtrx8ule+Le2nug/oTZb7oC1jrJ8+Ly3PEZOisiGVrxyf+O/essZqfWkT2S6xEF5LaqNiIhzpHC2Xqbr9x9uB3PeiacnfUZuN189t/+XbrbpP+jGQ686+QESvR06oVLzO6oImwjEFHfKXQjTVMEBHXwbKgJw7X2QHTNMAXWLATAdiqaxM8FQQjCn7aQoi8wbxovpl9i2pg/qFU/gFoQSE2pdQ+tHPC60j04kePWFr5zRqX9q53En7NwRTuQGQXj+5jqva1qrgarlMJiBiXyCiF26T4GrlbcFwcIducrss/hy1R0CpxN466q1t5mt1mT2sqGt0W2T8h9hLcnBR/Yzzjus4AfAw2zyDxwkEhJA/O3iS7RULvnUwuu/gSGr/UW6e+da4qtr2P/s1Kpy/7/av1IV9gYhesRXBVYjxd8B71ActgUmvTGLvDLN74KRe23C+qlY/Ae5nmJlmTt34tQk7eE1ESUX1q9VPkPeFXHb/Tbb5MqJVAoxvHU
yE3v4X8Fplnpqttv3X39rAz5m5zx1A+5exLxDRa1YYXIWDD+i8+2j89dd478G+7+CDnlQqPUkYDu7QbXm4Ol1jPh4w05KFR7/iIfr0HA66OJMTY+c+3LQOy3WDH+s9+xQN+AAQwh9smApbVL/Tq+gLueGu2P/SNUb72sGofaQkEYcDn4m8FQn6Z3jwrwzvnIoT22/PssnfO2j/SdBQ+72Z7XNTV6rDpu2/hvq7+J9zH37BFbm0L67CvkBEr1ne/cVl3oaevC7S5FR1fpIn4aSJuKb8qdz5SpnZJOBl/sVEWl8uQ53u9Jy0rC//qSW0x/kd6bJpQnyybX0bEE6QrY8xUT63ftIxyBzXEmVnjk9corKcnBNDm4i6RF7bi+jHPJpfdftX21F6XrW2u2xDee3/H+LDv61ut+k0Y56Ufjz+xb5ARK9GTqhUaI03tK8Swr84QufdBOKT6f3Sq+YT7d9mbR9g+yciepl2+4Z2IiIiIlqpouAq/lV+j+hFhJmXcK6aT/Scsf0TEdFShbcFiZ4/tn0iIpLxtiARERHRnjG4IiIiIqoQgysiIiKiCjG4IiIiIqrQb3kzLMt6ynoQHQy2fSIi2kZucMUnpug14tOCREQk2+QHN28LEhEREVWIwRURERFRhRhcEREREVWIwRURERFRhRhcEREREVWIwRURERFRhUoGVyH8CwuWlXxcBADCgQt/vtP6lRTAtSy4U2nS1E3r+aQ16cvHyYLVf+oaUPWi9pWe0wsfIYCg//Tty8jQ1oP+AbW9PfVFIqJ9WR1cTV1Y1hE67yYQQsSfc4wtC0fthyeo4ioBXOsMXX3y6Q2EuEHjiWvT+CQgHj3YAJxAQHx66hpQlcJBE5Z1hgd/sWz/n4EPloWz233XDlH/bGRaf9QOD6Xt7akvEhHty4rgKoDb6AK9iTZQN3AjFvBaO61bSQ3ciAmcPdbAfAXDRv14D5Wh6sx9fGiPYPsLDC9ry+nHbQz33OZSpzcQwX5rcjBX8IiIDkRhcBUO7tCFDe+96TdnDe3rE8wO4rbgHk3dw7iCQZULvnUwgoOPcmCVauDKB9s/2z8RUUZBcBVi/H0E4CT/CszpDW5OpTUGTSXfSMmBmvtoGvJWACxzMuRl5HyRVfNNtR801W1k6if92i6oW7SOi0DOu4m3HfSt+JZMF2cr6lRUzsr6rTi2+WVLuXLasdDziJRzRQACjG8BtOqo5yxRu7xBO+0b5rzEdK52/pqD5dlI8qPkZeTzsWp+VlQXeRuZ+kltr6huVbR/tS/G9egHcb827U9+XU31ZV8gooMjDKLJC+G1IABHTEwLaRa+LdDyxEL+GxBOINKybD+e++gJO56XLAdguX7gCCBaftX8yEQ46bakMvX6pH9Hy6M3KV+35DgEjgBs4T3K29KO0aMn7MwyJcox1q/42OaXDal89fhEf0t11ur7mi27RHwOpOOeT2tDet8xtgdHTNLloLTnSS85d6vmx5sLHKkNLtfR65P+HbcPJyiq2xbtXz4ySl9U9ydp35OefJwL6irYF4jo6eWESsXr5Be0TnClD1hiuX5PWzsd7PRgSN2OPOCumm/afjZYKTFglqlbwZeRyFlmdTlF9Vt9bFfVMSkj+cJSv4SyX96v2UbBlRLcxKQgXRYFRnK/MvUT+Xytmm/aviFAKbEf2bpt2P41al/M7o8yv7Cu7AtE9PQ2Ca4KbgvWUH8HAA+r80rmM2SfG4zX/zmLLsEnt95+nEOUSAauv7W3mr+6fvL89epWuaL6lTm2m+jJT39Gn6Ext+i1qqPeAnA/w2zFkuHMcPaO6zgB8DCLb4bFt7LGf4gSCejxtjeeX6J+8vy16rZbhXVlXyCiZ6Iwob3x3oONETrfcnKJpm6UnxF/kXR/GJZ7V0cNAdw3HZys8WqC2a9RvO5m8xXHdZxglBMkrl+3yhXVb+Wx3dA2X0avQg3tawdAF3cD85EKB024U6BWPwHQxdiQq3NSrwFTF0ftE0yEUHIU880wu4/X3Wi+tif1k/wgce267VZhXdkXiOiZKH4Vw3EbX3wbuD3LJqtOXVg/zuNfeA1cxculyaBzH3e36pOGya94TMfZ91LJX06GdVfPl8pPpIN0A+c9oNuQk4xD+P1lYmtx3fJFX6xxeYNNH0gvql+5Y7uO2uVHOPcdHMnndO7DZyKv6vQGkx4wah9pyeFRYPUBX6KA5PQKXks9f+HgDt2Wh6s0YFleAQ5+GFrY7bhg3RLzDVeYR7/iEOX0HA66ONPOt5vu04q6FSjd/ktcAVxdV/YFInomSt1fjPNHgGwyqkzNX1BziJb5HBDoOVE+i5K07ghHWiaTP5U7f5KWpZaXzR9R6iCVkVe3f/z7P9RytOOwTAhe7q++DfQm2fqYyimoX9GxXV129vgYz2mpxO2Xz9glpFy8zHFMacnayvFUz4HTk5Osk5whR1ommz+VO1+pWzYJftlP1Tosy8iv25f/LNFutfavU9vnf4j/q9XN3Ffz6moqk32BiHYrJ1QqZMUrKizLgmHyzoSDZnxrwvwW51Xziary1G0/ekVA/D8gGG9Lr5pPRES7tMn3Av/jZiIiIqIK7T24iq5KjRC9iDD732ismk/0fMVXpe5hzmtcOZ+IiA7RQdwWJDoUbPtERCTjbUEiIiKiPWNwRURERFQhBldEREREFWJwRURERFSh3/JmWJb1lPUgOhhs+0REtI3c4IpPTNFrxKcFiYhItskPbt4WJCIiIqoQgysiIiKiCjG4IiIiIqoQgysiIiKiCjG4IiIiIqoQgysiIiKiCuUHV1MXlmUpH3f6hDXLEQ6asC58hPuuCL1o4aCptf8m/Pm+a/UShfAvLDQHVfXoqDyrH1RU3r4EcA9kzCWi9eUHV6c3EGIBrwWgN4EQAjenT1cxswBf2yPgvoOvHHRoh2qXQwgxgQPA9hcQYoj28b5r9RLV0P5LYHhZq6CsEP7FETr3FRS1VwFc6wzdfVeDiDZW6rag/ba+63qUMx2j23PgAOj+eD6/TMOBy6sez9hJvYov/ldq7sOt7KrUqm3U0P4r/kH4rDVwEwf2RPQ8PaOcqxD+5wd4729w3gNwO8azCK/mPj60H/ZdC6I9COFfd7Db1v8U2yAiWk9lwZWeo6LkCsx9NOX8FTlnaurG+SxxroQ+Py1jjCGaOD8GGu892OhinLk1KOVbSDlj7lStQya/Q8svS+en05N8mygPQtm/gvqHgyasNx2MMELnTc5+0QsgnXvLgmW5SuCv9w25/UXzXARS2yrMFyroS/llFfUtaVll/nJ6Ut+gr9cvWqY5CKV9jPvK3EfTim7RjdpHBTlrem5RQZnGY1GwDX0MWB71nPMlTY/3Ma1D5jhHyyhXpbcd53LrK5+PJvx5APfZ55QRvXDCYDl5IbwWhO0vTIulFr4t0PLEQv4bEE5gKOPRE3YyL3AEgPhjC+9Rmy+Z9ORpUZnyNtNpSXm9SboeIC0bOAJwxCRZzfi3VN/AWdYtKlE469Q/sz4dMrVLSOc6l95HknYYt6lHT9jS+Y/6RjQv6SfRJ16+sL3k96X8s
qS2mdkf89/LfhX9Lff/SS/pW/Gy8SdaJt733kQpL3/8WJYR1aFMmeYyssdfGwPSfVpxvgzbU8a3R0/YyliSHNstxzmtnUx8W5m/HP9WHQ8iqlpOqFS8TnFBZYIr0xdQzgAgDTL5wYepvIlw5ABIJF8m+pfQioExsz3z/imD8ar6rT2fDtnawZUenAtR+ANBCX6EGmwt1y3RXgx9aXVZaiCS6RtpuWowZg6uNpufZQ7w8ss0r58JlvLGgJXnq0RwVapNrDfOTXracdK2owaIRPSUNgmutr8tOJ8Z8h1qqL8D8HMWXfZOLpf/OE+fwFpHOLhDF12cSZfcj9ojACMM/97mRtsMM8OTRfW3NnA/w2yLkul1CGeGbJ/jOk4APMyitpncRhr/ISCCLdOUt+xLstmvUXbicR02Rpi90AcwypyvQsdtfOwB3YbhFu7G5ybA+NZcp0TjvQf7voMjvhaE6FnYPLhKntCJBwHj03vv6qghgPumg5NAQHxqbLChEOPvgPcoIIT6mfSA0ffxFnlMddRbOWW06jiQZyTpEE1duFOgVj8BjPl/8VOGUxdH7RNMKnmVybZ9SRX9iBhinPmitlF/oa+dWHm+Smh8isafhW8Dt2dxgLXNuYnHoV8FP+eO2xgKEQdtI3TeMMAiOmSlgqtspw/gvhmi/nsNQANX8SCTJmHOfdzd2vDeLweZ9FfhdLzW+1vCwQd03n00vmMo+TX3YeNHvWtoXzuAUkb0Li3nuo0akPklH11Fi365lnrB33EddlLygAntz1HmisbcR7MBnJ8COL2C1wK6jWUSezi4Q7fl4SoNph7S9hP82P7tRZv2JV3t8mP0RX29bJfBtw5GvaS/aV/6cx93t5ACilWi9aO6+oXBQKmrRltuA0CJ86VddZferXd04SOc+2jG+167HEYBlmk/1jo3NZz/qY6h+jgT9JNgiq9pIHoWcu8vKkmYho92/19NqFVzRpa5JhDoOWnSqt37D20dNaH1/2vlby9Tv9YX8UVLaM8k+ZoSSw1l5efKQNi+V5DQbkrI1ZN26ZAlXUJtO4aPkgOkPUyhtFW1TTi9ZZL5l//U2mecZ5PXDoXI70v/+Pd/rCjL1Daz9SvKk0TLE15BQrtSt/gYpNOMOVN6Gf8qVWbuMen9K/NQizHRv/B8iTTfKVnH03KunJapzM3HueRc5I4zQohJzxZ2K7sOEe1eTqhUyIpXVFiWBcNkohePbZ+IiGSbfC88o5eIEhERER0+BldEREREFWJwRURERFQhBldEREREFWJwRURERFQhBldEREREFWJwRURERFSh3/JmWJb1lPUgOhhs+0REtI3c4IovUqTXiC8RJSIi2SY/SJMpUgAAFxFJREFUuHlbkIiIiKhCDK6IiIiIKsTgioiIiKhCDK6IiIiIKsTgioiIiKhCDK6IiIiIKpQfXE1dWJalfNzpE9ZsQ+GgCevCR7jviuxIOGhmzov6acKf77uWL0EAt/A4W7D6wb4rWSjoL+vaHOg9Itq/vfXpqQvLcnHYR7BICP/i8NsAEe1HfnB1egMhFvBaAHoTCCFwc/p0FdtMgK/tEXDfwddnEAhuYvZrBCcQEEJAiAkcQPo7Pl+0vfkMDy0PCxEd24VvA9LfInD2XcNC4aCJu7eLtO6j9lcpkAngWmfo7rF+0fhyg8Y+67CxEP7FETr3+64HER2qUrcF7bf1XdejGtMxuj0HDoDuj+fzizIcuOWvNr31cJUb5NbQvm5WVCtqXrdRy5t5egXv7VPWZh0hxt9HOKlHta9dDrVApoGbODCnNcx9uIMQQA3tv/hDhojyvaCcqxD+5wd4729w3gNwO34etxzmPj60H0ov3rgs+MIHgNM22sdb14qO22gXXqmtoX15qNddZpjxqkrFQvjXHZTvqUT0mlUWXOm5QEoux9xHU85VkXOipm6cJxTnMOjzy5qPMUQT58dA470HG12MM7cGpTwJKafMnap1zOSnaPln6fx0epLntMzTSfe/YP/CQRPWmw5GGKHzZsP9zqPUWcptSeozcKW6bHhcCGmbyhyj5Jj66Xx3isL2sMyRWpWLpG4ze36jW37dxrq5klqemdYe5RwutdxoveYglMaB4tw/PTcyWs9FINehIJ8pf/micaRo/wr2Ye6jaUW3AUfto+y+6X1GqWNUr7WuThPR8ycMlpMXwmtB2P7CtFhq4dsCLU8s5L8B4QSGMh49YSfzAkcAiD+28B61+WuY9OR1om3KdUqnJdvrTdL1AGnZwBGAIybJasa/pf0JnGXdoxKFs87+ZdZfa6+X25IFTrp/QkjnR65POn/D4/JC5XSJTBuPpwqvZTr38jGV5he0h9z+kq2JNj/ZlnxuctqFQl/G/Hfe+Z/0knnxcvEnqldcp565tSTjQ1J2+jek/SjoF/nLy8d7nf0rsw/RMtnjrvWZ5Hg9esJWjt2mfZyI9i3ve6FwneKCygRXpoE8Z3CVvlzyg4syXwymOqhf/NEArA9o2XplvjSV+pj3XxlEV9V/7fnr7nfOsYf+ibdh3N66x+XlWiu4UoIlPUAt6gNF7b04MDEGuZkfJOsHV/n7l9NW5MArE3jEfSRvHwzbi/qrtF+PnrAL2tvq5dU6rd6/VfuQE1zl9ZkNfyQS0eHZJLja/rbgfGbIQ6ih/g7Az1l02T25tfTjPH3CrUrh4A5ddHEmXfI/ao8AjDD8e5tbWebclfpbG7ifYbZFybu2fIIw+QyZi7UL8hOEyefT7nKxwpkh6+e4jhMAD7PN2/rs18hYro0RZvHtrOi24Afgc/z05DNSZv8qddzGx97y1ixf2UD0umweXCVPzsQDu/HpvHd11BDAfdPBSbCrL50Q4++A96gHEwKTHjD6Pt4ij6mOeiunjFYdh/wM5TZftLSGJw6ya/UTwJhPiPTpwE1EPxiGGGcCDRv14yh/6Oynh8UzDdJX7d8uND5Jr/G4PWOARfSKlAquRr/0r48A7psh6r/XADRwFQ8eaTLn3MfdrQ3v/TKYSr/sp+NK368TDj6g8+6jccBvvPdg33fwYeNE7Bra1w6glBG9S8tJHtPXfv1GV9GiX6ylEomP60iuAQSDzRLa1UAqqvOofaQl1/rP4+nJQ6YHUqdX8FpdnGmJ0f4uE/9Pr+C1gG5jmcQeDu7QbWVf0VEmwE6WqV1+hIMROtfLfQm+dTDqSX0r3f/oVQ9be8LAtNT+FYp+aAEApv7q5PS5j2YcTNUuh8/uSh8RbSn3/mJePgn0fIuImmSq5kqkydGAQM9Jk0ft3n9o65gSS/Mp5a7Kh2l9EV+0xO1MYqwp4dhQlp5HIdfD9r2ChHbT/i2nrZOfodZ91fmIt2Xcv2xCe+nj8gJlu4R6zrLnSZ/viEnOQwKr28PEvF6GVn4mN06vjy6vn2n7Im9fSrwHbOH0kjbyT/FPrazCfimyCen/0tubsq1sv8i0z8zy6+5fdnnTPqTTev9a3WcePeG0DIn3RPTs5IRKhax4RYVlWTBMJnrx2PaJiEi2yffCC3qJKBEREdH+HXRwpb+08Ln9x7lrM/xn2ernOf9Ht0RE
RK8DbwsSSdj2iYhIxtuCRERERHvG4IqIiIioQgyuiIiIiCrE4IqIiIioQr/lzbAs6ynrQXQw2PaJiGgbucEVn5ii14hPCxIRkWyTH9y8LUhERERUIQZXRERERBVicEVERERUIQZXRERERBVicEVERERUIQZXRERERBUqGVyF8C8sWFbycREACAcu/HlVVQngWhbcqTSlb8HqB5sVN3Wl+i7rnJYrz7vwEQLA3EfTala4T0T5wkFz2faADdpf1GeagzB/kamrtH3aj1VjWaYtFDGc063GSiKqnjBQJgeOACDQm0hLTIQDCMAW3qOphHUl5UE4QRXlJRbCa+l1jz16wq58e/Tc5XSJyi18O+pXLU8sNiph2Wdsf7MS6DCs1RaS8RiOMIxqRLQDm3wvrLhyFcBtdIHeBOJTQ5rewI1YwGtVFeI1cCMmcFYuF8Dd4NeZ/ba+SaWKzX24RVcMiArULodY+PbK5YJ+3lWnsn2mvGqvRB+gA+2zZdsCAOD0BiKo8qwT0S4UBlfh4A5d2PDeNwxza2hfn2D2hINx0D9D9+k2VyCEf93Bw76rQS/b1MXZ7RNta+7jQ/slt2j2WSJ6OgXBVYjx9xGAE9SPcxY5vcHNabz0oAnLcuEPmlEeU3KFScl90n6FS/PkXKtk+/5Fkk8S5Zac3QK4PctZvgKZXIZou5bVhD+Pr5rNfTStI3TugVH7KJ4nHwPLuE+Z43PxP7M5X+n2drR/tDb1nEptY+pG537g5p7D7HmUchcvfMz0jUntL+hbsBpdAF2cWavzaZb1VHO2zHld8b7EV3LCQRPWmw5GGKHzRt4Xc66lsi99P13mf/9TWjapb7o9cy5Z0i8C+bj1g8yxUq83FR1jw7y8Pms4FquPr/xpwv8vVzvupj4sj2Xa8ctrC9qxL8yry5SfnJugYIzNLz+UxvAXfzWTaFfy7y/G+Uol7u2nOQN6/kfgKPlOC99e5hU8esKWcrYmvi3lQCXblssryJ/Kr1laTt4nzbky5DJMelp90m1H+S7yvir7Jh0TJyg4PtoxiNZzKspjo03IXUI9p3GOU28itRVTLqLUX/Q23pOWf/SE05LKN7S/qN0U9T8970ptp9lcnoXwWkl9tDYcOFoOZVRWpv/BEROlX2l5l4GTqfOkZ94HuV+k66THQa3nMjey6BivmqfvT948jXZsJj1tXMwcO7nOprFsRVtQ6mY4Bsox1svXxrx4G5NeXjvQyn/0hK20yaryaomer5xQqXid/ILKB1dCmL4I8gKbqLNOeqZAQx5Ecwb3DYIr46BpSmjXvhjUAUmmD8b6F0C2vnlflMogKxbC622a4ExVWHaiiXCKvlgMXzxqsLD82P4ibm960KG1L639lQ2ucr+0hR4gFvQhfX8MQZLaZ/LK0vvcRDgFfTazj5kfHOo+Fh3jwuNvDK7KjSf6DyfzsSoIhvRjsqotyMG7/Enqmjk3q8dKZR+KyueDPkQZmwRXBbcFa6i/A4CHrfKqnEBACPkzRPs4wFjPJTmu42TzzexE470H+76Do4LbGgCA+cyQyxEfv5+zwserG384wO1dVPb0K2Z/tFGroO60JeM5LaE30dq7wPCyhvDvIUbaovW3JZOYK1ND+9pJb60XvaIhnBn2Pu6jD7OiFl3D+Z82Ru2v8etaxqgbcza3kHOMV87T6ln2WNQuP8K57+BrfFstnD0ArSbO89IlVijVFloeFtp+qA8VbSmv/OM2PvaAbkO7vUtEaylMaG+892BjhM63nA42dVfkAuQNxHXUW8DoVzbT4KActzEUAkJM4GCEzpucACv+0un+MBynd/XiYOn0Cl4rOsbBD+D8tJqq05aO6zjBaP0fFjnBdK1+gm1/qFTi9Cb6In30YKOLs5ygIqpvF2ND7t9JvTj8r11+hIMu7gYBxr/qGwchuYp+sKz4MaMoeSyABq58Ow04jtonmPy1+Y+gUm3hfmbIw6pQQfmNT1GwtfDtKPhkgEW0tuJXMRy38SWvg01dWD/Oc34VAskvw1H7SEvs9hHEv25xe5bOi55MjH4xycsvA7DkShqAuQ9/jYTv9YK45aAX9JNgSn/sPQoOAQBTH/48Gnzl/cHcx91t3pOWsuUv6Lu3V6j4Nz5trIHzHtBtaEnc/fwXPSZXOI7kvpK01dPzKEC/Xia+f22PgPsOjpSE7WX7i76E4+0OtviCS79IQ/gX8f4ctzF89JBeLzmup/8OBj7C0yt4LXX/w8Edui0PVyt/AET9YdQ+q/xKbNExLjz+mT5bcCx0UxdHvz5KV3lu1H56XIctBeIrx7JVbeH0Cl6rizOlXQTwlR+y2eCs9DhXVP7cRzM+fmu9IoKIVKXuL8b34WG6/y9yElON8wz5Iel0z5gEKm9rs5ftZeslbzctT1k+yQuzhd1aUffc47DMwSg6PvESWoIp7Yve9vW24gRCa1vaedP7itxWlXmO8HLzYOIy0+VNbWOZzJ60TaWuLU/8/5l2txBez1bqpyaK69O0fqjnbhmT+uXyivM1M/1CO3ZOkN3H9Y6xOk/ts0XHQmMa/7T9XncsK2wLhvObTfhP2oVe/r8y2zOPPznlJ8n1heMV0euSEyoVsuIVFZZlwTCZdiaE3x/j/BPzrfaNbb8qAdw+cFNlntC+TF24WL52Zjndh3/cRrvq255EdFA2+V7gf9x8CJjITi/MThLZ9yL+XypM03/UGVgRkdGzDa4y//my/jn0JMz0BYYWrB/n2V/FRM+N9MLKD7h6IYFHAzeBs3x6Lv2Mcf4SrsoR0U7wtiCRhG2fiIhkvC1IREREtGcMroiIiIgqxOCKiIiIqEIMroiIiIgq9FveDMuynrIeRAeDbZ+IiLaRG1zxiSl6jfi0IBERyTb5wc3bgkREREQVYnBFREREVCEGV0REREQVYnBFREREVCEGV0REREQVYnBFREREVKH84Er6H+7VTxP+/AlreCDCQRPWhY9w3xWhJxEOmmq7V859AFee1w9WlOMif4kqBHBz66DW1Z0C4cDdYR8O4V9YaA5MPUU7bnF90jWVY14wzkzdJxmHsscp2rf9jovRMZSPGxEdIGGwnLwQXgvC9hfK34AtvEfTmi/VRDiAACCcYN91oV1SusSjJ+yCc77w7cL2sPBtAUAAjphUW01V4OTUI+6vveXWJ739999JDwItTyzMc4WTO08IETjxMd31PiyE18upR+Bo5zQZH3ZdJ45DRPuQEyoVWvO2YA3tzx5sjND5ttvf4psI+ju6QjAdo9tz4ADo/ji8/f5/7d09a+NIHAbwx3AfwoZcIUi7Tao4cMUZ0my3Cl6wU4Utc1XIwUK85s6rdGG7bQ6Wq6TABns/QEDNgu3KcFwbcBGD/S3mCr3NjEay5MhO9nh+sI1lvWZm9Jf1SEtbsmfhIPcLB7D2sqfWO0MsPbvijUqb3AOtQ8OEhY/hnQ33rBl/1PwgsPTy92rbrP01x+SVhXrWtMMriIlT9SalLXzM91vZ26Fo4mriABhh+H2bv203cSXG2MHeE9ETlc9cRSecf+cv6xbZ9BpH/W0seAXv0wzu2RVaAwB
9f8u3eIjKmMBHC03TpD0LB4YLoXrnCt2copCA1fc5rF+KlVZERLryxdVijhmQXF1G+Yfbay2bouUrlMxKksuYfMzKLKSzIvLnJ7deOP0Ef/5eQ63ZA9DDUa2G2ttfDetNllcqr7DwMcQJWntA88yFjR58w/xRtsaLciNRBkbJrsm/rOn5k23ncmi7wjzORy/O5SjtTG4Hcj5q4eEko58keS2prejZqukc1pmxtALQxDvPBvpHa9r9ur62kvJQYT+V9kddbrWZoMx+BajHLi8TZ+h3xn1K1gofreIF6MLDSbMHwMFlJyrIkm2I8mfxWBfvQ3os8xbpvJ85v0ZEL1r+/cWszFV4zz/OP6i5jigbkOQCwqxA2xVLKTcQLydebpRjGAtHzjQ8usKGLdxHeV41xxLkW0zzyN9xSmcixgN5P8Lt1DIhSbZGPlbh8ZGOy9Kz43nHA/mYBfulzEvPQu0SejtWJe0p6Rd67iZuG9HfOuwzwd9a619SxktuU3G7njip5Y+9nHxSRO6nqVxQkb6mbq+8P3KbLpoJUufRjYUjLzurX0Hqh+Fxi7Ypu2/l7JM8fj26ws3LNCnHMy9rle7XybZljGXauJUa19a0SSKqXkaplD9P/oLkk4ZeEIUMA75x8FS+ZygmMk8s+mBoLkTSg5A+yOYEVDNpJ554PenBNL1+87FLzRudGFhcvQibFVdCGE/SIqddGvuH2r/WXzCMhVuizcj9Sl1H8b6m9qnNTv5Fiyvz8kX2mKN/z9i3iuyTm/8AghxoD9dh7rvr1pV/URU8fKBfSLK4Itq1TYqrQrcFbW8JERRiEELgyhSelcwfRukP9yzYGGGe9biyHh4ejJV1CiEw7JTLQDSPHaB/E97G+IL5cbdgQDWwur1BL7rVGP5rdEcoE1x1JkLbj2F4uyG8bXABfBZLuO1Su0Y/MCXQHd3aum9BlAwrr259IDcXNIEn3VKqd4bxOnqfpNtoFfS151a35NHjKX1rgi8PljnDZrLXxWfPxqh7XtmrGKLbgv6x2E14n4gqt5WXiFr7NnA3hJ8abOzsp6sWc8zk6VUE5g/fwW0Hgd7MJ6oyreB/A9xHvTgSGA+A0Te/0PbN5qZvreC9PcLMW0J8LVfw0S5ZsNpZT4gG7SPvacEs84dRmFmc4PrnUxxMBMSHwqfzZP0PFlpr1j9PtdMwhyXb8cMpdesgY3wAMPUx27dKL3M1nwFtC9ZT+9bUB47L/S3qnc/BOHNRwXvwptdodA8wLnARS0QvV6HiavQwL7XQeucSDtTBZvL3KUaDSyUkKhco8vR65xLO3SkaWnjVywnJJleuK3i30Xx1dC8coH+Em/13xa9GAaxuz3H66tIYam2eubDvTnGeGzQN1j3qNrQXJXpxuDY+rgsfw7sSG0c7UkfrTRAIVwPaK3hvGxi+KdqmZskvttNrHPXV1yPEBfjUR6/ophV9VcDdKRpa2PtLdwT7TTDvJn2tCPNFReiwlRofAAQh+SakUHge6VfwhYfz7gjORVJMbdq3il+ESX9T1NH9Og6OoxKeD4rzZFs83PQRPGCQ8+JZedmTe3OLyD2+RPT8Mu8vpkKbhhchrg3KSvMbAu/2wIkzEakMhpSXSKabAqn69/XtWAq3Xe7lfknWIT8bE03/xxQ+jtauZVqibVY+bzvCaZuOE+2asUuk+oKek9Hydcbclbl9KG1t4MTt+/Vvr9V5tP7wflDk4Ywwk6XNm8r4FOxrer8Yp/Yrp3+mmDKJeX3HlLta8xBBqm/9tWaf/hDvc/tfepuNQXt53NA+c42B9nQmLPrcGcgvTS1zfImoKhmlUq5aOKOiVqvB8HGFJriuBT/dbz/bsYL30UfrA2+/0Xrbb/tERPQj2eS88P//j5s3CLITERERbeoZiqvgV6segFG3sZ0X5MkvF7xvpYKhyYtLM/7l5iGIiIiIsj3TbUGil4ltn4iIZLwtSERERPTMWFwRERERVYjFFREREVGFWFwRERERVeinrAm1Wm2X20H0YrDtExHRUxiLKz4tRURERLQZ3hYkIiIiqhCLKyIiIqIKsbgiIiIiqhCLKyIiIqIKsbgiIiIiqhCLKyIiIqIKsbgiIiIiqhCLKyIiIqIKsbgiIiIiqtB/WSxGlxkdUSkAAAAASUVORK5CYII=) What is hypothesis generation?This is a very important stage in any data science/machine learning pipeline. It involvesunderstanding the problem in detail by brainstorming as many factors as possible which canimpact the outcome. It is done by understanding the problem statement thoroughly and beforelooking at the data.Below are some of the factors which I think can affect the Loan Approval (dependent variable forthis loan prediction problem):* Salary: Applicants with high income should have more chances of loan approval.* Previous history: Applicants who have repayed their previous debts should have higherchances of loan approval.* Loan amount: Loan approval should also depend on the loan amount. 
If the loan amount isless, chances of loan approval should be high.* Loan term: Loan for less time period and less amount should have higher chances ofapproval.* EMI: Lesser the amount to be paid monthly to repay the loan, higher the chances of loanapproval.These are some of the factors which i think can affect the target variable, you can come up withmany more factors.import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt train = pd.read_csv("train_ctrUa4K.csv") test = pd.read_csv("test_lAUu6dG.csv")Lets make a copy of train and test data so that even if we have to make any changes in these datasets we would not lose the original datasets.train_original = train.copy() test_original = test.copy()Structure of train & Test Datasetstrain.columnsWe have 12 independent variables and 1 target variable.test.columns train.dtypes test.dtypesWe can see there are three format of data types:* object: Object format means variables are categorical. Categorical variables in our datasetare: Loan_ID, Gender, Married, Dependents, Education, Self_Employed, Property_Area,Loan Status* int64: It represents the integer variables. ApplicantIncome is of this format.* float64: It represents the variable which have some decimal values involved. They are alsobnumerical variables. Numerical variables in our dataset are: Coapplicantincome,LoanAmount, Loan _Amount_Term, and Credit _HistoryLet's look at the shape of the dataset.train.shape test.shape**In this section, we will do univariate analysis.** It is the simplest form of analyzing data where weexamine each variable individually. * For categorical features we can use frequency table or barplots which will calculate the number of each category in a particular variable. * For numericalfeatures, probability density plots can be used to look at the distribution of the variable.* Target VariableWe will first look at the target variable, i.e., Loan _Status. As it is a categorical variable, let us lookat its frequency table, percentage distribution and bar plot.Frequency table of a variable will give us the count of each category in that variable.train['Loan_Status'].value_counts()Ratio of proportion of Categoriestrain['Loan_Status'].value_counts(normalize=True) train['Loan_Status'].value_counts().plot.bar()* The loan of 422 (around 69%) people out of 614 was approved.* Now lets visualize each variable separately. Different types of variables are Categorical, ordinaland numerical.1. Categorical features: These features have categories (Gender, Married, Self_Employed,Credit History, Loan_Status)2. Ordinal features: Variables in categorical features having some order involved(Dependents, Education, Property Area)3. Numerical features: These features have numerical values (Applicantincome,CoapplicantIncome, LoanAmount, Loan Amount Term)* Let's visualize the categorical and ordinal features firsttrain['Gender'].value_counts(normalize=True).plot.bar(title = "gender") train['Married'].value_counts(normalize=True).plot.bar(title = "Married") train['Self_Employed'].value_counts(normalize=True).plot.bar(title = "Self_Employed") train['Credit_History'].value_counts(normalize=True).plot.bar(title = "Credit_History")**It can be inferred from the above bar plots that:**1. 80% applicants in the dataset are male.2. Around 65% of the applicants in the dataset are married.3. Around 15% applicants in the dataset are self employed.4. 
Around 85% applicants have repaid their debts.Now let's visualize the ordinal variables.* Independent Variable (Ordinal)train['Dependents'].value_counts(normalize=True).plot.bar(title = "Dependent") train['Education'].value_counts(normalize=True).plot.bar(title = "Education") train['Property_Area'].value_counts(normalize=True).plot.bar(title = "Property Area")Following inferences can be made from the above bar plots:1. Most of the applicants don't have an dependents.2. Around 80% of the applicants are Graduate.3. Most of the applicants are from Semiurban area.* **Independent Variable (Numerical)**Till now we have seen the categorical and ordinal variables and now lets visualize the numericalvariables. Lets look at the distribution of Applicant income first.sns.distplot(train['ApplicantIncome']) train["ApplicantIncome"].plot.box()* It can be inferred that most of the data in the distribution of applicant income is towards leftwhich means it is not normally distributed. * We will try to make it normal in later sections asalgorithms works better if the data is normally distributed.* The boxplot confirms the presence of a lot of outliers/extreme values. * This can be attributed tothe income disparity in the society. Part of this can be driven by the fact that we are looking atpeople with different education levels. Let us segregate them by Education:train.boxplot(column="ApplicantIncome", by = "Education")/usr/local/lib/python3.7/dist-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray return array(a, dtype, copy=False, order=order)We can see that there are a higher number of graduates with very high incomes, which are appearing to be the outliers.Let's look at coapplicant income distribution.sns.displot(train['CoapplicantIncome']) train["CoapplicantIncome"].plot.box()We see a similar distribution as that of the applicant income. * Majority of coapplicant's incomeranges from 0 to 5000. We also see a lot of outliers in the coapplicant income and it is notnormally distributed.Let's look at the distribution of LoanAmount variable.sns.distplot(train["LoanAmount"]) train['LoanAmount'].plot.box()We can see a lot of outliers in this variable and the distributuion is failrly normal. We will treat the outliers later. Lets recall some of the hypotheses that we generated earlier:* Applicants with high income should have more chances of loan approval.* Applicants who have repaid their previous debts should have higher chances of loanapproval.* Loan approval should also depend on the loan amount. If the loan amount is less, chancesof loan approval should be high.* Lesser the amount to be paid monthly to repay the loan, higher the chances of loanapproval.* Lets try to test the above mentioned hypotheses using bivariate analysisAfter looking at every variable individually in univariate analysis, we will now explore them againwith respect to the target variable.1. Categorical Independent Variable vs Target VariableFirst of all we will find the relation between target variable and categorical independentvariables. 
Let us look at the stacked bar plot now which will give us the proportion of approvedand unapproved loans.Gender = pd.crosstab(train['Gender'], train['Loan_Status']) Gender from pandas.core.reshape.pivot import crosstab help(crosstab) Gender.sum(1).astype(float) Gender.div(Gender.sum(1).astype(float), axis=0).plot(kind = 'bar', stacked=False) Gender.div(Gender.sum(1).astype(float), axis=0).plot(kind = 'bar', stacked=True)It can be inffered that the proportion of male and female applicants is more or less same for both approved and unapproved loans.Now let's visulaize remaning categorical varibles VS Target VariablesMarried = pd.crosstab(train['Married'],train['Loan_Status']) Dependents = pd.crosstab(train['Dependents'], train[ 'Loan_Status' ]) Education = pd.crosstab(train['Education'],train['Loan_Status']) Self_Employed = pd.crosstab(train['Self_Employed'],train['Loan_Status']) Married.div(Married.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize= (4,4)) plt.show() Dependents.div(Dependents.sum (1).astype(float),axis=0).plot(kind="bar", stacked=True) plt.show() Education.div(Education.sum(1).astype(float),axis=0).plot(kind="bar",stacked=True, figsize= (4,4)) plt. show() Self_Employed.div(Self_Employed.sum(1). astype(float),axis=0).plot(kind="bar",stacked=True, figsize=(4,4)) plt.show()* Proportion of married applicants is higher for the approved loans.* Distribution of applicants with 1 or 3+ dependents is similar across both the categories ofLoan_Status.* There is nothing significant we can infer from Self_Employed vs Loan_Status plot.Now we will look at the relationship between remaining categorical independent variables andLoan _Status.Credit_History = pd.crosstab(train['Credit_History'],train['Loan_Status']) Property_Area=pd.crosstab(train[ 'Property_Area'], train[ 'Loan_Status' ]) Credit_History.div(Credit_History.sum (1).astype(float),axis=0).plot(kind="bar", stacked=True, figsize= (4,4)) plt.show() Property_Area.div (Property_Area.sum(1) .astype(float), axis=0). plot (kind="bar", stacked=True) plt.show()* It seems people with credit history as 1 are more likely to get their loans approved.* Proportion of loans getting approved in semiurban area is higher as compared to that inrural or urban areas.Now let's visualize numerical independent variables with respect to target variable.Numerical Independent Variable vs Target VariableWe will try to find the mean income of people for which the loan has been approved vs the meanincome of people for which the loan has not been approved.train.groupby('Loan_Status')['ApplicantIncome'].mean().plot.bar ()Here the y-axis represents the mean applicant income. * We don't see any change in the meanincome. * So, let's make bins for the applicant income variable based on the values in it and analyzethe corresponding loan status for each bin.bins = [0,2500,4000,6000,81000] group = [ 'Low', 'Average', 'High','Very high'] train["Income_bin"] = pd.cut(train['ApplicantIncome'],bins,labels=group) Income_bin = pd.crosstab(train['Income_bin'],train['Loan_Status'] ) Income_bin.div(Income_bin.sum(1).astype(float),axis=0).plot(kind="bar",stacked=True) plt.xlabel ('ApplicantIncome') P = plt.ylabel ('Percentage') train["Income_bin"].head(10) train_original["ApplicantIncome"].head(10)* It can be inferred that Applicant income does not affect the chances of loan approval whichcontradicts our hypothesis in which we assumed that if the applicant income is high the chancesof loan approval will also be high. 
We will analyze the coapplicant income and loan amount variable in similar manner.bins=[0,1000,3000,42000] group=[' Low','Average','High' ] train['Coapplicant_Income_bin'] = pd.cut(train['CoapplicantIncome'], bins, labels=group) Coapplicant_Income_bin = pd.crosstab(train['Coapplicant_Income_bin'],train['Loan_Status']) Coapplicant_Income_bin.div(Coapplicant_Income_bin.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True) plt.xlabel ('CoapplicantIncome') p = plt.ylabel('Percentage') train['ApplicantIncome'][train["CoapplicantIncome"] == 0].count()* It shows that if coapplicant's income is less the chances of loan approval are high. * But this doesnot look right. * The possible reason behind this may be that most of the applicants don't have anycoapplicant so the coapplicant income for such applicants is O and hence the loan approval is notdependent on it. * So we can make a new variable in which we will combine the applicant's andcoapplicant's income to visualize the combined effect of income on loan approval.* Let us combine the Applicant Income and Coapplicant Income and see the combined effect ofTotal Income on the Loan Status.train['Total_Income'] = train['ApplicantIncome'] + train['CoapplicantIncome'] bins=[0,2500,4000,6000,81000] group=[ 'Low', 'Average', 'High','Very high' ] train['Total_Income_bin'] = pd.cut(train['Total_Income'],bins,labels=group) Total_Income_bin = pd.crosstab(train['Total_Income_bin'],train['Loan_Status']) Total_Income_bin.div(Total_Income_bin.sum(1).astype(float),axis=0).plot(kind="bar",stacked=True) plt.xlabel ( 'Total_Income') P =plt.ylabel ( 'Percentage' )* We can see that Proportion of loans getting approved for applicants having low Total_Income isvery less as compared to that of applicants with Average, High and Very High Income.* Let's visualize the Loan amount variable.bins=[0,100,200,700] group=['Low', 'Average', 'Nigh' ] train['LoanAmount_bin'] = pd.cut(train['LoanAmount'], bins, labels=group) LoanAmount_bin = pd.crosstab(train['LoanAmount_bin'],train['Loan_Status']) LoanAmount_bin.div(LoanAmount_bin.sum(1).astype(float),axis=0).plot (kind="bar",stacked=True) plt.xlabel('LoanAmount') P = plt.ylabel('Percentage')* It can be seen that the proportion of approved loans is higher for Low and Average Loan Amountas compared to that of High Loan Amount which supports our hypothesis in which weconsidered that the chances of loan approval will be high when the loan amount is less.Let's drop the bins which we created for the exploration part. We will change the 3+ independents variable to 3 to make it a numerical variable.We will also convert the targetvariable's categories into 0 and 1 so that we can find its correlation with numerical variables.One more reason to do so is few models like logistic regression takes only numeric values asinput. We will replace N with O and Y with 1.train.columns train=train.drop (['Income_bin','Coapplicant_Income_bin','Total_Income','Total_Income_bin',"LoanAmount_bin"],axis=1) train['Dependents'].replace ('3+',3, inplace=True) test['Dependents'].replace('3+', 3, inplace=True) train['Loan_Status' ].replace('N',0, inplace=True) train['Loan_Status' ].replace ('Y', 1, inplace=True)Now lets look at the correlation between all the numerical variables. We will use the heat map tovisualize the correlation. Heatmaps visualize data through variations in coloring. 
The variableswith darker color means their correlation is more.matrix = train.corr() f, ax = plt.subplots(figsize = (9,6)) sns.heatmap(matrix, vmax = .8, square = True, cmap = "BuPu")We see that the most correlated variables are ( ApplicantIncome - LoanAmmount) and ( Credit_Histroy - Loan_Status) Loan Ammount is also correlated with CoapplicantIncome After exploring all the variables in our data, we can now impute the missing values and treat theoutliers because missing data and outliers can have adverse effect on the model performance. Missing value imputationLet's list out feature-wise count of missing values.train.isnull().sum()There are missing values in Gender, Married, Dependents, Self_Employed, LoanAmount,Loan Amount Term and Credit History features.We will treat the missing values in all the features one by one.We can consider these methods to fill the missing values:* For numerical variables: imputation using mean or median* For categorical variables: imputation using modeThere are very less missing values in Gender, Married, Dependents, Credit_ History andSelf_Employed features so we can fill them using the mode of the features. There are very less missing values in Gender,Married, Dependents, Self_Employed features so we can fill them using the mode of the features.train[ 'Gender'].fillna(train['Gender'].mode()[0], inplace=True) train['Married' ].fillna(train['Married' ].mode()[0], inplace=True) train[ 'Dependents' ].fillna(train[ 'Dependents' ].mode()[0], inplace=True) train['Self_Employed'].fillna(train['Self_Employed'].mode()[0],inplace=True) train['Credit_History'].fillna(train['Credit_History' ].mode()[0], inplace=True)Now let's try to find a way to fill the missing values in Loan_Amount_Term. We will look at thevalue count of the Loan amount term variable.train['Loan_Amount_Term'].value_counts()It can be seen that in loan amount term variable, the value of 360 is repeating the most. So wewill replace the missing values in this variable using the mode of this variable.train['Loan_Amount_Term'].fillna(train['Loan_Amount_Term'].mode()[0], inplace=True)Now we will see the LoanAmount variable. As it is a numerical variable, we can use mean ormedian to impute the missing values. We will use median to fill the null values as earlier we sawthat loan amount have outliers so the mean will not be the proper approach as it is highlyaffected by the presence of outliers.train['LoanAmount'].fillna(train['LoanAmount'].median(), inplace=True)Now lets check whether all the missing values are filled in the dataset.train.isnull().sum() test['Gender'].fillna(train['Gender'].mode()[0], inplace=True) test['Married' ].fillna(train['Married' ].mode()[0], inplace=True) test[ 'Dependents' ].fillna(train[ 'Dependents' ].mode()[0], inplace=True) test['Self_Employed'].fillna(train['Self_Employed'].mode()[0],inplace=True) test['Credit_History'].fillna(train['Credit_History' ].mode()[0], inplace=True) test['Loan_Amount_Term'].fillna(train['Loan_Amount_Term' ].mode()[0], inplace=True) test['LoanAmount'].fillna(train['LoanAmount' ].mode()[0], inplace=True)As we can see that all the missing values have been filled in the test dataset. Let's fill all themissing values in the test dataset too with the same approach. Outlier TreatmentAs we saw earlier in univariate analysis, LoanAmount contains outliers so we have to treat themas the presence of outliers affects the distribution of the data. Let's examine what can happen toa data set with outliers. 
For the sample data set:1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4We find the following: mean, median, mode, and standard deviation* Mean = 2.58* Median = 2.5* Mode = 2* Standard Deviation = 1.08If we add an outlier to the data set:1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 400The new values of our statistics are:* Mean = 35.38* Median = 2.5* Mode = 2* Standard Deviation = 114.74It can be seen that having outliers often has a significant effect on the mean and standarddeviation and hence affecting the distribution. * We must take steps to remove outliers from ourdata sets.* Due to these outliers bulk of the data in the loan amount is at the left and the right tail is longer.This is called right skewness. * One way to remove the skewness is by doing the logtransformation. As we take the log transformation, it does not affect the smaller values much,but reduces the larger values. So, we get a distribution similar to normal distribution.Let's visualize the effect of log transformation. We will do the similar changes to the test filesimultaneously.test['LoanAmount'].dtypes train['LoanAmount_log' ] = np.log(train['LoanAmount']) train['LoanAmount_log'].hist (bins=20) #test['LoanAmount']= pd.to_numeric(test['LoanAmount'],errors = 'coerce') #test['LoanAmount'] = test['LoanAmount'].apply(lambda x: float(x)) #test['LoanAmount'] = pd.to_numeric(test['LoanAmount']) test[ 'LoanAmount_log'] = np.log(test['LoanAmount']) test['LoanAmount_log'] test.columns train_original.columns # test['Gender'] = test['Gender'].fillna(train_original['Gender'].mode()[0]) # test['Dependents'] = test['Dependents']. fillna(train_original[ 'Dependents'].mode()[0]) # test['Self_Employed'] = test['Self_Employed'].fillna(train_original['Self_Employed' ].mode()[0]) # test['Credit_History'] = test['Credit_History'].fillna(train_original['Credit_History' ].mode()[0]) # test['Loan_Amount_Term'] = test['Loan_Amount_Term'].fillna(train_original['Loan_Amount_Term'].mode()[0]) # test['LoanAmount'] = test['LoanAmount'].fillna(train_original['LoanAmount'].median)Now the distribution looks much closer to normla and effect of extreme values has been significantly subsided. Let's build a logistic regression model and make predictions for the test datasets. The process of model building is not complete without evaluation of model's performance.* Suppose we have the predictions from the model, how can we decide whether the predictionsare accurate? * We can plot the results and compare them with the actual values, i.e. calculate thedistance between the predictions and actual values. Lesser this distance more accurate will bethe predictions. * Since this is a classification problem, we can evaluate our models using any oneof the following evaluation metrics:1. Accuracy: Let us understand it using the confusion matrix which is a tabularrepresentation of Actual vs Predicted values. 
This is how a confusion matrix looks like: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAk8AAAC0CAYAAACNDcidAAAgAElEQVR4nOy9faxt21Uf9rv+eA/b1GDHdmzj4LqYWKz1gkPmcaqCAqpSFECNKE32LSJtxB9k7TRKkVLRREWge26hVDVtSKiUdG+LpOUPFJ0jjIQSYSAgaHFpmz0hFm9tsHGNDSZ8+OMZx+89G3jv9I85x5y/MeZca+99zrln3/PuHNK9Z+255seYY3795hhjznXn4uLiAo0aNWrUqFGjRo32ohcdm4FGjRo1atSoUaPbRA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUANPDVq1KhRo0aNGh1ADTw1atSoUaNGjRodQA08NWrUqFGjRo0aHUAvOTYDjRpdlZbnHv58DcBRqG+/2+/2u/1uv1+Av90CWPUD0OFo1DRPjW41Le9b4OTjX0fPcwOxxW/xW/wWv8W/TfH9OXByvsYxqYGnRrea/EjAqY+DrgcAD/QSHgdeEc7x0eK3+C1+i9/iP+zxBUiNOCrdubi4uDguC40aXZ5O7q4BeDi4uC/h50AOHp52NtO/w2Bt8Vv8Fr/Fb/Ef0vi9hx/D+83ZgGNRA0+NbjWd3F2HnckIAB7D6QoYwy7GjQ6+98DogN633+13+91+t9+3+PfyfAkkYOWxOVsdbe1pZrtGt59G0TM5uNEH9e4I+ASqAnnE3zHcqzxa/Ba/xW/xW/yHOj4CkArkcExq4KnRC4OiXdz3Dhg9nFHzyu5FASyEcBfDEcMdWvwWv8Vv8Vv8hy4+EOJqaHUUauCp0S0nGVjhOSl0aVcjw8zFXU36jfg7hjvKMZj9WvwWv8Vv8Vv8hyJ+3CADAq6OSw08NbrlxEPQwctuhnc1CDsVT+EYfdZStd/td/vdfrffD/fv0eU5f2yap0aNrk6i1gXSDkY0T552NBzu6K+n97LTqb1v8Vv8Fr/Fb/GPFL8HgtYJQNwQH5MaeGp0+4ls4PngawiT3zDhnmLaeBwub2x4i9/it/gtfot/g/HHuEkekeIckxp4anTLycchJIOPdythwAW1rw53SS1cD9faqha/xW/xW/wW/+rxw3zsK+8R03kKz/HjPB7nfH9k4AQ08NTo1pMLpri0W0FWA4MHHhKwwigOh7KvyeG+5z0RoplvOr6Et/gtfovf4rf4c/FB8T3yCTtQfB0u8cVPFfKM+HxEauCp0e0nUeci7kkEMCHvWIrf8JBL2EJ4mAAC+JLB6/LOZyK+B+Ba/Ba/xW/xW/wd8QOYCnOsxM9aJyAAJ9dH8x1poRyi5srM98ekBp4avQAo70SKgSwDt5dwHvg8kMOAFS2VBlqggW80WSPMBDKff1Bft/xb/ofmv8ayP8Gyv4Pltr5g5XyOxf8a/u4JTvo7WJ/nhU80DX6MWt0+8svjEjDhoPDIr4xn2Qi1/G9x/pIPslkPriwXoV9L3mm+75vmqVGjqxOdtgsDUwYbSOMkCwQvBDEeLQTyzKrm+rOolyMPI/KAjvb8ehzM5NnyfyjzP1/ipL+D5d074W9/gpO7d7Dsl3nCB/crGOAh5mAOnwIwM/0TmUI+Huu7AVCdnHPdjyV/ow3Y2VZu5pnLZH6neETL/6HM38+UW8vH9JmYLrhh5E3yw3BRZgNPjW45RVVvfA5PlZ3JzkkhTO7Ti6BVOzu12PEEUaq1Ob5VU7P2gHyuVP41Nfg8COSJqfF/Nf6Tr0VB66AJOtc7ayDnP92XMm/2duUMtGTRoIUkkuSDMQZsve73qV7avKL8T65B/jyOIkeKR36fxqpqB2431n5JO0g+Psmp1LrVZCblTmnjOH8oPkvzPop+wcBAg9/GfwI6sa9xeObfKz4ZmBXjW/V1ntsNWL9hauCp0S2nMPB8fE7Td1zE1MCX3yMgC5yTPX2c3EVNLHBMJncHmjjg04TlAEAmKQgAy8+yP5IJy8U8PeUpuQo/os3I+es4Pta0jO9NnmVZjf9p/j31h5q2BxiwGi+wGS+wOVuld35cZ7MCAxDpS9T3LG8AqL/RIkR9N8hHA7jMfwzoaBEUmbB5JZXl4MagSTu5u4SPfKaFKsmH22tanpnPirzGXA/hBSxXytsn7TEtuMjgMcvAQbdnlhkKmZGsBMym8R37T1zodT3ycwLF1C9snsyb5udR4B/Uv2o8CACK+SX+HeWPefmkPh05TxuJpnlq1Ohq1OfBJNOuOCOGCZwHpuy2woQOZNs7xB4fF7swaMMOyhfvw0IlmomcH5IfAMSOH7UDPi4moimT8t0ovHL8sEsM+eTdWNJmqPg+5u8of58Xxsb/Tv5zOS4v+lKO6W6hDw0YFjHg3CfAkXjpBc4H8JL7Wc4f4keS+I2AZdT9M70nkjoNZwHMrU4HMEADaMEdmTfAb01msa30GDD1oTZyVd5pwyHUp+U1aRvLtnLpr25blhXKOqQxmmXmlcy8kqn0JZuvj33Vpb6XQaiWi8TnNuG+5Ex8H/nJ7fXC4h/VdtL8MP+2XX2MX/ZREL8Z2Cc4B8Q5/NiapzsXFxcXR+WgUaMr0MndNfJe1mN1ugKApIamvTLSDprUxrQPzu8pRdZsOfo9HdemgIpdxiS9xyXj1+JYLhv/0+lrNTC8ngbTXNA8rZKZAip8gAOwvnuC9QgMZxvgfnhGv8LqzMW+t8by7hIMhtzpBqsFdJnnJ1ie5ljubAN39wTrFD/E9bE8LFZYLYa8SRjXWKtyBgzjCi7xrGk428CJ5m30VR6HhZFhweMK7u4y8SjxxSQ41S+m+kq9lY/ff257/9+f/+nn66xvOT5NPcmsvTxdpneAx+ZshWNR0zw1uuWUB1oeiNCaJtndyu+0I9a7HT0VIO3CoaYJ0C5b0vmUT841D/7sk+WkRIqf35TxYfKnnbzhv4yPrMlo/Ccuk1YGLu2uufdQ6yueFPWAG5cZhCyGNPULCYjiWmFcBlOZyc6fnuDkNPNgQQmAAJJMOmd+ZV+TZQF+gDXWpzYHkx4eGNeTPC5P1xAZ1nlcKh5T2/XcnjEuv08y99ncSe8AJM1FbitH4ZX+I9pE239EM2L7j8qH+s/I6an/p/im/4+6PJ1/pf8/tPwTK
K6EJwhl+Cz5cSZ+jgPObfSK+zwnOzLpWpocoTdCDTw1uuXksi08/uYdjIvvWaUNyGQQJxKyu0N2OkDysRFAllTQyUyYwZkDshOyTArGhJMW5jHGSPE1GOFLPxWIKMI1/yU/vLN7dPmHLN7SH8Rk0fMiYfIaKZ9EwUH8pD+JGs+QTsx3DoDrcmx3usFm3GBzNsDBY31/TeHGd+p8ibWYMgSU9KvkY7VaoE6pPB//rbEU3ij95myIZednwCWzn4TM87jGWsxC+/A40uIY219tRtg0I/Jm3zcQaFJOwj6DiB7URi4v8tTX1CJvTEA6PvWflH8G3GrxV1ptAiNinlT5OOLf5v+w8m/ADo3REN8AY8UnDD8o6qXBsh6LMreojQ50++s5/zjUwFOjW04+n8JQCyGSk3iarmkHE/xB7O5IJnCTj6QiJ0YpW4EzKL1MPnGVJrucj9plFfFzfcr4xFlyCuVdKIMFs4PvgTr/mftcei1/XgTn+GHwelz+1eJNEy+bPMC7eWlL1RYT1K+wGjcYGESKP9EimNZSmWNeWPxpuAspOW0nTlz2dQEw3BvygnK6MrzEWMl/KZZF2i5Oj36FzemAOnnITdC7eQQwrvfjMTkqC3/a1COO4UDZd1IP6FXJuT+kxdz0H3JSVv2HxrTqPyo+9R+eK7j/J35Mf1bxqf/3Ot96/IeRf1Ti0yzX55bOOWUQV8yH5EieR1/WKikARjNg2uhwv0n9fHZ0PnBq4KnRLSczqOJzHezoCSDH1BO0s/nQ5MOhapKnEMuTL57NLms2Pk821siUp8zM2z7xaYeqasyQYip/BhWl0Ysnv4eBf/2beaL8e86z6EVEdNouanMMvCbi5f5ykzwvKNwrd5fH743mwRbCgLS3L+d40qVKWVM1TW2sFkOjtaDnsrYV8GX6DD+3/n9V/n0lvjbPWR4UuEop7LzMabldY9ho6kAbHT2Ocx7HogaeGt1y8oBREdsdjJ2cC81InNQdahNOCbIKzQeVYcFPjj+1mGMmvuapBH8Z2ml+dsWv8VNb7F2Rp4OdwHhJtmB1brGbyv/6+a8CJlMDLUmbtyVdX10nIQN66KoBMZXpfxsMxhRhF8L5pcIBnSdNVL2+DqA4yGaTOAZ28bgyACvLzU/waEBPr8eM0nJU232qP9T7TOv/18E/p83PSDlY/utpp+IjparM18q0z3xRemXSPB418NTolpPLx1vNgHTVSYEHHU1sxsfAgiI9TB10PrXJhHnhdKZcNeFY/ubqVYtvQQOHTsWX37UFbSqufueKd/V6ynN9J2nDrpf/kscayJoCQSXVF3CQz5Otx5ByWt9f64VhXOKkj6aw3saLvfa0dOIuyts6YDGQ/9Iy13Fc4uRuLJXSrLmOdB/UPI8cb5kkUPK4CwxN9eOp8Tofrp9b/9+Xp9zD92ubaf7raWvxa3OYbdfamA3zveb6WNTAU6MXALnKbmRqYNtBHsL8XhMBL7Kukr4syy4YdpfHfFjb/j5leMOrhNZ5tvlP8ZbTajAxVU/ZKQJzvFhtns6P66N5uE7+58C1hNYBLFOpJRFKPk+95d/BnWYgs+zFpyg6n/dT8eInYSrXC6jy4ILmSaVfp/Ti4C6LVqqdfHLm3AMY9uARJt56hsfpxbFsu6kxOje2psBZrqntp960qx0/zLe3bTvbd7mPc/7TwK4+dq+Tf5i4U+PVzjm53JKHOf53p51qy+n5eQJU2i9IHIEaeGr0AiBfdRovB6F9V9sdwbyzi2h951TfjZVmusndVCUPvS/cZwdntVK1sm3+Nd5yfW3Y3K5T7wVLDVnNv6Ku4bOLQc7xqvxPg2suj9J0ulbz+SBrdcayTLfYqJvJE/Wr6D/lwk3hiw02pzrWcHZRhKny4IPmKZVjncMHrM6GUJ8+PhO5WE93bxeP/kAe67IqHcnzM8xzzZRVC3dF29sFttZnyn4seVie5vuzhj5TYxoUqz52r5N/W57lzabZR1u/a+6ZTzs1T+4ely729Rg2Mt/HoXZJZqNbTSd39aVpq7OV8ukoadeg3Tds3/xsvEPzukx5l6nLofWovavFOYS3ffJ8EPzvEb9D7FeH8h3fdS5/fy7lFakT4DPV5hN5FnF2pZ97P/OuQ+Z9suxDeL1KG16mzTnsUej/+6bZlcdlZX3ZtLvlFO4Wy2HlJuHmqGmeGt1yigNLjtpah9nO699pQFLaud3OZNhUfrVnm1et/H3K3of/fepyCO9T9ajJ4pA0+5Z3E/zv0b4WOHX0rrN5gMK5X8Y4W1M/BZwoXlfLS37XZO8MLxU+bH2r70x5DPrsgjfLo41/2Xa7zLPh5QXb/+d42XfO2ffdvrI+JO0BcuponLR7nho1uiqFAZS+Zm8nbAFTXW0QIvmJqIm/2zUZTT1jIpwnCft71+JyHRP6PiBtn+d96g8T9zJ827TXwL8FOAw+OhtvCiy4DBa2xMuW8yFZJMA00ZZdjUfqJwyqVF5cppGV4qWSVpVRe1cJV6DPtMW2Vl/JY4dMa2Bsdlwd0v6Gl2L87Ur/sPf/Q8DZrjz2fbdL1oeOR9Az98FKf0x9O+YzTsn95qiBp0a3nJy6ubhcDOzAM4NQJn9eGHhBkAEsi5TkKYtoCvfmrw2f+m14vfJ7QC2qqlyatIrftXym6rcrnTd/p/jeR55z+dfSTYRbgKPAD7d9DLP9RMpNfYMnbgtoOB3lbevCPAAVsIGJ3xXgxvmrcQDNhwqvgawK0OsMnwXwQ+X3LpnSM/ttFf2Xw/dpf1+J53G5fmzz3Tfdg+z/+4x/y6dNd+h4su2xT357jkegMhbts5Y5X+h5TGo+T41uNe32eYoDOWmgQIuUN0BJfsvgrO2e7MCtDeSpOFO7sVq6qfIO4WWfcneluUr5l8l/7vc+7/aRQSRuf9UvYMx0phzxXyr8mIJDrJ/hVXIDYtzCn4jS2ncd4Lb6FnUf+3bIl8qIdQvnm7SMXOfhqd9zfABwHeC3lt8gL7dlN2YeW1Z2Rlb2narbvn3ssn3jkLCHuf/vW/5lZHYdc92Bc0t1zGE6HJ6+pxjeN5+nRo0uTWbgbiU074YcT+aF1smn3ZJLC6ksImbBpPIc7dgdkHZWjiYGdQEgacFcKoNOl3UppxBO5YX4oPjOxAcc7fQzP87EJx4m+c8yoNwofoxD2hUla6lz5036vOTm+nN5U/G5/Fp52MGvM+XlugZwEWmb29jx7jfJmdoo9iG3RW7HbeIM0l8ybz7JLB3V7mJcyYtlIGm35qbmbe4BIa8MXKRlUhmxL3syUUuf8tschi6mJVl4Wri8kVcCfqQ9yy3pEz9J2ltqpS31dw6XtNzHO/OO5VL0wx1joTa+Ur1MnnbcqbFT9v8yLc8DNZ5h0pb9vc5zrd+XfT3zZ8Yzt1nirzIvRJ6n01L9Chk4de/Y9NiO+Zi+IemQxhWUhorly7wci5rmqdGtppO7y3D/TPxY7XC2Kn0z4u7XdQ5+i7TzLoaf0h7oCTj8kp2+1gaUv5FSTL/HTHx5aXbuQKkdm9Se2d+1/Gv8T+Vf1oc1H5rfqd+1+s7xL21GO9ZC
07E/v5zetmXuCxTHaGgm24TlaOvPcWiByQDF1J/TiKyUhky0P5wXh2d+WEtld/Nuy32f6jSlyaq1JYclXg3P6e+O90ysNavJs6a1KrTGRm5T/b/Wp4o2nisXE/xNtGVN5qofVfpDEa+ettpfJ/Ob4pnDyrQyh+5OW5OBHetIfc5tAZ/6ZZ6DbXlJ89R7YGyap0aNrkAu3amTduId72Rd1grE3Y1Pu3LZ0cS/W4FIyDsf2Zl3nnb6Lu/2ZPckO7it2SWl91wG8m5MxUfencuuOJUn6Z2Kn3/ze4+k+VLxhR9+n/kL/IjsHP0GCm2I0XxorUzeqTr129Pvef4lvVd+MY7k5/bk12gKtqavpLpwHXQ/SVoB0SDRjjm92/I7U8dOt7GnRc8JsODdve0rW6fak/unpzjetKuHU78F8GLLGrDMN7dXLsMH02KUS+r3qr2ZV6/bQdpT+sk214Prn/t87hOqfyX5xTzI5yeV0+W2TjwY2bstyYjbm9ImOSgNtC5XwpwAV8UL9X+eJyr9KKWh9hXtjeM5IMWjfmb6l6e+pMY/yTenlT5XkwuNJdM3Bahr2dfSsgzs3FWOGR9/x5ipz3F5SvM0GpB4BGqap0a3muTmZNnhbM4G2qnQbkx2NYVvSiTaGcuOnYZqeLZ+JBM7TN7VJ82AKSflbTQ/HJ/LzOnNDru6m9svf1Xv6u/KDpfqLD47ub60i1Tpd+XP77W2QDRPqb1S/Wwb1PjVv4M8Qbvdsp1T/qwdiqTaw8idNUBK06K0T3qXzXxyf8x8Qr8zdU8aVNumUY6lJirnq9LImJBdfqEBKPuN0tyR9imUoWWQZQOlfcr+WLZ/5jyLtEli0qbSD8y42EvDwrIxvFR8xmp9Yl7LNDPuoPtgXbuV23JKO6fLqGjUrQayml9lHE7IT+rBbTCtydQyLv337HiszLHQc9ny7hrcxpuzFY5FTfPU6AVAPn06QnbixW4s7mpcHKhpMZJdVdoxx0UBYSDLji2kdVnjwml4Vyi7+ph3MqnQ+7ybt7tZ0iSQL0ry60gTC/Ku2O4sD8gfSR55J86ar3KHq+vsRYvBO0Oqf95dkzZNab5cqc0gp+W888ztlTRHpKnjXa/WBGr+fSxfdvPiD+RV+dJvPGkeSKasMUk7comf+Ve+QKn9snyyRjHWM8bJ7e7Nu6zlkP6XNKhWuxf59KkdEfs1SKtmtIHJPEpxiS9v+oln2Yq2bcsyor4ofWALHW61RwxOtgIGdN0TgBUZpMU7SVu3Pfc1al+rfU1jWtokgRAJF17ybzUeUrncx229uJ1kk5HLFUAhQN9q4rIWjeYB6l/iyyZt5jqqG6XPc4get87wV5OftIfndhONEWuklMbQm3FJ84aEb+tzrKPxKfwBjj4Gfzxq4KnRLae4I4pfh08LJTSg4cnWbXV4mk7tpCvvZHefwAiy9oqAWUplFvG0kG3zRJlV8XmB8AxUtnYyzkAwLb5poRXeTB2MaaeWvyyCnngSXr1aNLwCMsqUsM3l8MSbwOfWlROj7DojQHKURuelTT6+ZhbY5jwdgQ/mPy2ICgilZYrKJ7MfLRSO8gcM77KYc527DF68yqd8l3sg8+g06Oi0DBMI6JCAbmofWvSm+oTES2CgCjpz38saRZGXV6BAlxFjGDAiC26KK/mI3LhPETh0tk8m4GCAIbVr6kcM6BNgpvDOzhMZ7HmRr9pA5AVfjYduIn/YejmTP4EtxTePF6/k4YvwDOJl7gn1ILNhTM/939G4DflR/6nILwNY6TM+tyUDWALfjvkmGaV5g8p1FEdgEYP01F/h4+e4sq7rGNTAU6NbTk7d85TNDl5NuC7FzrvjPDnKJJwXv+SoKrufmDrvhrPJKi8QAnJ8ntxoImLNT5pIkBfLrNqXyY20DIAGfGmyMv4V2zzpZy1cnvjFdOBpsdAT6tRukXfGzB9Is6QX7mQ2SjIlWZvyPaURnpWcU/nkj5PqQvkkx2qn2sYCQF0PLj9raDzlYxcw3qXnRZ76Ve4xuQ22EeobLZVnU6cAqi6nF4DMi4vnvpjknNvPpzoim0OobyrZU5/UcWXTAAVQkkZma9pFgdXc95MZWsK2uh6qrzCQYv4YSJMGRQMxl0F18rmCGR92zGcgobQpMdc0lmN8rxb83Jbelqc0tGnJL/jL+REA4T65zX1HQKXqWwpY6A1MNnlSf6f8PM8dkL5i66M1oC4BWN5w6DGkzG0JcFE5CXSxPAygI9mmDWbqf3k2PyY1n6dGt5pOlA0cE8+7/u5Kt0++qKS5bF5z6Xfld2j+c+kvU+auel62zMu062XqMcf3Ibw8iHeX6efYwf9cnS/Lt83zMn3/Kn16blxedX64bBpcIa8H2bck7Dr427fsqecD0vYOGI/r89TAU6NbTeqSzHh8NdC+i/Xc8664u+LvymtfPmr5XKVO+06q++Z7mTTXkb72e1f6ufeHtsVV2vIqbW3iqH5fo3376FX6/iHj4tDnq8r8QddlH773qc918HeVel3XeL5KmsPq0K4qaNTo0kSDShYQvr6/9xTPR0fD+Fve9TJhmGdw2pl85SOV6WOVDi45NHr9Ecs+GmJSGrOb4nIojoMpQ/Lv/V7pAZsuBil5WLnMyKc3aaq87VvHy6a3vHmdt3l2SVYTdaI0brLN9+wLc2mKOleeU30pv+ozgNH2KXpHbar7kKP3NZnsqlPlnfpYq+2bZT3drEyZx0r71+RVtGlFhtUyd9SlIhen6mLrW2vjqXAe23vUszqv7GqHfeol+dbru98YuOR4UPXcNccyiDouNc1To1tN6pLM3sNHAMVHXQEP1zv4Eel3WlBVvBgX0augBzBKWj3Q89FyCZ/fQYUYu3ZZ+n2Nt/12jmX9abo3vy3/c2VM1cfmP5durk41udbrMpcv82brXMpDgytf1VyW8cv2lPDIR4/o1GrrNV2HWt8r+8s0/9NtV+93tT6s+TRyrY6vCv9V3nfLoGwrSbOjLxTlTdSHNHS2H06PF9vn4/s450zVpd43TR8p8trdR215s2Oa5DLfV+w4tmNUy25uXtPjkPKZkX2hOZ1sz/DM/baZ7Ro1uiTZe56G0zUAh/p36gA9sZl39o6gXX+n4keH0Pk8hBfM51Xlr1KPfXino/r1d/vWd678GRnP8XhQ+Xumn7nzaXf7Yjrvfcqak8NOfmryBbUdsD5FXnR7YHO6RKNGL3Q6WawUuGpmu0aNLk0WiMTF5ry2YM0tZg44nyunkuZ8Iq+dwKmyi5zKS/HH9TA0yzvHqaQFJuQ1Vd8D3+3D407+d8ioxr8FJjuBExO9sxcUbqfqtm9f28XPVJ8yvIm2YkSjRo8OTWpkb5YaeGp0y8lp34oY5nu9+PjRl+nA6nSPcOVBJq8WpZgPB1lfGaExl83qes2BWSD7cjLw9ldPvNq8es2HN2V4IPleVE117PsyBSjIF8JX+PcFX8xNNOdMldGXPGv5Acp3xPDnyRfEw6U2SHXdo00mTZijrpkf+b3IxqbnfCeAWupfzE9F8zRWwmLOD8Mi0qjRjVHVl+w41MBTo9tPyV6eF66
8vIQwZ8BJ9l3R2pwcy0UHYxfjI318mMvxnONogYRTSxunZWDm1VNe0MPFn7UFPz6PJWBR9aIyuGxH6XX5XKsSNDAfLsoDIBlXwAGnzzcgefOe24L4GJ1JI7Lgssu0DkhALzt+556RZGHa040T4Kt3Op4Czb6sv8lH9byR6pAmf+2vFeJR36N4GpzxxqFRo0eA2DfsyBrXBp4a3XKyWiciszjyoqsXf8TFakJLNcaFsacFdSwBg6OdkFMaA6vFQj6NN5aLblpA5YTJSCCM69RDhRf14l3ayACE38fyK+lz/hlMKPDYE79KlkjvM1CpgLeJ9/r0GAMb4jHK2jEIVYA0yzOlU065vtSE9QR4JX8lPy6L2l2VW88npe21HD3LlWXYc44CZNHHVWEAACAASURBVK0GCw/FB1IbNbpR6vW4PhY18NTolpNoFqxmKS9iekGmeOmosGiLJrRUxlzkInBJw5cWaXWijzVQwosxBXrWYqg8ecElk5AyLWrtiISoelEefOYmgDAillVfMWf1RgNmtVYMNhhkFtoT0bxZGUcA1W0wLAbNa8qfgYM2RVpNYQYuxLfwZYHgSFpEC7AUkGTtU112jgFQodWidwJWVfuXfHlqE2fqn8tudCvobANcbI7NxS0n2WQcH0A18NTolpPswMmUQdobMdXoBR2FyYgXvhBaapd40VPhrHEydng38mLvC02E1uwQoLGmIposmM8Agugd18CWzdqlxQZDt8LQ6b+S3lXqzxqwpLUqTH8WMBBfBEycWfQzsJVWIcALre2z5jDVjhP+SQzVCrMbm/h6c6ycgBHLUwFJBooCgKzPFdU3ADs9+Xu1IAgwYihI4UxGu9boQDrdABcX5b/xQZzialrCq1McEf3EeLhBauCp0S0np50IhZQZquKXUjMZVTUXuRy9wOd0VY2AgDPWSlRASIhX0exEUovqqLVMQAYxFljYhT9rhAa4boUBHuvtEuvzk/B3e4Ll1sN1GwOQcv29BQBJRl6BxFz3zLvS/FgNEpvs0rfjXCm7nsqmdmCgN+mfNOX8P3KLE9CqOHM72x/IL8nZ+D31oxHwRX0zJ4CAbgZopSasPPTgScaNrkT37wB34r8n1kC3egAAqoHcK5MaJ8elBp4a3XLy1YVO35wsixgDlJS6YpKidzVtBi2QrI2yJ8OUZoTBhbrpmpc+XpC1hsYXeRrgZvxsmPecaoDrBjissT5fpvJz6WustyeUN1Bo2gx/VuMi2rUMDo3mhzRl6cLFyqmZfEqOZKhAKy9FfLoSlfbQpy990qhx/TVwyTKcBjS+r8dXGs74rg7otEYsy6KuCdMaMyhZNLoiKW3uMn4UfHX0E12NDD0kJjuggadGt5608Qbpl9VywPiUgLQOGoh4G08BHCiAwwswa0D0jcKoal3sLb6sgbIOygywSidzlkDJew4fAgdbo0EDg4JYE3L6VvXh36PhC8SXApikaSHfJWu6w+jCBZRZarotRLa9KUvxQe/VacP8xBo5bhMbniBKBdCo/lIBmzWgqU7QxbaptbUGbBZg8cKRNVyNLknk06bDPQAfLidt9BARa1uPC6AaeGp0y6luupCFLPsr6QVsEqiwD5E9oZbMURogqFNQyeclcafiqlNj6tgt+1aFfGrmo7zohrBkSmMAYU/2xf9dJzzRpBN5KLUcxP84FP5Rq460TexvNjo440e16oYJE+dK+1stQJdBepJJJOUIT3BJTaalNk6ddBx1e0qOuV9ouWRtlZVoBq6O4te1Uaa/Ke5RaKYUkCtObDbN07XSpGYpythe3npmfaQmHMCtL9WZKy9bbXQJcmZsHo8aeGp0y4kXEDOgWMuhtBZGY9KTDsh8RFPpdXrHy2sEFvreoBQ+dXJKbPa0uDNfmTer6UF5Qo61LwTokq8S+9rwd6kYCJiLQVXZcHBw8fTbOvlGrbdreARAZc1JzsY9j3EXq8wfgACcHPx2GeMuY1jmxFun6sS/dvwXXQ77e4FSKo0Sa8JYK8fXLsR3Og+WaQZn3sS303kAVhkEqusUavd4WS1U7cQma56OvIC8YIjHxOkGWAA4N5+8GS8ArLNv1J0lAAdcmO+rnW6Aew44P4nxToB+A9x7sFV4NIg2akemBp4avQCIduKk61FUMdlZrVB5eorjEbEzuvH1YW6Ufd4AL7kAs7jkkXyM7MdEE5DiU3Q1sECLewYDTj2HcoPz+Ko4cUdalW4A4LE+l28IIgDBbQZXySSY/Kl85r9fF3EdANexRlDyXBIs8IUcdb1K0Kgc+g3QzPoeczcXt7f1ias4k6uyVfrMa+329cRBcbVC/CV9hdpM+mfpGZXbMWteG12JzkhLdC/K9dyM/P4OcJfGAdb5szx8FcfCAds1cJdAbn/ygCvwqFDegB6bGnhqdPtJfZ6FzWOV26IVmIECR3yjtxvLeKEEMgcRC3qh53h6mGtNRcUJu88LudVoSUnqPiN7mWIBAhgcUi5iZtqeYH0uGqUsQ+WAzcAt1StKs8tAI5HVuqm4TmvBDMDwW+GPNWkWyHK9SKZFGqPNMxdxslO6S22i/cUAqNOA6rCB6i9IaVS9C580/lVeZeBJltzvuF1zHZD4b3RJEtnxabs7d8K3Es8ugNMd6eX7hKQxDc92cffA/eMv+LefvJqnj0kNPDW65eRpMWYNQQ5JJpnanUS91mhYM8ykWQdQR9VzOhPP+FBZJ2Mpq9CMcPk9g0DDVwz3tbor7RbRWANDLjprs3YmcrjNPmPW3yjk54AFx9Ugriw7PkewpqQqixD7XynAG2vOpwCTSTZnk0/05TB1UafSXhmZGvDJl6ayn5pyeDfmz6Qtq2iXaneI1do+5Jv7igaELNu2KF+ayByr6O5J6J8LY5I7M/dB3av08Vp+ANAdf8G//eTUoZNjUgNPjW45aSNHTcuiQQL0JZMmr+KqAJVLJPX5k/Ama7pynAB8bPmk6QCMI7UGBQxW8i3o3vCJ4iqGxKvSbom5YdA+Slz9bSkRDwAda99Y6yFy8MC5iWvqm4iBQQeqS8mD1uYZkDF3MzfxV3wyh+UDKA1U9doHJQmnQJ0GrkbP1es+WLRHRTbegE3h1BVAi/nRJTe6JBWmzyhn0azCBbC08MBd1lBNLOLVU5DHX/BfEDQ+HHJs4KnRLSdPCwjt8gFl5lALaz/lo4KsQeCFjE9loXJUvraAT/rGeDVRezNpu4pfk/7AL9fBQMTahZwEsPx2HUKS+czugq1ZiAEpDBCM4GK7JjAgMjUAZRG52a5h24m1egCfCHRpkuTLOQsna3snlwWqSpbQJjEgt5PKh+vBmiJOa7RhnL7y+RerYcpXPxDsN99OtGBOE4PEps24NM1spABEsxz9Pl+bE3ixHXjjUdmEII6ZRtdA7dt2jRpdBzmzyDNIKk0p2RQUSXxUrAaC0/Z2UeQ8eTFEebUA34SrPqXCIIfTxTL6qXqYtPb2dDYlsvP56ACI8/aAoRuMAzoicMn8+R7J2XvAkHmDw9CF/NbIIC7lvRiobg4DYtnUVilu4tXBqc/DgEyvBK6Kz5xozR1gzZVOaZ
/yScQSxKQ8aycz+fuCMOZAxSs0OAbIiT1zqfzran3CfrTYlNk0T9dEU872Z5ugGU0n7iJAWtCt44t4Kg8gn6d10EZ1Q7ieAECYozbtzqjroqZ5atTouog1H3mhYnAiC1xxdUChKVK5pif9aRaQWU4v3mzK48WezTaAM4tvzru4OJPrMVZAmvocCdTxe2WeS349cou4y3c2LVbxlF24YiBL0gNYh6sJuoHueRriFQMebFbyMW4AZzbu2vAX03eh7NViBaQ8NVAJcuBWMZo59T7LOYPJTFl7mHNL7ZPSl4AMgL50016cqfLJfVDlwf1PzHrqYlJT5/S+AqhTPzj+DvwFQfeML5OY55LDuAd6uZpA7m5CPn3H2qbTk3DSbrHJd0Gd32kO49dFD4nm6c7FxcXFsZlo1OiydHI3TmgAAI/hNPwVzYM6gVZbfCrPXgABXQLJQETlW8t/DFoLBmqisq+m5YWw35dfyWmizBQO+H4HvwUve7yf/YtChgAKOe6Vz6Tssbt8884j+lfZz5xQ+8gzp/ejPuGY3nFc0/6Y4Hm6/W27TeURfq9PdR029k6iRo1egHSyGMBz4eZsNRf9gVLTPDW65VQDQfLGZRUvO/ryJYuAvrl5LM0mWWtD76nUrPnK2h8Xw9XN0vbuIXPk1tfMS6ocvcBaP67q1Qa9V3LIR+Np78bXKyg/o1LLUxzbV7d3Z26VU7RIaeLTM2oPaUxTzviJODZFcvuq32J+Q25zkReDU+kF/H07MdWRRsheTpk4MqY6T23BpymZZ75hPuXGGkUJNwDPcRsliZkTf40aveDJmetAjkcNPDW6/WQdavkCTHIOF0BTOHNXQYuAjUyF6Q7I9yUxCEp81J3KU1p15FY7LidehUcDUnjhtXzaE2EZwFHpDAL523zKGZ75jOH2JnS+8yr5hokE62CV/cxsvX0CqlBgjIGJBUOqfc0pvAStzYk9a7ZTdaq0c5ZQ5juASf3Wmostz+z8nq49qF6gyqAQuo24rz0EX5dv1OhGyWw2jkUNPDW65SQLVnwGSPMTyKkFT2tY8iIkcbW2RTQYVFoguhE6LeT2SHky1QG101hV7QkDOXV5JpXN9x2lO50qWin+9los14IQCw+0NkS/yWBIcmVndZEfyoWewKoCdeo+I61N43z4pm8LXoI2R3NZM4Mp3zPoPmE1aNzGur2zVjLlYz8yzKZbJT3WWpbaPa0V1DxZIK20rE3z1OhRo4fE56mBp0a3nDK4sAuMAia9Weinvl2mFls2w0At4uXt4bxwct7aDJhNPOFXcX8TLYzO8hh5KLRVBNQA0rJYHvtyIXasuaLvxOUj+GVcdnbOwCwVrzROFpyEOmjAxZLPeWU5qPu0CLCmNAXIwiQwUmCYbxRnQFRzxjcnHotboAz4tvKzYJDjpnui2FSY+nQu0zrJ641Do0aPAunN2zGpgadGLwByBagotBcowQCgF6xygWKQAgJcrPXIgzgtY70BGxUNjzMAwZrlUJQRy6599DdpLWLefB+T4lE0N1IPoGay01oQrfGyJjNQvFw3rVlyJBOrXWMTZLoLi4GrAURKs0RXP3jiwVN5GRgxr9rMasFM7Q4tK8fiFF/1I8YO1ulegb3YH4vPwfAGgMFr8WHgDJAbNXo0SMY5byKOQw08Nbrl5OlvNh55s7ixSUSBDTraHxYozrUOqNR38/iEVV4GQx7knKxO3tmF3mqO1D1BeoHMAA0qT3WqkAGO0dQwCKyZg0otiL3Z2u73NDgsAFkB8MqrFbIOieuS65nBm3HWp0lU+QzJg/VHIvnztJsBaanRUxdxVnzj7I3w+oJLip8AmpF1X5E98c7+ZuVi0TRPjR41Ys3TcelGrypYnq/hz4Hazr79bX8P/xupR1XztD9x/KnnQ/OZSz+xEO4VdhW67nKvwt9c2kPkdt1lP6gyL1vWTJ/qYS4M3NX/2u/Dfj8MPDzqv02Y6vOPyFUFy/se/tzHX679bX+v+Ndn9e0IaG0GLyb7/HUzz5fNh/nZFe+QsKv8rdVn37Da36vwN5d26t2h7XEdfF93Gxxa1kSfSv3eUf9yuq+pax58OS56k/dNx4cjEwzVtWaW6U3da/mn/Ex6e4Fukd8E/7M8X1cZps0YJNj2Lj5DdRtlemh9DQ/qKhku6+bpxsCTH+W7VsjCa3/b38v8BQA45firfVh4IoBOYycQ9Q6YnCB44pJ8e/NbPfMiZ+PzeyKVdqJsNZmh5KUWf6p+U2HFRFbLy5s8TBzyNdsrbW/lbvmpAdyJ9u2n8jVxa21q0xTvKIui/bFnnjtkovIASlnktPa7e/nZ0TiI8elqhBDXlHfT8YHCjJ3zMYtsGkOctiIvy4NKS7IcWb4T/M/yjIkyON4+ZaCcHwq5cFrsyd/DKtND62t4gBxu4bXgOHSDPk8u/y+dYGQnXmTnUGmE0bX4LX4ZnwZoviMoD77k0WK/eScDPYGxEJ7uWzLvs6O2HrzpWQY3l5PiSZ48aZhJ0O4UVVric9T8Kpv/yHFq9YYBnzSpGjloPkxYb/KyE7GcXEs8G5lXd5Jc5tTEbGRf1MPyZtqAHPr1YmHkNFJd7KLC/kpmQndF+++Tp5GfrU+tr9m2McA+O537zGsCMbGfJ75IPvDF4YUbi68W3sx/GuNmkc1zAVTf4/uykkxGOeRRjmFXW6CVDyGoDFcpN9bUjssYL5RbluG4zahdU9spACFzIaD6hipXyzSf0NQydVXARH55RqaOZMUydVXQCCr3qvWlMJKh1jSZOeWIdGM+Tyd3l0HoUTjD6YD0CYL2d+fftZg8+zAAhlOPPCE9un/Xp/FDndEWHuQCHa/zwPYK5eybfjYeyuep+HuVh/o7lXYizs53D+IvDkxT4a9z8Sv3l+VjH5nNpD2k/Kv2uR1/9edZPLBw+vtqHfJveZ4Le9B/LU9MtfAW9uiG2Xf822w8N2f0oeYbppvVPNGO2TFAQPkdsOLSv0c4vvJrUBdCHgewPFR/C22JK+PJIlbckyO0o5x9F8HZeJXn84n4U+GT+U2lvUT66t852jefQ/OohG1tGx76d6IuxYI+kbYATlNpAJzP5bWPfPeQJ2tabB22lee5sAf91z53E+EtrIVN9eXYZ5RW9Yh0g+BJBnz469MJKQA98pHfGO7p+ZGOL0e12fxgJlNemryZlPO7EG6vJryd8an+xYVpdqF2Rj0MZRZRVxpQXvqiR5Pf1OI5ToQXPBl+bL6FD4It11fCIqk8a/lA1V+HV/JTcScW8XRvVEUGnD+nHefq4k1alg3nu08b8d+ZvqGoTOtHFOSL9qD0/Y52Kvw5LO971IfNYx1uF01poFpYC5ui2Gfy9SG7NmUPlm7Y54kmOgIPzgAJR88Ago36EYlfpI0AKk/IgJ1EeerX99nk6Xrq/pl8wzTFpwUzfQaE/T5o8lb5g+zizM9o8geMBk4AoskfWVbqjqBiseSFSV+kmEAp5YGeLpBUjtb5viJnfEtyrXR9EPlWHwC28RNIs4DQfBOPwMLUPVTqw7y06Erd+IO0nmXXEyjsiQeShQqHvoeIv4+X4
)
*(figure: confusion matrix)*

* True Positive - Targets which are actually true (Y) and we have predicted them true (Y)
* True Negative - Targets which are actually false (N) and we have predicted them false (N)
* False Positive - Targets which are actually false (N) but we have predicted them true (Y)
* False Negative - Targets which are actually true (Y) but we have predicted them false (N)

Using these values, we can calculate the accuracy of the model. The accuracy is given by:

Accuracy = (True Positives + True Negatives) / (True Positives + True Negatives + False Positives + False Negatives)

* Precision - It is a measure of correctness achieved in true prediction, i.e. of the observations labeled as true, how many are actually true. Precision = TP / (TP + FP)
* Recall (Sensitivity) - It is a measure of actual observations which are predicted correctly, i.e. how many observations of the true class are labeled correctly. It is also known as 'Sensitivity'. Recall = TP / (TP + FN)
* Specificity - It is a measure of how many observations of the false class are labeled correctly. Specificity = TN / (TN + FP)

Specificity and Sensitivity play a crucial role in deriving the ROC curve.

* ROC curve
* Receiver Operating Characteristic (ROC) summarizes the model's performance by evaluating the trade-offs between the true positive rate (sensitivity) and the false positive rate (1 - specificity).
* The area under the curve (AUC), also referred to as the index of accuracy (A) or concordance index, is a perfect performance metric for the ROC curve. The higher the area under the curve, the better the prediction power of the model.

This is how a ROC curve looks like:

*(figure: example ROC curves for several classes)*

* The area of this curve measures the ability of the model to correctly classify true positives and true negatives. We want our model to predict the true classes as true and the false classes as false.
* So it can be said that we want the true positive rate to be 1. But we are not concerned with the true positive rate only; the false positive rate matters too. For example, in our problem we are not only concerned about predicting the Y classes as Y, but we also want the N classes to be predicted as N.
* We want to increase the area under the curve, which will be maximum for classes 2, 3, 4 and 5 in the above example.
* For class 1, when the false positive rate is 0.2 the true positive rate is around 0.6. But for class 2 the true positive rate is 1 at the same false positive rate. So the AUC for class 2 will be much higher than the AUC for class 1, and the model for class 2 will be better.
* The class 2, 3, 4 and 5 models will predict more accurately than the class 0 and 1 models, as the AUC is higher for those classes.

On the competition's page it is mentioned that our submission will be evaluated based on accuracy. Hence, we will use accuracy as our evaluation metric.
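As a quick numerical illustration of the formulas above (an added sketch, not part of the original tutorial; `y_true` and `y_pred` are hypothetical 0/1 label arrays standing in for N/Y), scikit-learn can compute the same quantities directly:

```python
# Minimal sketch: computing the metrics described above with scikit-learn.
from sklearn.metrics import confusion_matrix

y_true = [1, 0, 1, 1, 0, 1, 0, 0]  # hypothetical actual labels (1 = Y, 0 = N)
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]  # hypothetical predicted labels

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
accuracy    = (tp + tn) / (tp + tn + fp + fn)
precision   = tp / (tp + fp)
recall      = tp / (tp + fn)   # sensitivity
specificity = tn / (tn + fp)
print(accuracy, precision, recall, specificity)
```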
Let us make our first model to predict the target variable. We will start with Logistic Regression, which is used for predicting a binary outcome.

* Logistic Regression is a classification algorithm. It is used to predict a binary outcome (1 / 0, Yes / No, True / False) given a set of independent variables.
* Logistic regression is an estimation of the logit function. The logit function is simply a log of the odds in favor of the event.
* This function creates an S-shaped curve with the probability estimate, which is very similar to the required step-wise function.

Let's drop the Loan_ID variable as it does not have any effect on the loan status. We will make the same changes to the test dataset that we made to the training dataset.

train = train.drop('Loan_ID', axis=1)
test = test.drop('Loan_ID', axis=1)

We will use scikit-learn (sklearn) for making the different models, which is an open-source library for Python. It is one of the most efficient tools and contains many inbuilt functions that can be used for modeling in Python.

Sklearn requires the target variable in a separate dataset. So, we will drop our target variable from the train dataset and save it in another dataset.

X = train.drop('Loan_Status', axis=1)
y = train.Loan_Status

Now we will make dummy variables for the categorical variables. A dummy variable turns categorical variables into a series of 0s and 1s, making them a lot easier to quantify and compare. Let us understand the process of dummies first:

* Consider the "Gender" variable. It has two classes, Male and Female.
* As logistic regression takes only numerical values as input, we have to change Male and Female into numerical values.
* Once we apply dummies to this variable, it will convert the "Gender" variable into two variables (Gender_Male and Gender_Female), one for each class, i.e. Male and Female. Gender_Male will have a value of 0 if the gender is Female and a value of 1 if the gender is Male.

X = pd.get_dummies(X)
train = pd.get_dummies(train)
test = pd.get_dummies(test)

Now we will train the model on the training dataset and make predictions for the test dataset. But can we validate these predictions? One way of doing this is to divide our train dataset into two parts: train and validation. We can train the model on the train part and use it to make predictions for the validation part. In this way we can validate our predictions, as we have the true labels for the validation part (which we do not have for the test dataset).

We will use the train_test_split function from sklearn to divide our train dataset. So, first let us import train_test_split. Once the dataset has been divided into training and validation parts, let us import LogisticRegression and accuracy_score from sklearn and fit the logistic regression model.

from sklearn.model_selection import train_test_split
x_train, x_cv, y_train, y_cv = train_test_split(X, y, test_size=0.3)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
model = LogisticRegression()
model.fit(x_train, y_train)

LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=1, solver='liblinear', tol=0.0001, verbose=0, warm_start=False)

/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:818: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: https://scikit-learn.org/stable/modules/preprocessing.html Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
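The warning above tells us the solver hit its iteration limit before converging, and it suggests two remedies: increase max_iter or scale the features. A minimal sketch of both (an illustrative addition, not part of the original tutorial; it assumes the x_train and y_train created above):

```python
# Sketch only: two common ways to address the ConvergenceWarning above.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

# Option 1: simply allow the solver more iterations.
model = LogisticRegression(max_iter=1000)
model.fit(x_train, y_train)

# Option 2: standardize the features first, which usually lets lbfgs converge quickly.
model = make_pipeline(StandardScaler(), LogisticRegression())
model.fit(x_train, y_train)
```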
Here the C parameter represents the inverse of regularization strength. Regularization means applying a penalty to increasing the magnitude of parameter values in order to reduce overfitting. Smaller values of C specify stronger regularization.

pred_cv = model.predict(x_cv)
accuracy_score(y_cv, pred_cv)
pred_test = model.predict(test)
submission = pd.read_csv("sample_submission_49d68Cx.csv")
submission['Loan_Status'] = pred_test
submission['Loan_ID'] = test_original['Loan_ID']
submission['Loan_Status'].replace(0, 'N', inplace=True)
submission['Loan_Status'].replace(1, 'Y', inplace=True)
pd.DataFrame(submission, columns=['Loan_ID', 'Loan_Status']).to_csv('logistic.csv')

from sklearn.model_selection import StratifiedKFold
i = 1
kf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for train_index, test_index in kf.split(X, y):
    print('\n{} of kfold {}'.format(i, kf.n_splits))
    xtr, xvl = X.iloc[train_index], X.iloc[test_index]
    ytr, yvl = y.iloc[train_index], y.iloc[test_index]
    model = LogisticRegression(random_state=1)
    model.fit(xtr, ytr)
    pred_test = model.predict(xvl)
    score = accuracy_score(yvl, pred_test)
    print('accuracy score', score)
    i += 1
pred_test = model.predict(test)
pred = model.predict_proba(xvl)[:, 1]

from sklearn import metrics
fpr, tpr, _ = metrics.roc_curve(yvl, pred)
auc = metrics.roc_auc_score(yvl, pred)
plt.figure(figsize=(12, 8))
plt.plot(fpr, tpr, label="validation, auc=" + str(auc))
plt.xlabel('False Positive Rate')
plt.ylabel("True Positive Rate")
plt.legend(loc=4)
plt.show()

Based on domain knowledge, we can come up with new features that might affect the target variable. We will create the following three new features:

* Total Income - As discussed during bivariate analysis, we will combine the Applicant Income and Coapplicant Income. If the total income is high, the chances of loan approval might also be high.
* EMI - EMI is the monthly amount to be paid by the applicant to repay the loan. The idea behind making this variable is that people who have high EMIs might find it difficult to pay back the loan. We can calculate the EMI by taking the ratio of the loan amount with respect to the loan amount term.
* Balance Income - This is the income left after the EMI has been paid. The idea behind creating this variable is that if this value is high, the chances are high that a person will repay the loan, hence increasing the chances of loan approval.

train['Total_Income'] = train['ApplicantIncome'] + train['CoapplicantIncome']
test['Total_Income'] = test['ApplicantIncome'] + test['CoapplicantIncome']
sns.distplot(train['Total_Income'])
train['Total_Income_log'] = np.log(train['Total_Income'])
sns.distplot(train['Total_Income_log'])
test['Total_Income_log'] = np.log(test['Total_Income'])
train['EMI'] = train['LoanAmount'] / train['Loan_Amount_Term']
test['EMI'] = test['LoanAmount'] / test['Loan_Amount_Term']
sns.distplot(train['EMI'])

/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning)
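That FutureWarning is seaborn flagging that distplot is deprecated. A minimal sketch of the adaptation it suggests (an illustrative addition, not part of the original notebook):

```python
# Sketch: the histplot equivalent of the deprecated distplot call above.
import seaborn as sns
sns.histplot(train['EMI'], kde=True)
```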
Let us create the Balance Income feature now and check its distribution.

train['Balance_Income'] = train['Total_Income'] - (train['EMI'] * 1000)  # Multiply by 1000 to make the units equal
test['Balance_Income'] = test['Total_Income'] - (test['EMI'] * 1000)
sns.distplot(train['Balance_Income'])

/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning)

Let us now drop the variables which we used to create these new features. The reason for doing this is that the correlation between those old features and the new features will be very high, and logistic regression assumes that the variables are not highly correlated. We also want to remove the noise from the dataset, so removing correlated features will help in reducing the noise too.

test = test.drop(['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term'], axis=1)
train.dtypes
test.dtypes

i = 1
kf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for train_index, test_index in kf.split(X, y):
    print('\n{} of kfold {}'.format(i, kf.n_splits))
    xtr, xvl = X.iloc[train_index], X.iloc[test_index]
    ytr, yvl = y.iloc[train_index], y.iloc[test_index]
    model = LogisticRegression(random_state=1)
    model.fit(xtr, ytr)
    pred_test = model.predict(xvl)
    score = accuracy_score(yvl, pred_test)
    print('accuracy score', score)
    i += 1
pred_test = model.predict(test)
pred = model.predict_proba(xvl)[:, 1]

from sklearn import tree
i = 1
kf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for train_index, test_index in kf.split(X, y):
    print('\n{} of kfold {}'.format(i, kf.n_splits))
    xtr, xvl = X.iloc[train_index], X.iloc[test_index]
    ytr, yvl = y.iloc[train_index], y.iloc[test_index]
    model = tree.DecisionTreeClassifier(random_state=1)
    model.fit(xtr, ytr)
    pred_test = model.predict(xvl)
    score = accuracy_score(yvl, pred_test)
    print('accuracy score', score)
    i += 1
pred_test = model.predict(test)
pred = model.predict_proba(xvl)[:, 1]

*Data Science Unit 4 Sprint 3 Assignment 1*

Recurrent Neural Networks and Long Short Term Memory (LSTM)

![Monkey at a typewriter](https://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Chimpanzee_seated_at_typewriter.jpg/603px-Chimpanzee_seated_at_typewriter.jpg)

It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of William Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM.

This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt

Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach.

Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size.

Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running.
Then, once you've got a proof of concept - start pushing it more!!pip install tf-nightly-gpu try: import tensorflow.compat.v2 as tf except Exception: pass tf.enable_v2_behavior() print(tf.__version__) from __future__ import print_function from tensorflow.keras.callbacks import LambdaCallback from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Embedding from tensorflow.keras.layers import LSTM import numpy as np import random import sys import os tf.test.gpu_device_name() data_files = os.listdir('./shakespeare') # Read in Data data = [] for file in data_files: if file[-3:] == 'txt': with open(f'./shakespeare/{file}', 'r', encoding='utf-8') as f: data.append(f.read()) len(data) data[0][:500] # Encode data as characters char_list = set() for play in data: chars = set(play) char_list = char_list.union(chars) char_list = list(char_list) len(char_list) # Lookup tables char_int = {c:i for i, c in enumerate(char_list)} int_char = {i:c for i, c in enumerate(char_list)} # Create the sequence data maxlen = 100 step = 1 sequences = [] next_char = [] for play in data: encoded = [char_int[c] for c in play] for i in range(0, len(encoded) - maxlen, step): sequences.append(encoded[i : i + maxlen]) next_char.append(encoded[i + maxlen]) print('Number of sequences: ', len(sequences)) sequences[0][:50] # Create x and y x = np.zeros((len(sequences), maxlen, len(char_list)), dtype=np.bool) y = np.zeros((len(sequences), len(char_list)), dtype=np.bool) for i, sequence in enumerate(sequences): for t, char in enumerate(sequence): x[i, t, char] = 1 y[i, next_char[i]] = 1 x.shape, y.shape # Build the model: a single LSTM model = Sequential() model.add(LSTM(512, input_shape=(maxlen, len(char_list)), dropout=0.2)) model.add(Dense(len(char_list), activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') model.summary() def sample(preds): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / 1 exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) def on_epoch_end(epoch, _): # Function invoked at end of each epoch. Prints generated text. 
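    # (Added explanatory comments, not in the original notebook.) What follows in this
    # callback: pick a random play and a random starting offset, take a seed window of
    # `maxlen` characters, then repeatedly one-hot encode the window, ask the model for
    # the next-character distribution, sample one character from it with `sample()`,
    # print it, and slide the window forward by one character.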
print() print('----- Generating text after Epoch: %d' % epoch) rand_play = random.randint(0, len(data) - 1) start_index = random.randint(0, len(data[rand_play]) - maxlen - 1) generated = '' sentence = data[rand_play][start_index: start_index + maxlen] generated += sentence print('----- Generating with seed: "' + sentence + '"') sys.stdout.write(generated) for i in range(1000): x_pred = np.zeros((1, maxlen, len(char_list))) for t, char in enumerate(sentence): x_pred[0, t, char_int[char]] = 1 preds = model.predict(x_pred, verbose=0)[0] next_index = sample(preds) next_char = int_char[next_index] sentence = sentence[1:] + next_char sys.stdout.write(next_char) sys.stdout.flush() print() print_callback = LambdaCallback(on_epoch_end=on_epoch_end) # Fit the model model.fit(x, y, batch_size=512, epochs=50, callbacks=[print_callback], shuffle=True) import h5py rand_play = random.randint(0, len(data) - 1) start_index = random.randint(0, len(data[rand_play]) - maxlen - 1) generated = '' sentence = data[rand_play][start_index: start_index + maxlen] sentence = "OTHELLO\nO my fair warrior!\n\nDESDEMONA\nMy dear Othello!\n\nOTHELLO\nIt gives me wonder great as my content" sentence = sentence[:maxlen] generated += sentence print('----- Generating with seed: "' + sentence + '"') print('-----------------') sys.stdout.write(generated) for _ in range(1000): x_pred = np.zeros((1, maxlen, len(char_list))) for t, char in enumerate(sentence): x_pred[0, t, char_int[char]] = 1 preds = model.predict(x_pred, verbose=0)[0] next_index = sample(preds) next_char = int_char[next_index] sentence = sentence[1:] + next_char sys.stdout.write(next_char) sys.stdout.flush() model.save('shakespeare_lstm_512.h5')Preambleimport os from PIL import Image import pandas as pd import matplotlib.pyplot as plt import numpy as np import math import numpy.ma as ma from mpl_toolkits.axes_grid1 import make_axes_locatable #for subplot colorbars from matplotlib.pyplot import cm # configure plotting %config InlineBackend.rc = {'figure.dpi': 300, 'savefig.dpi': 300, \ 'figure.figsize': (6, 6 / 1.6), 'font.size': 8, \ 'figure.facecolor': (1, 1, 1, 0),'font.sans-serif':'Latin Modern Math'} from matplotlib import rc plt.rc('text', usetex=True) plt.rc('font', family='serif') %matplotlib inlineCalculating the G, the normalized correlation functiondef Calculate_G(image_names, start, time, lags): "this function takes image names, a sequence of time and a desired amount of lags" "and calculates G, mean quantities, maps" def calculate_G(metapixels_a, metapixels_b): "this function calculates the value of the normalized correlation" "function between metapixels" a = np.nanmean((metapixels_a * metapixels_b), axis=(2, 3)) b = np.nanmean(metapixels_a, axis=(2, 3)) c = np.nanmean(metapixels_b, axis=(2, 3)) d = np.nanstd(metapixels_a, axis=(2, 3)) e = np.nanstd(metapixels_b, axis=(2, 3)) G = (a - (b * c)) / (d * e) return G def wholefield_stats(spatial_map, mask): map_mean = [] map_var = [] for i in range(len(spatial_map)): masked_map = ma.masked_array(spatial_map[i], mask=mask) map_mean.append(np.nanmean(masked_map)) map_var.append(np.nanvar(masked_map)) map_mean = np.asarray(map_mean) map_var = np.asarray(map_var) return map_mean #, map_var ################################################################################################################## #this variable is a list which contains each timestep G_time_lags = [[] for _ in range(len(time))] sandpile_mean = [[] for _ in range(len(time))] sandpile_var = [[] for _ in range(len(time))] i = 0 # this is the 
size of the metapixels, defined outside of the loop size = 10 for t in time: #this is just to make t an interger rather than a float so that t can be used as a list index t = int(t) #the index of each image (i.e. the frame number) is the iterator t plus 11, the time where grains first #enter the frame speckle_a = Image.open(image_names[t + start]) speckle_a = np.asarray(speckle_a)[y_min:y_max, x_min:x_max] lenr, lenc = int(speckle_a.shape[0] / size), int(speckle_a.shape[1] / size) metapixels_a = speckle_a.reshape(lenr, size, lenc, size).transpose(0, 2, 1, 3) mp_a_mean = np.nanmean(metapixels_a, axis=(2, 3)) metapixels_a = np.subtract(metapixels_a, mp_a_mean[..., np.newaxis, np.newaxis]) ################################################################################################################## l = 0 G_lags = [[] for _ in range(lags)] # this loop is for each lag for lag in range(lags): # choosing two speckle images speckle_b = Image.open(image_names[lag + t + start]) speckle_b = np.asarray(speckle_b)[y_min:y_max, x_min:x_max] metapixels_b = speckle_b.reshape(lenr, size, lenc, size).transpose(0, 2, 1, 3) mp_b_mean = np.nanmean(metapixels_b, axis=(2, 3)) metapixels_b = np.subtract(metapixels_b, mp_b_mean[..., np.newaxis, np.newaxis]) G_lags[l] = (calculate_G(metapixels_a, metapixels_b)) l += 1 #this is a new piece to ensure that any correlation values < 0 are set to 0. #(negative correlations are unphysical) # G_lags[G_lags <= 0] = 0 ################################################################################################################## G_time_lags[i] = G_lags sandpile_mean[i] = wholefield_stats(G_time_lags[i], mask=~binary) i += 1 print(t+start) return G_time_lags, sandpile_meanTappingos.chdir('/media/nakul/Backup Plus/Data/DWS_RawExperimentPhotos/feb_13_tapping/long_tapping') import glob import natsort image_names = [] for name in glob.glob('img_*.jpg'): image_names.append(name) image_names = natsort.natsorted(image_names,reverse=False) timestep = len(image_names)Creating masks#specify the bounds of the image - should be evenly divisible by the metapixel size x_min = 0 x_max = 1900 y_min = 0 y_max = 1400 # choosing two speckle images speckle_a = Image.open(image_names[50]) speckle_a = np.asarray(speckle_a)[y_min:y_max,x_min:x_max] speckle_b = Image.open(image_names[60]) speckle_b = np.asarray(speckle_b)[y_min:y_max,x_min:x_max] # partitioning images into metpixels size = 10 a = speckle_a lenr, lenc = int(a.shape[0]/size), int(a.shape[1]/size) ## 'flattening' the metapixel array along a single axis metapixels_a = speckle_a.reshape(lenr,size,lenc,size).transpose(0, 2, 1, 3) b = speckle_b lenr, lenc = int(b.shape[0]/size), int(b.shape[1]/size) metapixels_b = speckle_b.reshape(lenr,size,lenc,size).transpose(0, 2, 1, 3) metapixel_grid_rows = metapixels_a.shape[0] metapixel_grid_columns = metapixels_a.shape[1] metapixel_grid_area = metapixel_grid_rows*metapixel_grid_columns mp_a_mean = np.mean(metapixels_a,axis = (2,3)) mp_b_mean = np.mean(metapixels_b,axis = (2,3)) ## Adding [...,np.newaxis,np.newaxis] allows the array of means to be subtracted ## from the array containing metapixels. otherwise, their shapes are not the same metapixels_a = np.subtract(metapixels_a,mp_a_mean[..., np.newaxis,np.newaxis]) metapixels_b = np.subtract(metapixels_b,mp_b_mean[..., np.newaxis,np.newaxis]) from skimage.filters import try_all_threshold img = mp_a_mean # Here, we specify a radius for local thresholding algorithms. # If it is not specified, only global algorithms are called. 
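# (Added note, not in the original notebook.) try_all_threshold only previews several
# global thresholding algorithms on the metapixel-mean image; below, threshold_minimum
# is the one actually used to build the binary mask (binary, with mask_tapping = ~binary)
# that presumably separates the illuminated sample region from the background, so that
# background metapixels are excluded from the whole-field statistics in Calculate_G.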
fig, ax = try_all_threshold(img, figsize=(10, 8), verbose=False) plt.show() from skimage.filters import threshold_minimum image = mp_a_mean thresh = threshold_minimum(image) binary = image > thresh mask_tapping = ~binary fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5)) ax = axes.ravel() ax[0] = plt.subplot(1, 3, 1) ax[1] = plt.subplot(1, 3, 2) ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0]) ax[0].imshow(image, cmap=plt.cm.gray) ax[0].set_title('Original') ax[0].axis('off') ax[1].hist(image.ravel(), bins=256) ax[1].set_title('Histogram') ax[1].axvline(thresh, color='r') ax[2].imshow(binary, cmap=plt.cm.gray) ax[2].set_title('Thresholded') ax[2].axis('off') plt.show()Calculating G# time = np.logspace(1, 13, num=13, base=2) time = (0,1000,2000,3000,4000) # time = np.arange (0,1000,10) correlation_tapping, means_tapping = Calculate_G(image_names,0,time,1000) #this saves G for 10 cycles of tapping at a single lag np.savez('tapping_feb_13_longtimecorrelations',data1 = correlation_tapping, data2 = means_tapping)Loading dataos.chdir('/media/nakul/Backup Plus/Data/DWS_Correlation_Outputs/outputs/2020/feb/old_outputs') tapping = np.load('tapping_feb_13.npz') correlation_tapping = tapping['data1'] means_tapping = tapping['data2']2d mapsl = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 from mpl_toolkits.axes_grid1 import make_axes_locatable l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 # fig = plt.figure(figsize=(7, 3)) ax1 = plt.subplot(2, 2, 1) ax2 = plt.subplot(2, 2, 2) ax3 = plt.subplot(2, 2, 3) ax4 = plt.subplot(2, 2, 4) ################################################################################################################# frame = correlation_tapping[5][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) # divider = make_axes_locatable(ax1) # cax = divider.append_axes('right', size='5%', pad=0.05) # fig.colorbar(im1, cax=cax, orientation='vertical',format='%.0e') # # cax.tick_params(labelsize=15) # cax.set_title('$\epsilon$',fontsize = 10) ax1.annotate('5 taps', xy=(5, 5), xytext=(120, 20),fontsize = 8) # ax1.set_title('t = 4 s')#,fontsize = 20); # ax1.set_xlim([30,175]); ax1.set_xticks([]); ax1.set_yticks([]); ################################################################################################################# frame = correlation_tapping[200][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im2 = ax2.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) # divider = make_axes_locatable(ax2) # cax = divider.append_axes('right', size='5%', pad=0.05) # fig.colorbar(im2, cax=cax, orientation='vertical',format='%.0e') # # cax.tick_params(labelsize=15) # cax.set_title('$\epsilon$',fontsize = 10) ax2.annotate('200 taps', xy=(5, 5), xytext=(120, 20),fontsize = 8) # ax2.set_title('t = 64 s')#,fontsize = 20); ax2.set_xticks([]); ax2.set_yticks([]); ################################################################################################################# frame = correlation_tapping[600][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im3 = ax3.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray_r cmap.set_bad('white',1.) 
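# (Added note, not in the original notebook.) In these map panels the correlation G is
# converted to the plotted quantity via masked_map = -ln(G) / c, with
# c = 8 * pi * sqrt(2/5) * l / 633 defined above; since the colorbar is labeled epsilon,
# the result is read as a strain(-rate) map. The constants l and 633 are assumed here to
# be the optical length scale and the laser wavelength in nm used in this DWS setup.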
# divider = make_axes_locatable(ax3) # cax = divider.append_axes('right', size='5%', pad=0.05) # fig.colorbar(im3, cax=cax, orientation='vertical',format='%.0e') # # cax.tick_params(labelsize=15) # cax.set_title('$\epsilon$',fontsize = 10) ax3.annotate('600 taps', xy=(5, 5), xytext=(120, 20),fontsize = 8) # ax3.set_title('t = 256 s')#,fontsize = 20); # ax1.set_xlim([30,175]); ax3.set_xticks([]); ax3.set_yticks([]); ################################################################################################################# frame = correlation_tapping[4000][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im4 = ax4.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) divider = make_axes_locatable(ax4) cax = divider.append_axes('right', size='3%', pad=0.05) fig.colorbar(im4, cax=cax,format='%.0e',ticks = [1e-7,5e-6,1e-5]) # cax.tick_params(labelsize=15) cax.set_title('$\epsilon$',fontsize = 10) ax4.annotate('4000 taps', xy=(5, 5), xytext=(120, 20),fontsize = 8) # ax4.set_title('t = 8192 s')#,fontsize = 20); ax4.set_xticks([]); ax4.set_yticks([]); plt.subplots_adjust(hspace=.1, wspace=.1, bottom=2, left=.1, top=3) # # fit subplots and save fig # fig.set_size_inches(w=7,h=3) fig.tight_layout() fig_name = 'tapping_panels.png' plt.savefig(fig_name, bbox_inches='tight') from mpl_toolkits.axes_grid1 import make_axes_locatable l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 fig = plt.figure(figsize=(7, 3)) ax1 = plt.subplot(1, 2, 1) ax2 = plt.subplot(1, 2, 2) ################################################################################################################# frame = correlation_tapping[100][1] frame[frame <= 0] = 0.1 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) # divider = make_axes_locatable(ax1) # cax = divider.append_axes('right', size='5%', pad=0.05) # fig.colorbar(im1, cax=cax, orientation='vertical',format='%.0e') # # cax.tick_params(labelsize=15) # cax.set_title('$\epsilon$',fontsize = 10) ax1.annotate('100 taps', xy=(5, 5), xytext=(120, 20),fontsize = 12) # ax1.set_title('t = 4 s')#,fontsize = 20); # ax1.set_xlim([30,175]); ax1.set_xticks([]); ax1.set_yticks([]); ################################################################################################################# frame = correlation_tapping[4000][1] frame[frame <= 0] = 0.1 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im2 = ax2.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('grey',1.) 
divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical',format='%.0e') # cax.tick_params(labelsize=15) cax.set_title(r'$\langle \dot{\epsilon} \rangle$',fontsize = 10) ax2.annotate('4000 taps', xy=(5, 5), xytext=(120, 20),fontsize = 12) (r'$ \langle G \rangle$') # ax2.set_title('t = 64 s')#,fontsize = 20); ax2.set_xticks([]); ax2.set_yticks([]); plt.subplots_adjust(hspace=.1, wspace=.1, bottom=2, left=.1, top=3) # # fit subplots and save fig # fig.set_size_inches(w=7,h=3) fig.tight_layout() # fig_name = '/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/tapping_panels.png' plt.savefig(fig_name, bbox_inches='tight') from mpl_toolkits.axes_grid1 import make_axes_locatable fig = plt.figure(figsize=(3, 3)) ax1 = plt.subplot(1, 1, 1) # ax2 = plt.subplot(1, 2, 2) a = correlation_tapping[5000][1] a[a <= 0] = 0 masked_map = ma.masked_array(a, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno') cmap = plt.cm.gray_r cmap.set_bad('white',1.) divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax,format='%.0e', orientation='vertical') # cax.tick_params(labelsize=15) cax.set_title('$\epsilon$',fontsize = 10) ax1.set_xticks([]); ax1.set_yticks([]); # plt.savefig('tapping_t_4000', bbox_inches='tight')Time Seriesx = tap_def N = 100 smoothed_taps = np.convolve(x, np.ones((N,))/N, mode='valid') plt.plot(tap_def[10:5010],alpha = 0.3) plt.plot(smoothed_taps[10:5010],'-k') # plt.yscale('log') test = [] for i in range(len(means_tapping)): test.append(means_tapping[i][1]) fig = plt.figure(figsize=(8,4)) tap_def = -(np.log(test)/c) plt.plot(tap_def[10:5010],alpha =1) plt.plot(smoothed_taps[10:5010],'r-') plt.xlabel('tap number') plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) plt.ylabel(r'$ \langle \dot{\epsilon} \rangle$') # plt.yscale('log') # plt.xscale('log') plt.ylim([0,1.5e-5]) # plt.savefig('/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/tapping_timeseries.png', bbox_inches='tight')Combined map and time seriesimport matplotlib.ticker as ticker fig = plt.figure(figsize=(3.5,2.5)) def fmt(x, pos): a, b = '{:.1e}'.format(x).split('e') b = int(b) return r'${} \times 10^{{{}}}$'.format(a, b) ################################################################################################################# ax1 = plt.subplot2grid((2,2), (0, 0), colspan=1,rowspan = 1) frame = correlation_tapping[100][1] frame[frame <= 0] = 0.1 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) ax1.set_title('100 taps') ax1.set_xticks([]); ax1.set_yticks([]); ################################################################################################################# ax2 = plt.subplot2grid((2,2), (0, 1), colspan=1,rowspan = 1) frame = correlation_tapping[4000][1] frame[frame <= 0] = 0.1 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im2 = ax2.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('grey',1.) 
divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical',format=ticker.FuncFormatter(fmt)) cax.set_title(r'$\dot{\epsilon}$ [$s^{-1}$]',fontsize = 10) ax2.set_title('4000 taps') cax.locator_params(nbins=3) # cax.set_ticks([1e-7,1e-5]) # cax.set_ticklabels([1e-7,1e-5]) ax2.set_xticks([]); ax2.set_yticks([]); ################################################################################################################# ax3 = plt.subplot2grid((2,2), (1, 0), colspan=2,rowspan = 2) ax3.plot(tap_def[10:5010],alpha = 0.5) ax3.plot(smoothed_taps[10:5010],'-k') ax3.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax3.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$ [$s^{-1}$]')#,rotation = 'horizontal') # ax3.yaxis.set_label_coords(1.06,1.01) ax3.yaxis.set_label_position("right") ax3.set_xlabel('tap number') ax3.yaxis.tick_right() ################################################################################################################# # plt.subplots_adjust(hspace=.1, wspace=.1, bottom=.01, left=.1, top=3) # # fit subplots and save fig # fig.set_size_inches(w=7,h=3) # fig.tight_layout() # fig_name = '/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/tapping_panels.pdf' plt.savefig(fig_name, bbox_inches='tight')Profiles Rotating imagesfrom skimage import transform a = correlation_tapping[3000][1] a[a <= 0] = 0 masked_map = ma.masked_array(a, mask=~binary) img = masked_map mask = ~binary y_min = 69 y_max = 100 #120 x_min = 150 x_max = 200 rotation_angle = 27 rotated_test = transform.rotate(img,rotation_angle,preserve_range = True,resize = True) rotated_mask = transform.rotate(mask,rotation_angle,preserve_range = True,resize = True) rotated_masked_image = ma.masked_array(rotated_test,mask = rotated_mask) roi_verts = [[y_max,x_max],[y_min,x_max],[y_min, x_min], [y_max, x_min]] roi_verts.append(roi_verts[0]) x_roi, y_roi = zip(*roi_verts) cropped_rows = rotated_test[y_min:y_max,x_min:x_max].shape[0] plt.subplot(1,3,1) plt.imshow(img,vmin = 0.95,vmax = 1,cmap = 'gray') # plt.fill(x1,y1,'w',alpha = 1,edgecolor = 'k') plt.xlim([0,180]) plt.title('unrotated image') plt.subplot(1,3,2) plt.imshow(rotated_masked_image,vmin = 0.95,vmax = 1,cmap = 'gray') plt.fill(y_roi,x_roi,'w',alpha = .3,edgecolor = 'k') plt.axis('scaled') plt.title('rotated image') plt.subplot(1,3,3) plt.imshow(rotated_test[y_min:y_max,x_min:x_max],vmin = 0.95,vmax = 1,cmap = 'gray') plt.title('cropped and rotated image image') # plt.tight_layout() # plt.savefig('profiles_tapping_protocol.png') z_tile = rotated_test[y_min:y_max,x_min:x_max] #take the fft z_fft = 1/np.shape(z_tile)[1]/np.shape(z_tile)[0]*np.fft.fft2(z_tile) z_fft_r = (z_fft.real**2+z_fft.imag**2)**0.5 #shift high modes to the middle, erase the lowest modes z_fft_s = np.fft.fftshift(z_fft_r) #get autocorrelation of the topo data via the inverse of the fft z_rft_s = np.fft.fftshift(np.fft.ifft2((z_fft)*np.conj((z_fft)))).real*np.shape(z_tile)[1]*np.shape(z_tile)[0] z_rft_s = z_rft_s/np.var(z_tile) x = np.arange(np.shape(z_tile)[0])-np.shape(z_tile)[0]/2 y = np.arange(np.shape(z_tile)[1])-np.shape(z_tile)[1]/2 X,Y = np.meshgrid(x,y) fig = plt.figure(figsize=(4,2)) ax1 = plt.subplot(1, 2, 1) ax1.imshow(rotated_test[y_min:y_max,x_min:x_max],vmin =1e-6,vmax = 1e-5,cmap = 'gray_r') ax1.set_title('cropped square original') ax2 = plt.subplot(1, 2, 2) im2 = ax2.pcolormesh(X,Y,z_rft_s,cmap='seismic')#,vmin=-1,vmax=1) # ax2.set_title('2d correlation function') # 
ax2.set_xlim([-15,15]) # ax2.set_ylim([-15,15]) divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical')#,format=ticker.FuncFormatter(fmt)) # cax.tick_params(labelsize=15) cax.set_title('C')#,fontsize = 10) plt.tight_layout() # plt.savefig('/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/hello.png', bbox_inches='tight')Creating variables to correctly scale depth# this is the depth, in metapixels z = np.arange(0,(y_max-y_min),1) #depth in pixels z = z*10 #depth in meters z = z*3.6e-5 γ_z = (z[:-1] + z[1:]) / 2 #depth normalized by grainsize # z = z/100e-6 # z = z/np.max(z)Computing depth-averaged quantitiesdef depth_average(correlation_map): x = len(correlation_map) timestep = len(correlation_map) G_mean_depth_averaged = [[] for _ in range(x)] G_var_depth_averaged = [[] for _ in range(x)] ROI_average = [] ROI_var = [] # deformation [time][row][column] for t in range(timestep): G_map_temp = correlation_map[t] G_map_rotated = transform.rotate(G_map_temp,rotation_angle,preserve_range = True,resize = True) G_map_cropped = np.asarray(G_map_rotated[y_min:y_max,x_min:x_max]) cropped_rows = G_map_cropped.shape[0] cropped_columns = G_map_cropped.shape[1] cropped_area = cropped_rows*cropped_columns ROI_average.append(np.nanmean(G_map_cropped)) ROI_var.append(np.nanvar(G_map_cropped)) G_mean_temp = [] G_var_temp = [] for i in range(cropped_rows): G_mean_temp.append(np.nanmean(G_map_cropped[i][:])) G_var_temp.append(np.nanvar(G_map_cropped[i][:])) G_mean_depth_averaged[t] = G_mean_temp G_var_depth_averaged[t] = G_var_temp del G_map_temp,G_map_rotated,G_map_cropped,G_mean_temp,G_var_temp return G_mean_depth_averaged#,G_var_depth_averaged,ROI_average,ROI_var x = 5000 # tapping_profiles = [[] for _ in range(x)] tapping_profiles = [] for i in range (x): temp_profile = depth_average(correlation_tapping[i]) tapping_profiles.append(temp_profile[1]) l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 tapping_profiles = -(np.log(np.asarray(tapping_profiles))/c) def calculate_I(profile): #because all profiles are calculated within a 1-second window, the profile itself is the strain-rate profile. 
γ_rate = profile γ_z = z d = 1e-4 g = 9.8 rho = 2650 phi = 0.6 alpha = 0.1 Po = alpha*rho*g*d P = rho*g*z*phi+Po I = (γ_rate*d)/np.sqrt(P/rho) return I I_tapping = calculate_I(tapping_profiles) #calculating means and stdev x = len(γ_z) I_means = [[] for _ in range(x)] I_stdev = [[] for _ in range(x)] depth_distributions = [[] for _ in range(x)] for i in range(x): temp_depth = [] for t in range(len(I_tapping)): temp_depth.append(I_tapping[t][i]) I_means[i]=(np.mean(temp_depth)) I_stdev[i]=(np.std(temp_depth)) depth_distributions[i] = temp_depth del temp_depth from matplotlib.pyplot import cm fig = plt.figure(figsize=(4,2)) color=iter(cm.viridis(np.linspace(0,1,300))) ax1 = plt.subplot(1, 2, 1) ax2 = plt.subplot(1, 2, 2) for i in range(300): c=next(color) ax1.plot(I_tapping[i+3],z,'o',color = c,markersize = 2,alpha = 0.3) ax1.plot(I_means,γ_z,'r--') ax1.invert_yaxis() ax1.set_xlabel('I') ax1.set_ylabel('depth (m)') ax1.ticklabel_format(axis='x', style='sci', scilimits=(0,0)) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) color=iter(cm.viridis(np.linspace(0,1,4997))) for i in range(4996): c=next(color) ax2.plot(I_tapping[i+3],z,'o',color = c,markersize = 2,alpha = 0.3) ax2.plot(I_means,γ_z,'k--') ax2.invert_yaxis() ax2.set_xlabel('I') ax2.set_yticks([]) ax2.ticklabel_format(axis='x', style='sci', scilimits=(0,0)) ax2.set_xscale('log') plt.tight_layout() # plt.savefig('mean_inertialnumber_tapping.pdf', bbox_inches='tight')Summary shear and I profilesl = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 snapshot = -(np.log(np.asarray(rotated_test[y_min:y_max,x_min:x_max]))/c) fig = plt.figure(figsize=(6,2)) ax1 = plt.subplot(1, 2, 1) im = ax1.imshow(snapshot,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno_r') ax1.set_xticks([]) ax1.set_yticks([]) divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax,format='%.0e', orientation='vertical') # cax.tick_params(labelsize=15) cax.set_title('$\dot{\epsilon}$',fontsize = 10) ############################################################################## color=iter(cm.inferno(np.linspace(0,1,300))) ax2 = plt.subplot(1, 2, 2) for i in range(300): c=next(color) ax2.plot(I_tapping[i+3],z,'o',color = c,markersize = 2,alpha = 1) ax2.plot(I_means,γ_z,'r--') ax2.invert_yaxis() ax2.set_xlabel('I') ax2.set_ylabel('depth (m)') ax2.ticklabel_format(axis='x', style='sci', scilimits=(0,0)) ax2.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax2.set_yscale('log') from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Latin Modern Math']}) plt.rcParams['pdf.fonttype'] = 42 plt.tight_layout() plt.savefig('/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/tapping_map.svg') color=iter(cm.viridis(np.linspace(0,1,300))) fig = plt.figure(figsize=(2,2)) ax1 = plt.subplot(1, 1, 1) color=iter(cm.inferno(np.linspace(0,1,300))) for i in range(300): c=next(color) ax1.plot(I_tapping[i+3],z,'o',color = c,markersize = 2,alpha = 1) ax1.plot(I_means,γ_z,'r--') ax1.invert_yaxis() ax1.set_xlabel('I') ax1.set_ylabel('depth (m)') ax1.ticklabel_format(axis='x', style='sci', scilimits=(0,0)) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax1.set_yscale('log') from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Latin Modern Math']}) plt.rcParams['pdf.fonttype'] = 42 plt.tight_layout() plt.savefig('/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/mean_inertialnumber_tapping.svg', bbox_inches='tight')Movie 
framesdef fmt(x, pos): a, b = '{:.2e}'.format(x).split('e') b = int(b) return r'${} \times 10^{{{}}}$'.format(a, b) plt.style.use('dark_background') fig = plt.figure(figsize=(6,6)) a = len(correlation_tapping) n = 0 for i in range(0,6000,100): ax1 = plt.subplot(1, 1, 1) frame = correlation_tapping[i][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_tapping) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno') cmap = plt.cm.gray cmap.set_bad('grey',1.) divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax, orientation='vertical',format=ticker.FuncFormatter(fmt)) cax.set_title(r'$\dot{\epsilon}$',fontsize = 12) ax1.annotate(r"\# of taps " + str(i), xy=(5, 5), xytext=(120, 20),fontsize = 12) ax1.set_xticks([]); ax1.set_yticks([]); fig.tight_layout() plt.savefig("/home/nakul/Documents/Geomorphology/CreepExperiment_videos/tapping/taps" + str(n) +".png", format="PNG") plt.clf() n += 1Tapping - longtime relaxation curves These data are images taken after tapping_cycles_c: 3 cycles of 1000 taps. The goal of collecting these data is to explore the decay of the correlation function after many taps.tapping_longtime = np.load('tapping_c_longtime.npz') # correlation_tapping_longtime = tapping_longtime['data1'] means_tapping_longtime = tapping_longtime['data2'] from matplotlib.pyplot import cm a = (np.logspace(1, 13, num=13, base=2)) b = 10*a/60 labels = list(b) color=iter(cm.viridis(np.linspace(0,1,13))) time = np.arange(1,10001,10) fig = plt.figure(figsize=(4, 2)) ax1 = plt.subplot(111) for t in range(13): c=next(color) ax1.plot(time,means_tapping_longtime[t],'o',color = c,markersize =1,label = int(labels[t])) # ax1.set_xscale('log') ax1.set_ylim([0.55,1]) ax1.set_xlabel(r'$ \tau $ (s)') ax1.set_ylabel(r'$ \langle G \rangle$') box = ax1.get_position() lgd = ax1.legend() ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax1.legend(title = 'time after 3000 taps (minutes)',loc='center left', frameon = False, bbox_to_anchor=(1, 0.5)) plt.tight_layout() # plt.savefig('after_3000taps.png', bbox_inches='tight') from mpl_toolkits.axes_grid1 import make_axes_locatable fig = plt.figure(figsize=(3, 3)) ax1 = plt.subplot(1, 1, 1) # ax2 = plt.subplot(1, 2, 2) l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 a = correlation_tapping_longtime[12][800] a[a <= 0] = 0 masked_map = ma.masked_array(a, mask=mask_tapping) # masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = .6,vmax = 1,cmap = 'gray') cmap = plt.cm.gray_r cmap.set_bad('white',1.) 
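# A brief note on the legend arithmetic in the relaxation-curve plot above: the 13 curves
# start at a = 2**1 ... 2**13 frames after the 3000 taps, and with one frame every 10 s the
# expression b = 10*a/60 converts those start times to minutes (about 0.3 min for the first
# curve up to roughly 1365 min, i.e. ~23 h, for the last).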
divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax,format='%.0e', orientation='vertical') # cax.tick_params(labelsize=15) cax.set_title('$\epsilon$',fontsize = 10) ax1.set_xticks([]); ax1.set_yticks([]); # plt.savefig('tapping_t_4000', bbox_inches='tight')Multicycle tappingtapping_multicycle = np.load('/home/ndeshpande/Documents/Python Scripts/DWS_Speckle_Analysis/outputs/2019/tap_multicycle.npz') # correlation_tapping_multicycle = tapping_multicycle ['data1'] means_tapping_multicycle = tapping_multicycle ['data2'] l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 tapping_multicycle = [] for i in range(len(means_tapping_multicycle)): tapping_multicycle.append(-(np.log(means_tapping_multicycle[i][1])/c) )Heatimport glob import natsort image_names = [] for name in glob.glob('/media/nakul/Backup Plus/Data/DWS_RawExperimentPhotos/feb_15_heat/heating_disturbance/img_*.jpg'): image_names.append(name) image_names = natsort.natsorted(image_names,reverse=False) timestep = len(image_names)Creating masks#specify the bounds of the image - should be evenly divisible by the metapixel size x_min = 0 x_max = 1900 y_min = 0 y_max = 1400 # choosing two speckle images speckle_a = Image.open(image_names[50]) speckle_a = np.asarray(speckle_a)[y_min:y_max,x_min:x_max] speckle_b = Image.open(image_names[60]) speckle_b = np.asarray(speckle_b)[y_min:y_max,x_min:x_max] # partitioning images into metpixels size = 10 a = speckle_a lenr, lenc = int(a.shape[0]/size), int(a.shape[1]/size) ## 'flattening' the metapixel array along a single axis metapixels_a = speckle_a.reshape(lenr,size,lenc,size).transpose(0, 2, 1, 3) b = speckle_b lenr, lenc = int(b.shape[0]/size), int(b.shape[1]/size) metapixels_b = speckle_b.reshape(lenr,size,lenc,size).transpose(0, 2, 1, 3) metapixel_grid_rows = metapixels_a.shape[0] metapixel_grid_columns = metapixels_a.shape[1] metapixel_grid_area = metapixel_grid_rows*metapixel_grid_columns mp_a_mean = np.mean(metapixels_a,axis = (2,3)) mp_b_mean = np.mean(metapixels_b,axis = (2,3)) ## Adding [...,np.newaxis,np.newaxis] allows the array of means to be subtracted ## from the array containing metapixels. otherwise, their shapes are not the same metapixels_a = np.subtract(metapixels_a,mp_a_mean[..., np.newaxis,np.newaxis]) metapixels_b = np.subtract(metapixels_b,mp_b_mean[..., np.newaxis,np.newaxis]) from skimage.filters import try_all_threshold img = mp_a_mean # Here, we specify a radius for local thresholding algorithms. # If it is not specified, only global algorithms are called. 
fig, ax = try_all_threshold(img, figsize=(10, 8), verbose=False) plt.show() from skimage.filters import threshold_minimum image = mp_a_mean thresh = threshold_minimum(image) binary = image > thresh mask_heat = ~binary fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5)) ax = axes.ravel() ax[0] = plt.subplot(1, 3, 1) ax[1] = plt.subplot(1, 3, 2) ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0]) ax[0].imshow(image, cmap=plt.cm.gray) ax[0].set_title('Original') ax[0].axis('off') ax[1].hist(image.ravel(), bins=256) ax[1].set_title('Histogram') ax[1].axvline(thresh, color='r') ax[2].imshow(binary, cmap=plt.cm.gray) ax[2].set_title('Thresholded') ax[2].axis('off') plt.show()Calculating G# time = np.logspace(1, 13, num=13, base=2) # time = (0,10,25,50,100,250,500) time = np.arange (0,250,10) correlation_heat, means_heat = Calculate_G(image_names,6,time,100) #this saves G for the post-heating relaxation (t = 0-250 s in steps of 10 s, 100 lags each) np.savez('/media/nakul/Backup Plus/Data/DWS_Correlation_Outputs/outputs/2020/feb/old_outputs/heat_feb15_0_250_relaxation',data1 = correlation_heat, data2 = means_heat)Loading instantaneous creep ratesheat_dt = np.load('/media/nakul/Backup Plus/Data/DWS_Correlation_Outputs/outputs/2020/feb/old_outputs/heat_feb15_dt.npz') correlation_heat_dt = heat_dt['data1'] means_heat_dt = heat_dt['data2']Loading short-duration correlation functions: post heating (immediately after heating) The timesteps in the correlation functions are t = 0 - 250 in steps of 10 seconds. Each correlation function is composed of 100 lagsheat_short = np.load('/media/nakul/Backup Plus/Data/DWS_Correlation_Outputs/outputs/2020/feb/old_outputs/heat_feb15_0_250_relaxation.npz') correlation_heat_short = heat_short['data1'] means_heat_short = heat_short['data2']Loading long-duration correlation functions: post heatingheat_long = np.load('/media/nakul/Backup Plus/Data/DWS_Correlation_Outputs/outputs/2020/feb/old_outputs/heat_feb15.npz') correlation_heat_long = heat_long['data1'] means_heat_long = heat_long['data2']Loading preheat/preparationheat_prep = np.load('/media/nakul/Backup Plus/Data/DWS_Correlation_Outputs/outputs/2020/feb/old_outputs/undisturbed_feb_13_preheat.npz') correlation_heat_prep = heat_prep['data1'] means_heat_prep = heat_prep['data2']2d mapsl = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 plt.style.use('dark_background') from mpl_toolkits.axes_grid1 import make_axes_locatable fig = plt.figure(figsize=(8, 3)) ax1 = plt.subplot(1, 2, 1) ax2 = plt.subplot(1, 2, 2) frame = correlation_heat_short[1][1] frame[frame <= 0] = 1e-17 masked_map = ma.masked_array(frame, mask=mask_heat, fill_value=1) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno') cmap = plt.cm.gray_r cmap.set_bad('white',1.)
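# Indexing convention for the arrays loaded above (a short illustration, inferred from how
# they are used in this notebook): the first index selects the start-time window and the
# second selects the lag, so correlation_heat_short[w][lag] is a 2D metapixel map of G and
# means_heat_short[w][lag] is the corresponding spatial mean, e.g.
# example_map = correlation_heat_short[0][0]    # 2D array of G values for the first window
# example_mean = means_heat_short[0][0]         # scalar <G> for the same window and lag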
divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax,format='%.0e', orientation='vertical') # cax.tick_params(labelsize=15) cax.set_title('$\dot{\epsilon}$',fontsize = 10) ax1.set_xticks([]); ax1.set_yticks([]); test = [] for i in range(len(means_heat_dt)): test.append(means_heat_dt[i][1]) i = 10 heat_def = -(np.log(test)/c) ax2.plot(heat_def[0:200],'r',alpha =1) ax2.plot(i,heat_def[i],'o',markersize = 10,markeredgecolor = 'k') ax2.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax2.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$',fontsize = 8) ax2.set_xlabel('time (s)',fontsize = 8) fig.tight_layout() # plt.savefig('tapping_t_4000', bbox_inches='tight') a plt.style.use('dark_background') fig = plt.figure(figsize=(6,6)) a = len(correlation_heat_short) n = 0 for i in range(200): plt.style.use('dark_background') from mpl_toolkits.axes_grid1 import make_axes_locatable fig = plt.figure(figsize=(8, 3)) ax1 = plt.subplot(1, 2, 1) ax2 = plt.subplot(1, 2, 2) frame = correlation_heat_short [i][1] frame[frame <= 0] = .00000000000000001 masked_map = ma.masked_array(frame, mask=mask_heat,fill = 1) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno_r') cmap = plt.cm.gray_r cmap.set_bad('white',1.) divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax,format='%.0e', orientation='vertical') # cax.tick_params(labelsize=15) cax.set_title('$\dot{\epsilon}$',fontsize = 10) ax1.set_xticks([]); ax1.set_yticks([]); ################################################################################################## heat_def = -(np.log(test)/c) ax2.plot(heat_def[0:100],'r',alpha =1) ax2.plot(i+8,heat_def[i+8],'o',markersize = 10,markeredgecolor = 'k') ax2.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax2.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$',fontsize = 8) ax2.set_xlabel('time (s)',fontsize = 8) fig.tight_layout() # plt.savefig('tapping_t_4000', bbox_inches='tight') plt.savefig("/home/nakul/Documents/Geomorphology/CreepExperiment_videos/heat/heat" + str(n) +".png", format="PNG") plt.clf() n += 1 from mpl_toolkits.axes_grid1 import make_axes_locatable l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 fig = plt.figure(figsize=(7, 3)) ax1 = plt.subplot(1, 2, 1) ax2 = plt.subplot(1, 2, 2) ################################################################################################################# frame = correlation_heat_dt[5][1] frame[frame <= 0] = 0.01 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno') cmap = plt.cm.gray cmap.set_bad('white',1.) 
# divider = make_axes_locatable(ax1) # cax = divider.append_axes('right', size='5%', pad=0.05) # fig.colorbar(im1, cax=cax, orientation='vertical',format='%.0e') # # cax.tick_params(labelsize=15) # cax.set_title('$\epsilon$',fontsize = 10) ax1.annotate('t = 0 s', xy=(5, 5), xytext=(100, 20),fontsize = 12) # ax1.set_title('t = 4 s')#,fontsize = 20); # ax1.set_xlim([30,175]); ax1.set_xticks([]); ax1.set_yticks([]); ################################################################################################################# frame = correlation_heat_dt[200][1] frame[frame <= 0] = 0 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im2 = ax2.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'inferno') cmap = plt.cm.gray cmap.set_bad('white',1.) divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical',format='%.0e') # cax.tick_params(labelsize=15) cax.set_title(r'$\langle \dot{\epsilon} \rangle$',fontsize = 10) ax2.annotate('t = 200 s', xy=(5, 5), xytext=(120, 20),fontsize = 12) # ax2.set_title('t = 64 s')#,fontsize = 20); ax2.set_xticks([]); ax2.set_yticks([]); plt.subplots_adjust(hspace=.1, wspace=.1, bottom=2, left=.1, top=3) # # fit subplots and save fig # fig.set_size_inches(w=7,h=3) fig.tight_layout() fig_name = '/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/heat_panels.png' plt.savefig(fig_name, bbox_inches='tight')Relaxation curvesdef remove_outliers(x): "this function takes a correlation function and filters values." "at certain lag times, the mean drops to an anomalous value but is recovered at the subsequent lag time" "if the difference between two subsequent timesteps" "is greater than the difference between three timesteps, then we set the anomalous value to the previous value" for i in range(len(x)-2): a = x[i] b = x[i+1] c = x[i+2] d = abs(a-b) e = abs(a-c) if d > e: x[i+1] = x[i] return xpreparationfrom matplotlib.pyplot import cm a = np.logspace(1, 13, num=13, base=2) labels = list(a) color=iter(cm.viridis(np.linspace(0,1,13))) time = np.arange(1,1001,1) fig = plt.figure(figsize=(3, 2)) ax1 = plt.subplot(111) for t in range(13): c=next(color) corr = remove_outliers(means_heat_prep[t]) ax1.plot(time,means_heat_prep[t],'o',color = c,markersize = 3,alpha = 1,label = int(labels[t])) ax1.set_xscale('log') ax1.set_xlabel(r'$ \tau $ (s)') ax1.set_ylabel(r'$ \langle G \rangle$') box = ax1.get_position() lgd = ax1.legend() ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax1.legend(title = 'time since preparation (s)',loc='center left', frameon = False, bbox_to_anchor=(1, 0.5)) plt.tight_layout() # plt.savefig('correlation_preparation_heat.png', bbox_inches='tight')Short - immediately after heat is removeda = np.arange(0,250,10) # a = a*10 labels = list(a) color=iter(cm.viridis(np.linspace(0,1,25))) time = np.arange(1,101,1) y = np.repeat((1/np.exp(1)),100) fig = plt.figure(figsize=(3, 2)) ax1 = plt.subplot(111) for t in range(25): c=next(color) ax1.plot(time,means_heat_short[t],'o',color = c,markersize = 3,alpha = 1,label = int(labels[t])) ax1.plot(time,y,'k--') ax1.set_xscale('log') # ax1.set_yscale('log') ax1.set_xlim([0,1e6]) ax1.set_xlabel(r'$ \tau $ (s)') ax1.set_ylabel(r'$ \langle G \rangle$') # box = ax1.get_position() # lgd = ax1.legend() # ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # ax1.legend(title = 'time since heat removed (s)',loc='center left', frameon = False, bbox_to_anchor=(1, 
0.5)) # plt.tight_layout() # plt.savefig('correlations_post_heat.png', bbox_inches='tight') # ensuring that G goes from 0 to 1 g = means_heat_short d = np.min(g) gp_prep = (g-d)/(1-d)efolding timet_efold_heat_prep = [] test = [] for i in range(len(means_heat_prep)): x = gp_prep[i] y = np.where(x < 1/np.exp(1))[0] t_efold_heat_prep.append(y[0])Short timest_efold_short = [] test = [] for i in range(len(means_heat_short)): x = means_heat_short[i] y = np.where(x < 1/np.exp(1))[0] t_efold_short.append(y[0]) fig = plt.figure(figsize=(2, 2)) t = np.arange(0,130,10) color=iter(cm.viridis(np.linspace(0,1,13))) for i in range(len(t_efold_short)): plt.plot(t[i],t_efold_short[i],'ro',markeredgecolor = 'k',markersize = 5) # plt.title('relaxation timescale') plt.xlabel('t (s)',fontsize = 12) plt.ylabel(r'$\tau_e$ (s)',fontsize = 12) plt.xscale('log') plt.yscale('log') # plt.xlim([1e-1,1e5]) # plt.ylim([1e1,1e5]) plt.tight_layout() plt.savefig('e_foldingtime_heat.png', bbox_inches='tight')Time Series instantaneous creepl = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 fig = plt.figure(figsize=(6,3)) test = [] for i in range(len(means_heat_dt)): test.append(means_heat_dt[i][1]) heat_def = -(np.log(test)/c) plt.plot(heat_def[0:200],'r',alpha =0.7) plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) plt.ylabel(r'$ \langle \dot{\epsilon} \rangle$',fontsize = 8) plt.xlabel('time (s)',fontsize = 8) # plt.savefig('heat_timeseries.png',bbox_inches='tight')Combined map and time seriesfrom mpl_toolkits.axes_grid1 import make_axes_locatable l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 def fmt(x, pos): a, b = '{:.2e}'.format(x).split('e') b = int(b) return r'${} \times 10^{{{}}}$'.format(a, b) fig = plt.figure(figsize=(3.5,2.5)) ################################################################################################################# ax1 = plt.subplot2grid((2,2), (0, 0), colspan=1,rowspan = 1) frame = correlation_heat_dt[5][1] frame[frame <= 0] = 0.01 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) ax1.set_title('t = 0 s') ax1.set_xticks([]); ax1.set_yticks([]); ################################################################################################################# ax2 = plt.subplot2grid((2,2), (0, 1), colspan=1,rowspan = 1) frame = correlation_heat_dt[200][1] frame[frame <= 0] = 0 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im2 = ax2.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') cmap = plt.cm.gray cmap.set_bad('white',1.) 
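# A compact version of the e-folding-time computation used earlier in this section (a
# sketch with a hypothetical name; it returns None instead of raising an IndexError when a
# curve never drops below 1/e):
def efold_time(curve):
    """Index of the first lag at which the correlation falls below 1/e."""
    below = np.where(np.asarray(curve) < 1 / np.e)[0]
    return below[0] if len(below) else None
# e.g. [efold_time(g) for g in means_heat_short] gives essentially the same values as
# t_efold_short above.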
# divider = make_axes_locatable(ax2) # cax = divider.append_axes('right', size='5%', pad=0.05) # fig.colorbar(im2, cax=cax, orientation='vertical',format=ticker.FuncFormatter(fmt)) # cax.set_title(r'$\langle \dot{\epsilon} \rangle$',fontsize = 10) ax2.set_title('t = 200 s') ax2.set_xticks([]); ax2.set_yticks([]); ################################################################################################################ ax3 = plt.subplot2grid((2,2), (1, 0), colspan=2,rowspan = 2) ax3.plot(heat_def[3:203],'r',alpha = 0.7) ax3.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax3.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$ [$s^{-1}$]')#,rotation = 'horizontal') ax3.set_xlabel('time (s)') ################################################################################################################ left, bottom, width, height = [0.6, 0.25, 0.20, 0.2] ax4 = fig.add_axes([left, bottom, width, height]) time_short = np.arange (0,250,10) for i in range(len(t_efold_short)): ax4.plot(time_short[i],t_efold_short[i],'ro',markeredgecolor = 'k',markersize = 5) ax4.set_yscale('log') ax4.set_xscale('log') ax4.tick_params(axis='both', which='major', labelsize=7) ax4.xaxis.set_ticks([1,10,100,1000]) ax4.yaxis.set_ticks([1,10,100]) ax4.set_ylim([1,200]) ax4.set_xlabel('t (s)') ax4.set_ylabel(r'$\tau_e$ (s)',rotation = 'horizontal') ax4.yaxis.set_label_coords(-0.6, .5) ax4.xaxis.set_label_coords(1.3, 0) ################################################################################################################ # plt.subplots_adjust(hspace=.1, wspace=.1, bottom=2, left=.1, top=3) # # # fit subplots and save fig # # fig.set_size_inches(w=7,h=3) # fig.tight_layout() fig_name = '/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/heat_panels.pdf' plt.savefig(fig_name, bbox_inches='tight') import matplotlib.patheffects as pe from matplotlib.pyplot import cm fig = plt.figure(figsize=(7,2)) ax1 = plt.subplot(111) ################################################################################################################ ax1.plot(heat_def[3:203],'r',alpha = 0.7) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax1.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$') ax1.set_xlabel('time (s)') ################################################################################################################ left, bottom, width, height = [0.65, 0.3, 0.20, 0.5] ax2 = fig.add_axes([left, bottom, width, height]) time_short = np.arange (0,250,10) for i in range(len(t_efold_short)): ax2.plot(time_short[i],t_efold_short[i],'ro',markeredgecolor = 'k',markersize = 5) ax2.set_yscale('log') ax2.set_xscale('log') ax2.tick_params(axis='both', which='major', labelsize=7) ax2.xaxis.set_ticks([1,10,100,1000]) ax2.yaxis.set_ticks([1,10,100]) ax2.set_ylim([1,200]) ax2.set_xlabel('t (s)') ax2.set_ylabel(r'$\tau_e$ (s)') ################################################################################################################ # plt.tight_layout() # plt.savefig('heat.png', bbox_inches='tight') import matplotlib.patheffects as pe from matplotlib.pyplot import cm #(figsize=(3.5,6)) fig = plt.figure(figsize=(6,4.5)) ax1 = plt.subplot(211) ax1.plot(heat_def,'r',alpha = 0.7) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax1.set_ylabel(r'$\dot{\epsilon }$',fontsize = 7) ax1.set_xlabel('time (s)',fontsize = 7) ################################################################################################################ left, bottom, width, height = [0.65, 0.65, 0.20, 
0.20] ax2 = fig.add_axes([left, bottom, width, height]) time_short = np.arange (0,250,10) # t = (np.logspace(1, 13, num=13, base=2)) color=iter(cm.inferno(np.linspace(0,1,13))) for i in range(len(t_efold_short)): c=next(color) ax2.plot(time_short[i],t_efold_short[i],'ro',markeredgecolor = 'k',color = c,markersize = 5) ax2.yaxis.set_ticks([0,25,50]) ax2.tick_params(axis='both', which='major', labelsize=7) ax2.yaxis.set_label_coords(-0.025,1.05) ax2.set_xlabel('time since heating (s)') ax2.set_ylabel('decay time (s)',rotation = 'horizontal') ################################################################################################################ left, bottom, width, height = [0.35, 0.65, 0.20, 0.20] ax3 = fig.add_axes([left, bottom, width, height]) color=iter(cm.inferno(np.linspace(0,1,12))) time = np.arange(1,101,1) for t in range(len(t_efold_short)): c=next(color) ax3.plot(time,means_heat_short[t],'o',color = c,markersize = 3.5,alpha = 1)#,label = int(labels[t])) ax3.set_xscale('log') # ax3.set_yscale('log') # ax1.set_xlim([0,1e6]) ax3.set_xlabel('lag (s)') ax3.set_ylabel('G',rotation = 'horizontal') ax3.yaxis.set_label_coords(-0.025,1.05) plt.tight_layout() # plt.savefig('/home/nakul/Documents/Python Scripts/DWS_Speckle_Analysis/iPython_notebooks/heat_timeseries.png', bbox_inches='tight') color=iter(cm.viridis(np.linspace(0,1,12))) time = np.arange(1,101,1) for t in range(len(t_efold_short)): c=next(color) plt.plot(time,means_heat_short[t],'o',color = c,markersize = 3.5,alpha = 0.7)#,label = int(labels[t])) ax3.set_xscale('log') # ax3.set_yscale('log') # ax1.set_xlim([0,1e6]) ax3.set_xlabel('lag (s)') ax3.set_ylabel('G',rotation = 'horizontal') ax3.yaxis.set_label_coords(-0.025,1.05) from mpl_toolkits.axes_grid1 import make_axes_locatable fig = plt.figure(figsize=(8, 3)) # fig.suptitle('t 9000 -> 9100') ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=1,rowspan = 2) ax1.plot(heat_def,'r') ax1.set_xlabel('$\epsilon$') ax1.set_ylabel('z (m)') # ax1.set_xlim([-1e-4,1e-5]) ax1.ticklabel_format(axis='x', style='sci', scilimits=(0,0)) # plt.xscale('log') # plt.yscale('log') #################################################################################################################### ax2 = plt.subplot2grid((3, 3), (0, 1), rowspan=1,colspan = 1) frame = correlation_heat_dt[5][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im2 = ax2.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') ax2.set_title('t = 1') ax2.set_xticks([]) ax2.set_yticks([]) #################################################################################################################### ax3 = plt.subplot2grid((3, 3), (0, 2), rowspan=1,colspan = 1) frame = correlation_heat_dt[100][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im3 = ax3.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') divider = make_axes_locatable(ax3) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im3, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') ax3.set_title('t = 3') ax3.set_xticks([]) ax3.set_yticks([]) #################################################################################################################### ax4 = plt.subplot2grid((3, 3), (1, 1), 
rowspan=1,colspan = 1) frame = correlation_heat_dt[200][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im4 = ax4.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') divider = make_axes_locatable(ax4) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im4, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') ax4.set_title('t = 20') ax4.set_xticks([]) ax4.set_yticks([]) #################################################################################################################### ax5 = plt.subplot2grid((3, 3), (1, 2), rowspan=1,colspan = 1) frame = correlation_heat_dt[300][1] frame[frame <= 0] = 1e-32 masked_map = ma.masked_array(frame, mask=mask_heat) masked_map = -(np.log(masked_map)/c) im5 = ax5.imshow(masked_map,vmin = 1e-7,vmax = 1e-5,cmap = 'gray_r') divider = make_axes_locatable(ax5) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im5, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') ax5.set_title('t = 90') ax5.set_xticks([]) ax5.set_yticks([]) plt.subplots_adjust(hspace=.1, wspace=.1, bottom=2, left=.1, top=3) # plt.tight_layout() # plt.savefig('ensemble_test.png', bbox_inches='tight') import matplotlib.patheffects as pe from matplotlib.pyplot import cm #(figsize=(3.5,6)) fig = plt.figure(figsize=(6,3)) ax1 = plt.subplot(211) ax1.plot(heat_def,'r',alpha = 0.7) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax1.set_ylabel(r'$\dot{\epsilon }$',fontsize = 7) ################################################################################################################ left, bottom, width, height = [0.5, 0.7, 0.30, 0.20] ax2 = fig.add_axes([left, bottom, width, height]) time_short = np.arange (0,250,10) # t = (np.logspace(1, 13, num=13, base=2)) color=iter(cm.viridis(np.linspace(0,1,13))) for i in range(len(t_efold_short)): c=next(color) ax2.plot(time_short[i],t_efold_short[i],'o',markeredgecolor = 'k',color = c,markersize = 5) ax2.yaxis.set_ticks([0,25,50]) ax2.tick_params(axis='both', which='major', labelsize=7) ax2.yaxis.set_label_coords(-0.025,1.05) ax2.set_xlabel('start time (s)') ax2.set_ylabel('decay time (s)',rotation = 'horizontal') ############################################################################################################## ax3 = plt.subplot(212) ax3.plot(tap_def[10:5010],color='b',alpha = 0.5) ax3.plot(smoothed_taps[10:5010],color='k') # ax3.plot(a[10:510],'k') ax3.set_xlabel('time (s)') ax3.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax3.set_ylabel(r'$\dot{\epsilon }$',fontsize = 7) plt.tight_layout() # plt.savefig('disturbance_timeseries.png')Multicycle Heatingheat_multicycle = np.load('/home/ndeshpande/Documents/Python Scripts/DWS_Speckle_Analysis/outputs/2019/heat_multicycle.npz') # correlation_heat_multicycle = heat_multicycle ['data1'] means_heat_multicycle = heat_multicycle ['data2'] l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 heat_multicycle = [] for i in range(len(means_heat_multicycle)): heat_multicycle.append(-(np.log(means_heat_multicycle[i][1])/c) )Multicycle comparisonundisturbed_multicycle = np.load('/home/ndeshpande/Documents/Python Scripts/DWS_Speckle_Analysis/outputs/2019/purecreep_C.npz') # correlation_undisturbed_multicycle = undisturbed_multicycle ['data1'] means_undisturbed_multicycle = undisturbed_multicycle ['data2'] l = 3.3 * 100000 c = ((8*np.pi)*(np.sqrt(2/5))*l)/633 undisturbed_multicycle = [] for i in 
range(len(means_undisturbed_multicycle)): undisturbed_multicycle.append(-(np.log(means_undisturbed_multicycle[i][1])/c) ) fig = plt.figure(figsize=(8, 2)) mean_heat = np.mean(heat_multicycle[261:1600]) mean_heat = np.repeat(mean_heat,1600-261) time = np.arange(0,1600-261,1) # plt.plot(undisturbed_multicycle[0:1500]) # plt.plot(tapping_multicycle[0:1700],'k',alpha = 0.5,label = 'tapping') plt.plot(heat_multicycle[261:1600],'r',alpha = 0.5,label = 'heating') plt.plot(time,mean_heat,'k--',alpha = 0.7) # plt.yscale('log') # plt.ylim([1e-7,1e-2]) plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) plt.ylabel(r'$ \langle \dot{\epsilon} \rangle$',fontsize = 12) plt.xlabel('time (s)') # plt.tight_layout() plt.savefig('heat_cycles.png', bbox_inches='tight')Ensemble timeseriesimport matplotlib.patheffects as pe from matplotlib.pyplot import cm fig = plt.figure(figsize=(7,2)) ax1 = plt.subplot(121) ################################################################################################################ ax1.plot(heat_def[3:203],'r',alpha = 0.7) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax1.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$') ax1.set_xlabel('time (s)') ################################################################################################################ left, bottom, width, height = [0.6, 0.65, 0.20, 0.20] ax2 = fig.add_axes([left, bottom, width, height]) time_short = np.arange (0,250,10) for i in range(len(t_efold_short)): ax2.plot(time_short[i],t_efold_short[i],'ro',markeredgecolor = 'k',markersize = 5) ax2.set_yscale('log') ax2.set_xscale('log') ax2.tick_params(axis='both', which='major', labelsize=7) ax2.xaxis.set_ticks([1,10,100,1000]) ax2.yaxis.set_ticks([1,10,100]) ax2.set_ylim([1,200]) ax2.set_xlabel('t (s)') ax2.set_ylabel(r'$\tau_e$ (s)') ################################################################################################################ ax3 = plt.subplot(122) tap_def = -(np.log(test)/c) ax3.plot(tap_def[10:5010],'b',alpha =0.3) ax3.plot(smoothed_taps[10:5010],'-k') ax3.set_xlabel('tap number') # plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) # ax3.set_ylabel(r'$ \langle \dot{\epsilon} \rangle$') plt.tight_layout() # plt.savefig('ensemble_disturbance_timeseries.png', bbox_inches='tight')Ensemble Disturbance mapsfig = plt.figure(figsize=(4, 4)) ax1 = plt.subplot(1, 2, 1) ax2 = plt.subplot(1, 2, 2) # fig.suptitle('heating test - 10 cycles') ################################################################################################################# heat = correlation_heat_cycles[t][1] heat[heat <= 0] = 1e-32 heat_mask = ma.masked_array(heat, mask=mask_heating_cycles) heat_mask = -(np.log(heat_mask)/c) im1 = ax1.imshow(heat_mask,cmap = 'gray_r',vmin = 1e-7, vmax = 1e-4) ax1.set_title('heating',fontsize = 4) ax1.set_xticks([]) ax1.set_yticks([]) divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') ################################################################################################################# tap = correlation_tapping_cycles[t][1] tap[tap <= 0] = 1e-32 tap_mask = ma.masked_array(tap, mask=mask_tapping_cycles) tap_mask = -(np.log(tap_mask)/c) im2 = ax2.imshow(tap_mask,vmin = 1e-7, vmax = 1e-4, cmap = 'gray_r') ax2.set_title('tapping',fontsize = 4) ax2.set_xticks([]) ax2.set_yticks([]) divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', 
pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') plt.tight_layout() # plt.savefig('heating_tapping.png')Disturbance Time Seriesfig = plt.figure(figsize=(5, 3)) time = np.arange(0,3000,.5) # ax1 = plt.subplot(3, 1, 1) # ax1.plot(time,undisturbed_def[:6000],'k',linewidth = 1,alpha = 0.5, label = 'undisturbed') # ax1.set_yscale('log') # ax1.set_xlim([0,1000]) ax2 = plt.subplot(3, 1, 2) ax2.plot(heat_def[0:500],'r',linewidth = 1,alpha = 1,label = 'heating') ax2.set_yscale('log') ax2.set_xlim([0,1000]) ax3 = plt.subplot(3, 1, 3) ax3.plot(tap_def[30:6030],'b',linewidth = 1,alpha = 1,label = 'tapping') ax3.set_yscale('log') ax3.set_xlim([0,1000]) plt.tight_layout() # plt.savefig('disturbances_test.png')Ensemble multicycle disturbance moviesn = 0 fig = plt.figure(figsize=(4, 4)) ax1 = plt.subplot(2, 1, 1) ax2 = plt.subplot(2, 1, 2) for t in range(160): ax1 = plt.subplot(2, 1, 1) ax2 = plt.subplot(2, 1, 2) ################################################################################################################# heat = correlation_heat_cycles[t][1] heat[heat <= 0] = 1e-32 heat_mask = ma.masked_array(heat, mask=mask_heating_cycles) heat_mask = -(np.log(heat_mask)/c) im1 = ax1.imshow(heat_mask,vmin = 1e-7, vmax = 1e-4, cmap = 'gray_r') ax1.set_title('heating',fontsize = 4) ax1.set_xticks([]) ax1.set_yticks([]) divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') ################################################################################################################# tap = correlation_tapping_cycles[t][1] tap[tap <= 0] = 1e-32 tap_mask = ma.masked_array(tap, mask=mask_tapping_cycles) tap_mask = -(np.log(tap_mask)/c) im2 = ax2.imshow(tap_mask,vmin = 1e-7, vmax = 1e-5, cmap = 'gray_r') ax2.set_title('tapping',fontsize = 4) ax2.set_xticks([]) ax2.set_yticks([]) divider = make_axes_locatable(ax2) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im2, cax=cax, orientation='vertical',format='%.0e') cax.set_title('$\epsilon$') plt.tight_layout() plt.savefig("ensmble_test" + str(n) +".png", format="PNG") plt.clf() n += 1 # mpl.rc('text', usetex = True) # mpl.rc('font', family = 'serif') fig = plt.figure(figsize=(6, 4)) ############################################################################################################# ax1 = plt.subplot2grid((2, 3), (1, 0), rowspan=1,colspan = 3) ax1.plot(heat_def[10:3500],'r',linewidth = 1) ax1.plot(tap_def[10:3500],'b',linewidth = 1) ax1.set_xlabel('time (seconds)') ax1.set_ylabel(r'$\dot{\epsilon }$',fontsize = 12) ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) ax1.set_ylim(0,0.0004) ax1.set_xlim(-10,3500) # ax1.set_yscale('log') # ax1.set_ylim([1e-7,1e-3]) ############################################################################################################# ax2 = plt.subplot2grid((2, 3), (0, 2), rowspan=1,colspan = 1) # labels = list(time) # labels = list(np.logspace(1, 13, num=13, base=2)) labels = np.arange (0,300,5) # t = np.arange(0,10,.1) color=iter(cm.viridis(np.linspace(0,1,60))) for t in range(60): c=next(color) ax2.plot(means_heat_single_cycle_response[t],'-',color = c,linewidth = 1)#,label = int(labels[t])) ax2.set_title('heating') ax2.set_xscale('log') ax2.set_xlabel(r'$\tau$') ax2.set_yticks([]); # box = ax2.get_position() # lgd = ax2.legend() # ax2.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # 
ax2.legend(title = 'start time',loc='center left', frameon = False, bbox_to_anchor=(1, 0.5)) ############################################################################################################# ax3 = plt.subplot2grid((2,3), (0,1), colspan=1,rowspan = 1) # labels = list(time) labels = list(np.logspace(1, 13, num=13, base=2)) color=iter(cm.viridis(np.linspace(0,1,13))) for t in range(13): c=next(color) ax3.plot(means_tap_single_cycle_response[t],'-',color = c,linewidth = 1) ax3.set_title('tapping') ax3.set_yticks([]); ax3.set_xscale('log') # ax3.set_yscale('log') ax3.set_xlabel(r'$\tau$') # ax3.set_ylim([0,1.1]) # box = ax1.get_position() # lgd = ax1.legend() # ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # ax1.legend(title = 'start time',loc='center left', frameon = False, bbox_to_anchor=(1, 0.5)) ############################################################################################################# ax4 = plt.subplot2grid((2,3), (0,0), colspan=1,rowspan = 1) # labels = list(time) # labels = list(np.logspace(1, 13, num=13, base=2)) # t = np.arange(0,10,.1) color=iter(cm.viridis(np.linspace(0,1,13))) for t in range(13): c=next(color) ax4.plot(means_undisturbed[t],color = c,linewidth = 1)#,label = int(labels[t])) ax4.set_title('undisturbed') ax4.set_xscale('log') ax4.set_xlabel(r'$\tau$') # ax4.set_yticks([]); ax4.set_ylabel('G') fig.tight_layout() # # fit subplots and save fig fig.tight_layout() # fig.set_size_inches(w=7,h=4) # fig_name = 'fig2_glassytime.pdf' plt.savefig(fig_name)Movie/Animation frames# fig = plt.figure(figsize=(2, 2)) # ax1 = plt.subplot(1, 1, 1) n = 0 for i in range(len(correlation_tapping)): ax1 = plt.subplot(1, 1, 1) frame = correlation_tapping[i+100][1] masked_map = ma.masked_array(frame, mask=mask_tapping) # masked_map = -(np.log(masked_map)/c) im1 = ax1.imshow(masked_map,vmin = .9,vmax = 1,cmap = 'gray') cmap = plt.cm.gray cmap.set_bad('white',1.) 
divider = make_axes_locatable(ax1) cax = divider.append_axes('right', size='5%', pad=0.05) fig.colorbar(im1, cax=cax, orientation='vertical') cax.set_title('Correlation (G)',fontsize = 8) ax1.annotate("# of taps " + str(n), xy=(5, 5), xytext=(120, 20),fontsize = 8) ax1.set_xticks([]); ax1.set_yticks([]); plt.savefig("test" + str(n+100) +".png", format="PNG") plt.clf() n += 1Loading Temperature/Humidity sensor readoutimport pandas as pd environment = pd.read_csv('/home/ndeshpande/Desktop/Hayden_Feb_01_05_2020.txt',skiprows=1, sep=',', usecols = range(0,5), names = ['id', 'time','celsius','humidity','dew point']) environment = environment.values.tolist() a = len(environment) time = [] humidity = [] temperature = [] dew_point = [] for i in range(a): time.append(environment[i][1]) temperature.append(environment[i][2]) humidity.append(environment[i][3]) dew_point.append(environment[i][4]) # Create some mock data time_sensor = np.arange(1,5820,5) time_sensor = time_sensor/60/24 fig, ax1 = plt.subplots() ax1.set_xlabel('time (days)') ax1.set_ylabel('temperature(C)', color='r') ax1.plot(time_sensor,temperature, color='r') ax1.tick_params(axis='y', labelcolor='r') ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis ax2.set_ylabel('% relative humidity', color='b') # we already handled the x-label with ax1 ax2.plot(time_sensor,humidity, color='b') ax2.tick_params(axis='y', labelcolor='b') fig.tight_layout() # otherwise the right y-label is slightly clipped plt.savefig('temp_humidty.png') fig = plt.figure(figsize=(4, 4)) start_times = (81,81,81,81,82,83,85,89,98,115,149,217,354) time_sensor = np.arange(1,1670,5) time_sensor = time_sensor/60 plt.plot(time_sensor,humidity[81:415],linewidth = 2) plt.xlabel('time') plt.ylabel('% relative humidity') plt.xlabel('time (hours)') plt.savefig('temp_humidty_day1.png')Train a XGBoost regression model on Amazon SageMaker, host inference on a serverless function in AWS Lambda and optionally expose as an API with Amazon API Gateway[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a fully managed end-to-end Machine Learning (ML) service. With SageMaker, you have the option of using the built-in algorithms or you can bring your own algorithms and frameworks to train your models. After training, you can deploy the models in [one of two ways](https://docs.aws.amazon.com/sagemaker/latest/dg/deploy-model.html) for inference - persistent endpoint or batch transform.With a persistent inference endpoint, you get a fully-managed real-time HTTPS endpoint hosted on either CPU or GPU based EC2 instances. It supports features like auto scaling, data capture, model monitoring and also provides cost-effective GPU support using [Amazon Elastic Inference](https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). It also supports hosting multiple models using multi-model endpoints that provide A/B testing capability. You can monitor the endpoint using [Amazon CloudWatch](https://aws.amazon.com/cloudwatch/). In addition to all these, you can use [Amazon SageMaker Pipelines](https://aws.amazon.com/sagemaker/pipelines/) which provides a purpose-built, easy-to-use Continuous Integration and Continuous Delivery (CI/CD) service for Machine Learning.There are use cases where you may want to host the ML model on a real-time inference endpoint that is cost-effective and do not require all the capabilities provided by the SageMaker persistent inference endpoint. 
These may involve:* simple models* models whose sizes are less than 200 MB* models that are invoked infrequently and do not need inference instances running all the time* models that do not need to be re-trained and re-deployed frequently* models that do not need GPUs for inference. In these cases, you can take the trained ML model and host it as a serverless function on [AWS Lambda](https://aws.amazon.com/lambda/) and optionally expose it as an API by front-ending it with an HTTP/REST API hosted on [Amazon API Gateway](https://aws.amazon.com/api-gateway/). This is cost-effective compared to having inference instances running all the time, and it still provides a fully managed and scalable solution. This notebook demonstrates this solution by using SageMaker's [built-in XGBoost algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html) to train a regression model on the [California Housing dataset](https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html). It loads the trained model as a Python3 [pickle](https://docs.python.org/3/library/pickle.html) object in a container to be hosted on an [AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html) function. Finally, it provides instructions for exposing it as an API by front-ending it with an HTTP/REST API hosted on [Amazon API Gateway](https://aws.amazon.com/api-gateway/).**Warning:** The Python3 [pickle](https://docs.python.org/3/library/pickle.html) module is not secure. Only unpickle data you trust. Keep this in mind if you decide to get the trained ML model file from somewhere else instead of building your own model.**Note:*** This notebook should only be run from within a SageMaker notebook instance, as it references SageMaker native APIs. The underlying OS of the notebook instance can be either Amazon Linux v1 or v2.* At the time of writing this notebook, the most recent suitable Jupyter notebook kernel for this notebook was `conda_python3`, which comes built-in with SageMaker notebooks.* This notebook uses CPU-based instances for training.* If you already have a trained model that can be loaded as a Python3 [pickle](https://docs.python.org/3/library/pickle.html) object, then you can skip the training step in this notebook and directly upload the model file to S3 and update the code in this notebook's cells accordingly.* Although you can host a Python3 function directly on AWS Lambda, choosing the container option to package the code and dependencies is the best fit for this use case, as the ML model file along with its dependencies will easily exceed the maximum deployment package size of 50 MB for zipped or 250 MB for unzipped files.* In this notebook, the ML model generated in the training step has not been tuned, as that is not the intent of this demo.* This notebook will create resources in the same AWS account and in the same region where this notebook is running.* Users of this notebook require `root` access to install/update required software. This is set by default when you create the notebook. For more info, refer [here](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html).**Table of Contents:**1. [Complete prerequisites](Complete%20prerequisites) 1. [Check and configure access to the Internet](Check%20and%20configure%20access%20to%20the%20Internet) 2. [Check and upgrade required software versions](Check%20and%20upgrade%20required%20software%20versions) 3. [Check and configure security permissions](Check%20and%20configure%20security%20permissions) 4.
[Organize imports](Organize%20imports) 5. [Create common objects](Create%20common%20objects)2. [Prepare the data](Prepare%20the%20data) 1. [Create the local directories](Create%20the%20local%20directories) 2. [Load the dataset and view the details](Load%20the%20dataset%20and%20view%20the%20details) 3. [(Optional) Visualize the dataset]((Optional)%20Visualize%20the%20dataset) 4. [Split the dataset into train, validate and test sets](Split%20the%20dataset%20into%20train,%20validate%20and%20test%20sets) 5. [Standardize the datasets](Standardize%20the%20datasets) 6. [Save the prepared datasets locally](Save%20the%20prepared%20datasets%20locally) 7. [Upload the prepared datasets to S3](Upload%20the%20prepared%20datasets%20to%20S3)3. [Perform training](Perform%20training) 1. [Set the training parameters](Set%20the%20training%20parameters) 2. [(Optional) Delete previous checkpoints]((Optional)%20Delete%20previous%20checkpoints) 3. [Run the training job](Run%20the%20training%20job)4. [Create and push the Docker container to an Amazon ECR repository](Create%20and%20push%20the%20Docker%20container%20to%20an%20Amazon%20ECR%20repository) 1. [Retrieve the model pickle file](Retrieve%20the%20model%20pickle%20file) 2. [(Optional) Test the model pickle file]((Optional)%20Test%20the%20model%20pickle%20file) 3. [View the inference script](View%20the%20inference%20script) 4. [Create the Dockerfile](Create%20the%20Dockerfile) 5. [Create the container](Create%20the%20container) 6. [Create the private repository in ECR](Create%20the%20private%20repository%20in%20ECR) 7. [Push the container to ECR](Push%20the%20container%20to%20ECR)5. [Create and test the AWS Lambda function](Create%20and%20test%20the%20AWS%20Lambda%20function) 1. [Create the Lambda function](Create%20the%20Lambda%20function) 2. [Test the Lambda function](Test%20the%20Lambda%20function) 6. [(Optional) Front-end the Lambda function with Amazon API Gateway]((Optional)%20Front-end%20the%20Lambda%20function%20with%20Amazon%20API%20Gateway)7. [Cleanup](Cleanup) 1. Complete prerequisites Check and complete the prerequisites. A. Check and configure access to the Internet This notebook requires outbound access to the Internet to download the required software updates. You can either provide direct Internet access (default) or provide Internet access through a VPC. For more information on this, refer [here](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-notebook-and-internet-access.html). B. 
Check and upgrade required software versions This notebook requires:* [SageMaker Python SDK version 2.x](https://sagemaker.readthedocs.io/en/stable/v2.html)* [Python 3.6.x](https://www.python.org/downloads/release/python-360/)* [Boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html)* [AWS Command Line Interface](https://aws.amazon.com/cli/)* [Docker](https://www.docker.com/)* [XGBoost Python module](https://xgboost.readthedocs.io/en/latest/python/python_intro.html) Capture the version of the OS on which this notebook is running.import subprocess from subprocess import Popen p = Popen(['cat','/etc/system-release'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) os_cmd_output, os_cmd_error = p.communicate() if len(os_cmd_error) > 0: print('Notebook OS command returned error :: {}'.format(os_cmd_error)) os_version = '' else: if os_cmd_output.find('Amazon Linux release 2') >= 0: os_version = 'ALv2' elif os_cmd_output.find('Amazon Linux AMI release 2018.03') >= 0: os_version = 'ALv1' else: os_version = '' print('Notebook OS version : {}'.format(os_version))**Note:** When running the following cell, if you get 'module not found' errors, then uncomment the appropriate installation commands and install the modules. Also, uncomment and run the kernel shutdown command. When the kernel comes back, comment out the installation and kernel shutdown commands and run the following cell. Now, you should not see any errors.""" Last tested versions: On Amazon Linux v1 (ALv1) notebook: ----------------------------------- SageMaker Python SDK version : 2.54.0 Python version : 3.6.13 | packaged by conda-forge | (default, Feb 19 2021, 05:36:01) [GCC 9.3.0] Boto3 version : 1.18.27 XGBoost Python module version : 1.4.2 AWS CLI version : aws-cli/1.20.21 Python/3.6.13 Linux/4.14.238-125.422.amzn1.x86_64 botocore/1.21.27 Docker version : 19.03.13-ce, build 4484c46 On Amazon Linux v2 (ALv2) notebook: ----------------------------------- SageMaker Python SDK version : 2.59.1 Python version : 3.6.13 | packaged by conda-forge | (default, Feb 19 2021, 05:36:01) [GCC 9.3.0] Boto3 version : 1.18.36 XGBoost Python module version : 1.4.2 AWS CLI version : aws-cli/1.20.24 Python/3.6.13 Linux/4.14.243-185.433.amzn2.x86_64 botocore/1.21.36 Docker version : 20.10.7, build f0df350 Amazon ECR Docker Credential Helper : 0.6.3 """ import boto3 import IPython import sagemaker import sys try: import xgboost as xgb except ModuleNotFoundError: # Install XGBoost and restart kernel print('Installing XGBoost module...') !{sys.executable} -m pip install -U xgboost IPython.Application.instance().kernel.do_shutdown(True) # Install/upgrade the Sagemaker SDK, Boto3 and XGBoost and restart kernel #!{sys.executable} -m pip install -U sagemaker boto3 xgboost #IPython.Application.instance().kernel.do_shutdown(True) # Get the current installed version of Sagemaker SDK, Python, Boto3 and XGBoost print('SageMaker Python SDK version : {}'.format(sagemaker.__version__)) print('Python version : {}'.format(sys.version)) print('Boto3 version : {}'.format(boto3.__version__)) print('XGBoost Python module version : {}'.format(xgb.__version__)) # Get the AWS CLI version print('AWS CLI version : ') !aws --versionDocker should be pre-installed in the SageMaker notebook instance. Verify it by running the `docker --version` command. If Docker is not installed, you can install it by uncommenting the install command in the following cell. 
You will require `sudo` rights to install.# Verify if docker is installed !docker --version # Install docker #!sudo yum --assumeyes install docker**Additional prerequisite (when notebook is running on Amazon Linux v2):**Install and configure the [Amazon ECR credential helper](https://github.com/awslabs/amazon-ecr-credential-helper). This makes it easier to store and use Docker credentials for use with Amazon ECR private registries.if os_version == 'ALv2': # Install !sudo yum --assumeyes install amazon-ecr-credential-helper # Verify installation print('Amazon ECR Docker Credential Helper version : ') !docker-credential-ecr-login version # Create the .docker directory if it doesn't exist !mkdir -p ~/.docker # Configure !printf "{\\n\\t\"credsStore\": \"ecr-login\"\\n}" > ~/.docker/config.json # Verify configuration !cat ~/.docker/config.jsonC. Check and configure security permissions Users of this notebook require `root` access to install/update required software. This is set by default when you create the notebook. For more info, refer [here](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html).This notebook uses the IAM role attached to the underlying notebook instance. This role should have the following permissions,1. Full access to the S3 bucket that will be used to store training and output data.2. Full access to launch training instances.3. Access to write to CloudWatch logs and metrics.4. Access to create and write to Amazon ECR private registries.5. Access to create and invoke AWS Lambda functions.To view the name of this role, run the following cell.print(sagemaker.get_execution_role())This notebook creates an AWS Lambda function for hosting the ML model. This function requires an IAM role that it assumes when it is invoked. For more information on this, refer [here](https://docs.aws.amazon.com/lambda/latest/dg/lambda-intro-execution-role.html).For the function created in this notebook, at a minimum, this role should provide access to write to CloudWatch logs and metrics. D. Organize imports Organize all the library and module imports for later use.from io import StringIO import json import logging import matplotlib.pyplot as plt import numpy as np import os import pickle import pandas as pd from sagemaker.inputs import TrainingInput import seaborn as sns import sklearn.model_selection from sklearn.preprocessing import StandardScaler import tarfile import timeE. 
Create common objects Create common objects to be used in future steps in this notebook.# Specify the S3 bucket name s3_bucket = '' # Create the S3 Boto3 resource s3_resource = boto3.resource('s3') s3_bucket_resource = s3_resource.Bucket(s3_bucket) # Create the SageMaker Boto3 client sm_client = boto3.client('sagemaker') # Create the ECR client ecr_client = boto3.client('ecr') # Create the AWS Lambda client lambda_client = boto3.client('lambda') # Get the AWS region name region_name = sagemaker.Session().boto_region_name # Base name to be used to create resources nb_name = 'sm-xgboost-ca-housing-lambda-model-hosting' # Names of various resources train_job_name = 'train-{}'.format(nb_name) # Names of local sub-directories in the notebook file system data_dir = os.path.join(os.getcwd(), 'data/{}'.format(nb_name)) train_dir = os.path.join(os.getcwd(), 'data/{}/train'.format(nb_name)) val_dir = os.path.join(os.getcwd(), 'data/{}/validate'.format(nb_name)) test_dir = os.path.join(os.getcwd(), 'data/{}/test'.format(nb_name)) # Location of the datasets file in the notebook file system dataset_csv_file = os.path.join(os.getcwd(), 'datasets/california_housing.csv') # Container artifacts directory in the notebook file system container_artifacts_dir = os.path.join(os.getcwd(), 'container-artifacts/{}'.format(nb_name)) # Location of the AWS Lambda script (containing the inference code) in the notebook file system lambda_script_file_name = 'lambda_sm_xgboost_ca_housing_inference.py' lambda_script_file = os.path.join(os.getcwd(), 'scripts/{}'.format(lambda_script_file_name)) # Sub-folder names in S3 train_dir_s3_prefix = '{}/data/train'.format(nb_name) val_dir_s3_prefix = '{}/data/validate'.format(nb_name) test_dir_s3_prefix = '{}/data/test'.format(nb_name) # Location in S3 where the model checkpoint will be stored model_checkpoint_s3_path = 's3://{}/{}/checkpoint/'.format(s3_bucket, nb_name) # Location in S3 where the trained model will be stored model_output_s3_path = 's3://{}/{}/output/'.format(s3_bucket, nb_name) # Names of the model tar file and extracted file - these are dependent on the # framework and algorithm you used to train the model. This notebook uses # SageMaker's built-in XGBoost algorithm and that will have the names as follows: model_tar_file_name = 'model.tar.gz' extracted_model_file_name = 'xgboost-model' # Container details container_image_name = nb_name container_registry_url_prefix = '' # Lambda function details lambda_function_name = nb_name lambda_iam_role = '' lambda_timeout_in_seconds = 30 lambda_memory_size_in_mb = 10242. Prepare the data The [California Housing dataset](https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html) consists of 20,640 observations on housing prices with 9 economic covariates. These covariates are,* MedianHouseValue* MedianIncome* HousingMedianAge* TotalRooms* TotalBedrooms* Population* Households* Latitude* LongitudeThis dataset has been downloaded to the local `datasets` directory and modified as a CSV file with the feature names in the first row. This will be used in this notebook.The following steps will help with preparing the datasets for training, validation and testing. 
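Before moving on to data preparation, note that `s3_bucket` and `lambda_iam_role` were left as empty strings in the common-objects cell above and must be filled in. A minimal sketch (not part of the original notebook) that fails fast if the placeholders are missing and confirms the bucket is reachable:

```python
# Sketch: fail fast if the placeholders above were not filled in
assert s3_bucket, 'Set s3_bucket to the name of an existing S3 bucket.'
assert lambda_iam_role, 'Set lambda_iam_role to the ARN of the Lambda execution role.'

# Confirm the bucket is accessible with the notebook instance's credentials
boto3.client('s3').head_bucket(Bucket=s3_bucket)
print('S3 bucket {} is accessible.'.format(s3_bucket))
```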
A) Create the local directories Create the directories in the local system where the dataset will be copied to and processed.# Create the local directories if they don't exist os.makedirs(data_dir, exist_ok=True) os.makedirs(train_dir, exist_ok=True) os.makedirs(val_dir, exist_ok=True) os.makedirs(test_dir, exist_ok=True)B) Load the dataset and view the details Check if the CSV file exists in the `datasets` directory and load it into a Pandas DataFrame. Finally, print the details of the dataset.# Check if the dataset file exists and proceed if os.path.exists(dataset_csv_file): print('Dataset CSV file \'{}\' exists.'.format(dataset_csv_file)) # Load the data into a Pandas DataFrame pd_data_frame = pd.read_csv(dataset_csv_file) # Print the first 5 records #print(pd_data_frame.head(5)) # Describe the dataset print(pd_data_frame.describe()) else: print('Dataset CSV file \'{}\' does not exist.'.format(dataset_csv_file))C) (Optional) Visualize the dataset Display the distributions in the dataset.# Print the correlation matrix plt.figure(figsize=(11, 7)) sns.heatmap(cbar=False, annot=True, data=(pd_data_frame.corr() * 100), cmap='coolwarm') plt.title('% Correlation Matrix') plt.show()D) Split the dataset into train, validate and test sets Split the dataset into train, validate and test sets after shuffling. Split further into x and y sets.# Split into train and test datasets after shuffling train, test = sklearn.model_selection.train_test_split(pd_data_frame, test_size=0.2, random_state=35, shuffle=True) # Split the train dataset further into train and validation datasets after shuffling train, val = sklearn.model_selection.train_test_split(train, test_size=0.1, random_state=25, shuffle=True) # Define functions to get x and y columns def get_x(df): return df[['median_income','housing_median_age','total_rooms','total_bedrooms', 'population','households','latitude','longitude']] def get_y(df): return df[['median_house_value']] # Load the x and y columns for train, validation and test datasets x_train = get_x(train) y_train = get_y(train) x_val = get_x(val) y_val = get_y(val) x_test = get_x(test) y_test = get_y(test) # Summarize the datasets print("x_train shape:", x_train.shape) print("y_train shape:", y_train.shape) print("x_val shape:", x_val.shape) print("y_val shape:", y_val.shape) print("x_test shape:", x_test.shape) print("y_test shape:", y_test.shape)E) Standardize the datasets * Standardize the x columns of the train dataset using the `fit_transform()` function of `StandardScaler`.* Standardize the x columns of the validate and test datasets using the `transform()` function of `StandardScaler`.# Standardize the dataset scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_val = scaler.transform(x_val) x_test = scaler.transform(x_test)F) Save the prepared datasets locally Save the prepared train, validate and test datasets to local directories. Prior to saving, concatenate x and y columns as needed. 
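The fitted `StandardScaler` is only used within this notebook, but if you later want to send raw (unscaled) feature values to the hosted model, you will need it at inference time as well. A small optional sketch (an addition, not part of the original notebook) that persists the scaler next to the prepared data:

```python
# Optional sketch: persist the fitted scaler so raw inputs can be scaled at inference time
scaler_file = os.path.join(data_dir, 'scaler.pkl')
with open(scaler_file, 'wb') as f:
    pickle.dump(scaler, f)

# Reload it later with:
# with open(scaler_file, 'rb') as f:
#     scaler = pickle.load(f)
```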
Create the directories if they don't exist.# Save the prepared dataset (in numpy format) to the local directories as csv files np.savetxt(os.path.join(train_dir, 'train.csv'), np.concatenate((y_train.to_numpy(), x_train), axis=1), delimiter=',') np.savetxt(os.path.join(train_dir, 'train_x.csv'), x_train) np.savetxt(os.path.join(train_dir, 'train_y.csv'), y_train.to_numpy()) np.savetxt(os.path.join(val_dir, 'validate.csv'), np.concatenate((y_val.to_numpy(), x_val), axis=1), delimiter=',') np.savetxt(os.path.join(val_dir, 'validate_x.csv'), x_val) np.savetxt(os.path.join(val_dir, 'validate_y.csv'), y_val.to_numpy()) np.savetxt(os.path.join(test_dir, 'test.csv'), np.concatenate((y_test.to_numpy(), x_test), axis=1), delimiter=',') np.savetxt(os.path.join(test_dir, 'test_x.csv'), x_test) np.savetxt(os.path.join(test_dir, 'test_y.csv'), y_test.to_numpy())G) Upload the prepared datasets to S3 Upload the datasets from the local directories to appropriate sub-directories in the specified S3 bucket.# Upload the data to S3 train_dir_s3_path = sagemaker.Session().upload_data(path='./data/{}/train/'.format(nb_name), bucket=s3_bucket, key_prefix=train_dir_s3_prefix) val_dir_s3_path = sagemaker.Session().upload_data(path='./data/{}/validate/'.format(nb_name), bucket=s3_bucket, key_prefix=val_dir_s3_prefix) test_dir_s3_path = sagemaker.Session().upload_data(path='./data/{}/test/'.format(nb_name), bucket=s3_bucket, key_prefix=test_dir_s3_prefix) # Capture the S3 locations of the uploaded datasets train_s3_path = '{}/train.csv'.format(train_dir_s3_path) train_x_s3_path = '{}/train_x.csv'.format(train_dir_s3_path) train_y_s3_path = '{}/train_y.csv'.format(train_dir_s3_path) val_s3_path = '{}/validate.csv'.format(val_dir_s3_path) val_x_s3_path = '{}/validate_x.csv'.format(val_dir_s3_path) val_y_s3_path = '{}/validate_y.csv'.format(val_dir_s3_path) test_s3_path = '{}/test.csv'.format(test_dir_s3_path) test_x_s3_path = '{}/test_x.csv'.format(test_dir_s3_path) test_y_s3_path = '{}/test_y.csv'.format(test_dir_s3_path)3. Perform training In this step, SageMaker's [built-in XGBoost algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html) is used to train a regression model on the [California Housing dataset](https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html).Note: This model has not been tuned as that is not the intent of this demo. A) Set the training parameters 1. Inputs - S3 location of the training and validation data.2. Hyperparameters.3. Training instance details: 1. Instance count 2. Instance type 3. The max run time of the training job 4. (Optional) Use Spot instances. For more info, refer [here](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html). 5. (Optional) The max wait for Spot instances, if using Spot. This should be larger than the max run time. 4. Base job name5. 
Appropriate local and S3 directories that will be used by the training job.# Set the input data input along with their content types train_input = TrainingInput(train_s3_path, content_type='text/csv') val_input = TrainingInput(val_s3_path, content_type='text/csv') inputs = {'train':train_input, 'validation':val_input} # Set the hyperparameters hyperparameters = { 'objective':'reg:squarederror', 'max_depth':'7', 'eta':'0.02', 'alpha':'1.77', 'colsample_bytree':'0.7', 'num_round':'1864'} # Set the instance count, instance type, volume size, options to use Spot instances and other parameters train_instance_count = 1 train_instance_type = 'ml.m5.xlarge' train_instance_volume_size_in_gb = 5 #use_spot_instances = True #spot_max_wait_time_in_seconds = 5400 use_spot_instances = False spot_max_wait_time_in_seconds = None max_run_time_in_seconds = 3600 algorithm_name = 'xgboost' algorithm_version = '1.2-1' py_version = 'py37' # Get the container image URI for the specified parameters container_image_uri = sagemaker.image_uris.retrieve(framework=algorithm_name, region=region_name, version=algorithm_version, py_version=py_version, instance_type=train_instance_type, image_scope='training') # Set the training container related parameters container_log_level = logging.INFO # Location where the model checkpoints will be stored locally in the container before being uploaded to S3 model_checkpoint_local_dir = '/opt/ml/checkpoints/' # Location where the trained model will be stored locally in the container before being uploaded to S3 model_local_dir = '/opt/ml/model'B) (Optional) Delete previous checkpoints If model checkpoints from previous trainings are found in the S3 checkpoint location specified in the previous step, then training will resume from those checkpoints. In order to start a fresh training, run the following code cell to delete all checkpoint objects from S3.# Delete the checkpoints if you want to train from the beginning; else ignore this code cell for checkpoint_file in s3_bucket_resource.objects.filter(Prefix='{}/checkpoint/'.format(nb_name)): checkpoint_file_key = checkpoint_file.key print('Deleting {} ...'.format(checkpoint_file_key)) s3_resource.Object(s3_bucket_resource.name, checkpoint_file_key).delete()C) Run the training job Prepare the `estimator` and call the `fit()` method. This will pull the container containing the specified version of the algorithm in the AWS region and run the training job in the specified type of EC2 instance(s). The training data will be pulled from the specified location in S3 and training results and checkpoints will be written to the specified locations in S3.Note: SageMaker Debugger is disabled.# Create the estimator estimator = sagemaker.estimator.Estimator( image_uri=container_image_uri, checkpoint_local_path=model_checkpoint_local_dir, checkpoint_s3_uri=model_checkpoint_s3_path, model_dir=model_local_dir, output_path=model_output_s3_path, instance_type=train_instance_type, instance_count=train_instance_count, use_spot_instances=use_spot_instances, max_wait=spot_max_wait_time_in_seconds, max_run=max_run_time_in_seconds, hyperparameters=hyperparameters, role=sagemaker.get_execution_role(), base_job_name=train_job_name, framework_version=algorithm_version, py_version=py_version, container_log_level=container_log_level, script_mode=False, debugger_hook_config=False, disable_profiler=True) # Perform the training estimator.fit(inputs, wait=True)4. 
Create and push the Docker container to an Amazon ECR repository In this step, we will create a Docker container containing the generated model along with its dependencies. If you bring a pre-trained model, you can upload it to S3 and use it to build the container. The following steps contains instructions for doing so. A) Retrieve the model pickle file * The model file generated using SageMaker's [built-in XGBoost algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html) will be a Python pickle file zipped up in a tar file named `model.tar.gz`. The S3 URI for this file will be available in the `model_data` attribute of the `estimator` object created in the training step.* If you bring your pre-trained model, you have to specify the S3 URI appropriately in the following cell.* The zip file needs to be downloaded from S3 and extracted.* The name of the extracted pickle file will depend on the framework and algorithm that was used to train the model. In this notebook example, we have used SageMaker's [built-in XGBoost algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html) and so the pickle file will be named `xgboost-model`. You will see this when the model tar file is extracted.# Create the container artifacts directory if it doesn't exist os.makedirs(container_artifacts_dir, exist_ok=True) # Set the file paths model_tar_file_s3_path_suffix = '{}/output/{}/output/{}'.format(nb_name, estimator.latest_training_job.name, model_tar_file_name) model_tar_file_local_path = '{}/{}'.format(container_artifacts_dir, model_tar_file_name) extracted_model_file_local_path = '{}/{}'.format(container_artifacts_dir, extracted_model_file_name) # Delete old model files if they exist if os.path.exists(model_tar_file_local_path): os.remove(model_tar_file_local_path) if os.path.exists(extracted_model_file_local_path): os.remove(extracted_model_file_local_path) # Download the model tar file from S3 s3_bucket_resource.download_file(model_tar_file_s3_path_suffix, model_tar_file_local_path) # Extract the model tar file and retrieve the model pickle file with tarfile.open(model_tar_file_local_path, "r:gz") as tar: tar.extractall(path=container_artifacts_dir)B) (Optional) Test the model pickle file The code in the following cell entirely depends on the framework and algorithm that was used to train the model. The extracted Python3 pickle file will contain the appropriate object name. 
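If you are unsure which file name to expect inside the downloaded archive, you can list its members before relying on `extracted_model_file_name` (a small sketch using the paths defined above):

```python
# Sketch: inspect the archive contents before extraction; for the built-in
# XGBoost algorithm this should show a single member named 'xgboost-model'
with tarfile.open(model_tar_file_local_path, 'r:gz') as tar:
    print(tar.getnames())
```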
If you are bringing your own model file, you have to change this cell appropriately.# Load the model pickle file as a pickle object pickle_file_path = extracted_model_file_local_path with open(pickle_file_path, 'rb') as pkl_file: model = pickle.load(pkl_file) # Run a prediction against the model loaded as a pickle object # by sending the first record of the test dataset test_pred_x_df = pd.read_csv(StringIO(','.join(map(str, x_test[0]))), sep=',', header=None) test_pred_x = xgb.DMatrix(test_pred_x_df.values) print('Input for prediction = {}'.format(test_pred_x_df.values)) print('Predicted value = {}'.format(model.predict(test_pred_x)[0])) print('Actual value = {}'.format(y_test.values[0][0])) print('Note: There may be a huge difference between the actual and predicted values as the model has not been tuned in the training step.')C) View the inference script The inference script is a Python3 script that implements the `handler` function required by Lambda and contains the following logic:* Load the ML model pickle object into memory.* Parse the request sent to the Lambda function either from direct invocation or from a REST/HTTP API in Amazon API Gateway.* Run the prediction.* Format the response to match with the parameter specified in the request.* Return the response.The request should be in the following format:`{ "response_content_type": "", "pred_x_csv": ""}`This script will be packaged into the container that will be built in the upcoming steps.You can view the script by running the following code cell.# View the inference script !cat {lambda_script_file}D) Create the Dockerfile In this step, we will create a [Dockerfile](https://docs.docker.com/engine/reference/builder/) which is required to build our [Docker](https://www.docker.com/) container containing the model pickle file, an inference script and its dependencies.In order to create the container, we will use the [AWS Lambda Python 3.9 container image](https://gallery.ecr.aws/lambda/python) available in the [Amazon ECR public registry](https://aws.amazon.com/ecr/) as the base image. 
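For orientation, the following is a minimal, hypothetical sketch of what a handler following the request format above could look like; it is not the actual contents of `lambda_sm_xgboost_ca_housing_inference.py`. It assumes the `MODEL_PICKLE_FILE_PATH` environment variable set in the Dockerfile created below and the `response_content_type`/`pred_x_csv` request keys described earlier:

```python
# Hypothetical sketch of a container-based Lambda handler (the real script may differ)
import json
import os
import pickle
from io import StringIO

import pandas as pd
import xgboost as xgb

model = None  # cached across warm invocations of the same container


def handler(event, context):
    global model
    if model is None:
        with open(os.environ['MODEL_PICKLE_FILE_PATH'], 'rb') as f:
            model = pickle.load(f)

    # API Gateway proxy integrations wrap the payload in a JSON 'body' string;
    # direct invocations pass the payload dict as-is.
    payload = json.loads(event['body']) if 'body' in event else event

    pred_x_df = pd.read_csv(StringIO(payload['pred_x_csv']), header=None)
    prediction = float(model.predict(xgb.DMatrix(pred_x_df.values))[0])

    if payload.get('response_content_type') == 'application/json':
        return {'statusCode': 200, 'body': json.dumps({'prediction': prediction})}
    return {'statusCode': 200, 'body': str(prediction)}
```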
As this is a public registry, you do not require any credentials or permissions to download it.Note: At the time of writing this notebook, this image was based on [Amazon Linux 2](https://aws.amazon.com/amazon-linux-2/).# Copy the inference script to the container-artifacts directory !cp -pr {lambda_script_file} {container_artifacts_dir}/app.py # Create the Dockerfile content dockerfile_content_lines = [] dockerfile_content_lines.append('# syntax=docker/dockerfile:1\n\n') dockerfile_content_lines.append('# Use AWS Lambda Python 3.9 as the base image\n') dockerfile_content_lines.append('FROM public.ecr.aws/lambda/python:3.9\n\n') dockerfile_content_lines.append('# Install the Python packages required for the inference script\n') dockerfile_content_lines.append('RUN pip install --upgrade pip\n') dockerfile_content_lines.append('RUN pip install pandas\n') dockerfile_content_lines.append('RUN pip install xgboost\n\n') dockerfile_content_lines.append('# Copy the extracted model file and the inference script\n') dockerfile_content_lines.append('COPY ') dockerfile_content_lines.append(extracted_model_file_name) dockerfile_content_lines.append(' ./\n') dockerfile_content_lines.append('COPY app.py ./\n\n') dockerfile_content_lines.append('# Specify the path to the extracted model file as an ENV variable\n') dockerfile_content_lines.append('ENV MODEL_PICKLE_FILE_PATH=') dockerfile_content_lines.append(extracted_model_file_name) dockerfile_content_lines.append('\n\n') dockerfile_content_lines.append('# Specify the default command to run\n') dockerfile_content_lines.append('CMD ["app.handler"]') # Create the Dockerfile dockerfile_local_path = '{}/Dockerfile'.format(container_artifacts_dir) with open(dockerfile_local_path, 'wt') as file: file.write(''.join(dockerfile_content_lines)) # Print the contents of the generated Dockerfile !cat {dockerfile_local_path}E) Create the container Create the Docker container using the `docker build` command. Specify the container image name and point to the container-artifacts directory that contains all the files to build the container.Note: You may see warning messages when the container is built with the Dockerfile that we created in the prior step. These warnings will be around installing the Python packages that are required by the inference script. You can choose to either ignore or fix them.# Create the Docker container !docker build -t {container_image_name} {container_artifacts_dir}F) Create the private repository in ECR In order to create an AWS Lambda function using a container, the container image should exist in [Amazon ECR](https://aws.amazon.com/ecr/). We will create a private repository in Amazon ECR for this demo.In this step, we will check if the private repository in Amazon ECR that we intend to create already exists or not. If it does not exist, we will create it with the repository name the same as the container image name.Note: When creating the repository, setting the `scanOnPush` parameter to `True` will automatically initiate a vulnerability scan on the container image that is pushed to the repository. 
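One more note before creating the repository: `container_registry_url_prefix` was left empty in the common-objects cell. For a private registry it has the form `<account-id>.dkr.ecr.<region>.amazonaws.com`; a short optional sketch that derives it programmatically:

```python
# Sketch: derive the private ECR registry URL prefix for this account and region
account_id = boto3.client('sts').get_caller_identity()['Account']
container_registry_url_prefix = '{}.dkr.ecr.{}.amazonaws.com'.format(account_id, region_name)
print(container_registry_url_prefix)
```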
For more info on image scanning, refer [here](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html).# Check if the ECR repository exists already; if not, then create it try: ecr_client.describe_repositories(repositoryNames=[container_image_name]) print('ECR repository {} already exists.'.format(container_image_name)) except ecr_client.exceptions.RepositoryNotFoundException: print('ECR repository {} does not exist.'.format(container_image_name)) print('Creating ECR repository {}...'.format(container_image_name)) # Create the ECR repository - here we use the container image name for the repository name ecr_client.create_repository(repositoryName=container_image_name, imageScanningConfiguration={ 'scanOnPush': True }) print('Completed creating ECR repository {}.'.format(container_image_name))G) Push the container to ECR In this step, we will push the container to a private registry that we created in Amazon ECR.When using an Amazon ECR private registry, you must authenticate your Docker client to your private registry so that you can use the `docker push` and `docker pull` commands to push and pull images to and from the repositories in that registry. For more information about this, refer [here](https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry_auth.html).1. If this notebook instance is running on Amazon Linux v1, the authentication happens through an authorization token generated by an AWS CLI command in the following code cell. This token will be automatically deleted when the code cell completes execution.2. If this notebook instance is running on Amazon Linux v2, the authentication happens through temporary credentials generated based on the IAM role attached to this notebook. For this, you have to complete the prerequisite mentioned in the first step of this notebook.# Set the image names source_image_name = '{}:latest'.format(container_image_name) target_image_name = '{}/{}:latest'.format(container_registry_url_prefix, container_image_name) if os_version == 'ALv1': # Get the private registry credentials using an authorization token !aws ecr get-login-password --region {region_name} | docker login --username AWS --password-stdin {container_registry_url_prefix} # Tag the container !docker tag {source_image_name} {target_image_name} # Push the container to the specified registry in Amazon ECR !docker push {target_image_name} if os_version == 'ALv1': # Delete the Docker credentials file print('\nDeleting the generated Docker credentials file...') !rm /home/ec2-user/.docker/config.json print('Completed deleting the generated Docker credentials file.') # Verify the delete print('Verifying the delete of the generated Docker credentials file...') !cat /home/ec2-user/.docker/config.json print('Completed verifying the delete of the generated Docker credentials file.')5. Create and test the AWS Lambda function In this step, we will create and test the [AWS Lambda](https://aws.amazon.com/lambda/) function using the Docker container that was created in the previous step. A) Create the Lambda function In this step, we will check if the Lambda function that we intend to create already exists or not. If it does not exist, we will create it.Note: We have not configured this function to use an [Amazon VPC](https://aws.amazon.com/vpc) for networking. 
If you require it, refer to the instructions [here](https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html).# Check if the AWS Lambda function exists already; if not, then create it try: lambda_client.get_function(FunctionName=lambda_function_name) print('AWS Lambda function {} already exists.'.format(lambda_function_name)) except lambda_client.exceptions.ResourceNotFoundException: print('AWS Lambda function {} does not exist.'.format(lambda_function_name)) print('Creating AWS Lambda function {}...'.format(lambda_function_name)) lambda_client.create_function( FunctionName=lambda_function_name, Role=lambda_iam_role, Code={'ImageUri' : target_image_name}, Description='California Housing price prediction regression model built on the SageMaker built-in XGBoost algorithm and a Python3 based inference function hosted inside a Docker container.', Timeout=lambda_timeout_in_seconds, MemorySize=lambda_memory_size_in_mb, Publish=True, PackageType='Image' ) print('Completed creating AWS Lambda function {}. The function will be in \'Pending\' state immediately after creation. Wait for it to be ready before invoking it. This should take a few seconds.'.format(lambda_function_name)) # Sleep every 5 seconds and print the state of the Lambda function until it is not 'Pending' while True: get_function_response = lambda_client.get_function(FunctionName=lambda_function_name) function_state = get_function_response['Configuration']['State'] print('Lambda function state = {}'.format(function_state)) if function_state not in {'Pending'}: break time.sleep(5)B) Test the Lambda function In this step, we will test the Lambda function that we created in the previous step by invoking it synchronously. For this, we will send the first record of the test dataset as a CSV string.The request should be in the following format:`{ "response_content_type": "", "pred_x_csv": ""}`# Set the payload x_test_lambda_payload_csv = ','.join(map(str, x_test[0])) lambda_payload = json.dumps({ 'response_content_type': 'text/plain', 'pred_x_csv': x_test_lambda_payload_csv}) # Invoke the Lambda function and test it lambda_invoke_response = lambda_client.invoke( FunctionName=lambda_function_name, InvocationType='RequestResponse', LogType='Tail', Payload=lambda_payload ) # Print the response try: lambda_function_error = lambda_invoke_response['FunctionError'] print('Function error :: {}'.format(lambda_function_error)) except KeyError: print('No function errors.') print('Response status code = {}'.format(lambda_invoke_response['StatusCode'])) print('Payload :: {}'.format(lambda_invoke_response['Payload'].read()))6. (Optional) Front-end the Lambda function with Amazon API Gateway For some use cases, you may prefer to front-end the Lambda function hosting the model with [Amazon API Gateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html). 
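As a purely illustrative sketch, this is what calling such an endpoint could look like from a client once the API is in place (the invoke URL below is a placeholder; the API itself is created outside this notebook):

```python
# Hypothetical sketch: call the model through an API Gateway endpoint that proxies to the Lambda function
import requests

api_url = 'https://<api-id>.execute-api.{}.amazonaws.com/predict'.format(region_name)  # placeholder
payload = {'response_content_type': 'text/plain',
           'pred_x_csv': ','.join(map(str, x_test[0]))}
response = requests.post(api_url, json=payload)
print(response.status_code, response.text)
```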
With this setup, you can serve the model inference as an API with an HTTPS endpoint.For the API, you have the following options to choose from:* [HTTP API](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api.html)* [REST API](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-rest-api.html)For guidance on choosing the right API option, refer [here](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-vs-rest.html).For information on setting up an AWS Lambda function as the backend for Amazon API Gateway, refer [here](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-integrations.html).Note: The Lambda function that we created in prior steps has the logic to handle both REST and HTTP API requests from Amazon API Gateway, assuming the gateway passes the request payload through as-is to the backend Lambda function. 7. Cleanup As a best practice, you should delete resources and S3 objects when they are no longer required. This will help you avoid incurring unnecessary costs.This step will clean up the resources and S3 objects created by this notebook.Note: Apart from these resources, there will be Docker containers and related images created in the notebook instance that is running this Jupyter notebook. As they are already part of the notebook instance, you do not need to delete them. If you decide to delete them, go to the Terminal of the Jupyter notebook and run the appropriate `docker` commands.# Delete the AWS Lambda function try: lambda_client.delete_function(FunctionName=lambda_function_name) print('AWS Lambda function {} deleted.'.format(lambda_function_name)) except lambda_client.exceptions.ResourceNotFoundException: print('AWS Lambda function {} does not exist.'.format(lambda_function_name)) # Delete the ECR private repository try: ecr_client.delete_repository(repositoryName=container_image_name, force=True) print('ECR repository {} deleted.'.format(container_image_name)) except ecr_client.exceptions.RepositoryNotFoundException: print('ECR repository {} does not exist.'.format(container_image_name)) # Delete data from S3 bucket for file in s3_bucket_resource.objects.filter(Prefix='{}/'.format(nb_name)): file_key = file.key print('Deleting {} ...'.format(file_key)) s3_resource.Object(s3_bucket_resource.name, file_key).delete()Quick Start GuideImport *qcodes* from the *zhinst* namespace as follows:import numpy as np import matplotlib.pyplot as plt import qcodes as qc import zhinst.qcodes as ziqcInitialize the device*zhinst-qcodes* provides instrument drivers for multiple different *Zurich Instruments* devices. It includes drivers for* HDAWG* UHFQA* UHFLI* MFLIAll of the instrument drivers available in *zhinst-qcodes* share some basic device properties. Every device is identified by a `name` and a `serial` number. The `name` is free for the user to choose and is only for their convenience. The `serial`, however, is a unique identifier for every *Zurich Instruments* device. The serial number can be found on the back panel of the instrument and is of the format `DEV1234`. In addition, the driver needs to know what `interface` the device uses and where to find the data server (`host`). The value for the interface defaults to a connection via Ethernet (`1GbE`) and the host address is `localhost` unless specified otherwise.As a first example, we now initialize the instrument driver for an HDAWG.
We name it `hdawg1` and we know that our device's serial number is `dev8138`.hdawg = ziqc.HDAWG("hdawg1", "dev8138", interface="1gbe", host="10.42.0.226")Successfully connected to data server at 10.42.0.226 8004 api version: 6 Successfully connected to device DEV8138 on interface 1GBE Connected to: Zurich Instruments HDAWG (serial:dev8138, firmware:66245) in 3.26sNow the device is connected and we are ready to go!hdawg.get_idn()Access the Device's Nodetree The driver's *nodetree* is a data structure that allows the user to access all the settings and data on the device. The settings are highly structured into logical groups, enumerations and options. In QCoDeS they are represented as *submodules*.For example, all of the device's signal inputs and signal outputs are grouped together, and the HDAWG's 8 sine generators are grouped into the submodule `sines` (a `ChannelList`) enumerated from `0` to `7`. Each `Node` in the nodetree can have other submodules or QCoDeS `Parameters` as attributes. The `Parameters` are the 'leaves' of the tree data structure and represent individual settings or data on the instrument.```
HDAWG
│
└─── sigouts          <--- ChannelList
│    └─── 0           <--- InstrumentChannel
│    │    └─── on     <--- Parameter
│    │    └─── range
│    │    └─── direct
│    │    └─── offset
│    │    └─── ...
│    └─── 1
│
└─── oscs
│    └─── 0
│    │    └─── freq
│    └─── 1
│    └─── 2
│    └─── ...
│
└─── sines
│    └─── ...
│
└─── awgs
│    └─── 0
│    └─── 1
│    └─── 2
│    └─── 4
│
└─── ...
```Enumerated nodes such as the 8 sine-generators or 16 oscillators of the HDAWG are grouped together in `ChannelLists`. For example, the node of the first sine-generator would be accessed via `hdawg.sines[0]`. In a *Jupyter* notebook or a console it is easy to navigate through the nodetree.print([k for k in hdawg.submodules.keys()]) print([k for k in hdawg.parameters.keys()]) hdawg.sines[0:2]The leaves at the end of the tree are QCoDeS `Parameters`. In case you wonder what a certain parameter does, you can print an insightful description.print(hdawg.oscs[0].freq.__doc__)* `Node`: /DEV8138/OSCS/0/FREQ * `Description`: Frequency control for each oscillator. * `Properties`: Read, Write, Setting * `Type`: Double * `Unit`: Hz Parameter class: * `name` freq * `label` freq * `unit` Hz * `vals` NoneThe parameters are callable to set and get the instrument values.# set oscillator frequency to 100 MHz hdawg.nodetree.oscs[0].freq(100e6) # what frequency is the oscillator set to now? hdawg.nodetree.oscs[0].freq()Tutorial: Implement your own Riemannian Geometry Lead author: .Geomstats provides several Riemannian manifolds in its `geometry` folder. Yet, the manifold that you are interested in might not be available there.This notebook shows how to use Riemannian geometry on any manifold defined by an immersion into a Euclidean space, such as high-dimensional surfaces immersed in $\mathbb{R}^n$. Specifically, we focus on the case of an embedded manifold $M$ that can be defined by a map $f: M \rightarrow \mathbb{R}^n$ called the immersion, whose differential $df_x$ is injective for all $x \in M$.This immersion allows us to define the pull-back metric $g$ on $M$ as:$$g : T_xM \times T_x M \rightarrow \mathbb{R}\\u, v \rightarrow \langle df_x(u), df_x(v) \rangle$$where $\langle \cdot, \cdot \rangle$ represents the Euclidean inner-product of the embedding space. The pull-back metric gives a structure of Riemannian manifold to $M$.
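In local coordinates $x = (x^1, \dots, x^d)$ on $M$, the components of the pull-back metric follow directly from the partial derivatives of the immersion; spelled out for reference:$$g_{ij}(x) = \left\langle \frac{\partial f}{\partial x^i}(x), \frac{\partial f}{\partial x^j}(x) \right\rangle, \qquad i, j = 1, \dots, d,$$which, in effect, is the quantity that `PullbackMetric` has to evaluate from the Jacobian of the immersion.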
In particular, we can compute the Riemannian exp and log maps, the Riemannian distance, the Riemannian parallel transport, etc.This notion illustrates the computation of the pull-back metric, using the class `PullbackMetric` from geomstats, on two embedded manifolds:- the 2-sphere $S^2$ embedded in $\mathbb{R}^3$,- a surface defined by: $x, y \rightarrow z = x^2 + y^2$ embedded in $\mathbb{R}^3$. Setupimport os import sys import time import warnings sys.path.append(os.path.dirname(os.getcwd())) warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import geomstats.visualization as viz import geomstats.backend as gsWe import the main structure used in this notebook: the `PullbackMetric`:from geomstats.geometry.pullback_metric import PullbackMetricImmersed manifolds: the example of the 2-sphere We first consider the simple example of the 2-sphere. We define the immersion of the 2-sphere $S^2$ into the Euclidean space $\mathbb{R}^3$ as follows:$$f : S^2 \rightarrow \mathbb{R}^3\\(\theta, \phi) \rightarrow (\cos\phi.\sin\theta, \sin\phi.\sin\theta, \cos\theta)$$def sphere_immersion(spherical_coords): theta = spherical_coords[..., 0] phi = spherical_coords[..., 1] return gs.array( [gs.cos(phi) * gs.sin(theta), gs.sin(phi) * gs.sin(theta), gs.cos(theta)] )For the purpose of visualizing the results in the embedding space $\mathbb{R}^3$, we will need the jacobian of the immersion, which we compute here:jac_sphere_immersion = gs.autodiff.jacobian(sphere_immersion)We use the `PullbackMetric` structure to define the Riemannian metric on $S^2$ from the immersion. Note that the Riemannian metric on the sphere is already implemented in Geomstats using closed forms with the class `Hypersphere`. However, this notebook showcases how we can recover the computations of Riemanian geometry by only relying on the immersion.sphere_metric = PullbackMetric(dim=2, embedding_dim=3, immersion=sphere_immersion)Now, we can access the methods from any Riemannian metric, i.e. the Riemannian exp and log maps, the parallel transport, etc. We first show the computation of the Riemannian exp map of a tangent vector at a point.point_a = gs.array([gs.pi / 2.0, -gs.pi / 2.0]) tangent_vec = gs.array([0.0, gs.pi / 3.0]) end_point = sphere_metric.exp(tangent_vec=tangent_vec, base_point=point_a) print(end_point)[ 1.57079633 -0.52359878]And visualize the result of the Riemannian exp map in the embedding space $\mathbb{R}^3$:%matplotlib notebook # We immerse the points and the tangent vector in R^3 immersed_point_a = sphere_immersion(point_a) immersed_tangent_vec = gs.matmul(jac_sphere_immersion(point_a), tangent_vec) immersed_end_point = sphere_immersion(end_point) # We plot our results fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111, projection="3d") viz.plot(immersed_point_a, ax=ax, space="S2", label="Initial point", s=80) arrow = viz.Arrow3D(immersed_point_a, vector=immersed_tangent_vec) arrow.draw(ax, color="black", label="Tangent vector") viz.plot(immersed_end_point, ax=ax, space="S2", label="End point", s=80) ax.set_title("Riemannian Exp map on the sphere") ax.legend() ax.grid(False) plt.axis("off");Next, we show the computation of the parallel transport on the sphere. Note that `step`, `n_steps`, `tol` and `alpha` are integration parameters that control the efficiency-accuracy tradeoff of the computation.*Note*: The accuracy of the computation varies for the different backends. 
We recommend using `autograd`, and we discourage the use of `tensorflow`.point_a = gs.array([gs.pi / 2.0, -gs.pi / 2.0]) tangent_vec = gs.array([0.0, gs.pi / 3.0]) tangent_vec_to_transport = gs.array([gs.pi / 4.0, gs.pi / 3.0]) time_start = time.perf_counter() parallel_transport = sphere_metric.ladder_parallel_transport( tangent_vec=tangent_vec_to_transport, direction=tangent_vec, base_point=point_a, step="euler", n_steps=1, tol=1e-6, alpha=1, ) time_elapsed = time.perf_counter() - time_start print(f"Computing time for parallel transport: {time_elapsed:5.2f} secs") display(parallel_transport) transported_tangent_vec = parallel_transport["transported_tangent_vec"] end_point = parallel_transport["end_point"]Computing time for parallel transport: 4.71 secsWe visualize the result of the parallel transport in the embedding space $\mathbb{R}^3$:%matplotlib notebook # We first immerse the points and tangent vectors into the embedding space R^3 immersed_point_a = sphere_immersion(point_a) immersed_end_point = sphere_immersion(end_point) immersed_tangent_vec = gs.matmul(jac_sphere_immersion(point_a), tangent_vec) immersed_tangent_vec_to_transport = gs.matmul( jac_sphere_immersion(point_a), tangent_vec_to_transport ) immersed_transported_tangent_vec = gs.matmul( jac_sphere_immersion(end_point), transported_tangent_vec ) # We verify manually that the immersed tangent vector is actually tangent to the sphere # as the plot can be sometimes misleading. We use the method of the Hypersphere class. from geomstats.geometry.hypersphere import Hypersphere sphere = Hypersphere(dim=2) is_tangent = sphere.is_tangent( immersed_transported_tangent_vec, base_point=immersed_end_point ) print("is_tangent = ", is_tangent) # We plot the results fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111, projection="3d") viz.plot(immersed_point_a, ax=ax, space="S2", label="Initial point", s=80) arrow = viz.Arrow3D(immersed_point_a, vector=immersed_tangent_vec) arrow.draw(ax, color="black", label="Tangent vector") arrow = viz.Arrow3D(immersed_point_a, vector=immersed_tangent_vec_to_transport) arrow.draw(ax, color="red", label="Tangent vector to transport") viz.plot(immersed_end_point, ax=ax, space="S2", label="End point", s=80) arrow = viz.Arrow3D(immersed_end_point, vector=immersed_transported_tangent_vec) arrow.draw(ax, color="orange", label="Transported tangent vector") ax.set_title("Riemannian parallel transport on the sphere") ax.legend() ax.grid(False) plt.axis("off");is_tangent = TrueImmersed manifolds: the example of a surface defined by its graph We consider the example of a 2D surface immersed in $\mathbb{R}^3$. 
The surface is defined by its graph:$$ x, y \rightarrow z = x^2 + y^2$$which leads to the following immersion into $\mathbb{R}^3$:$$f : S^2 \rightarrow \mathbb{R}^3\\(x, y) \rightarrow (x, y, x^2 + y^2)$$ We first implement the graph and the immersion:def surface_graph(x, y): return x**2 + y**2 def surface_immersion(intrinsic_coords): x = intrinsic_coords[..., 0] y = intrinsic_coords[..., 1] return gs.transpose(gs.array([x, y, surface_graph(x, y)]))For the purpose of visualizing the results in the embedding space ℝ3 , we will need the jacobian of the immersion, which we compute here:jac_surface_immersion = gs.autodiff.jacobian(surface_immersion)We also add a utility function to visualization the surface in 3D:%matplotlib notebook def plot_surface(alpha=1.0, ax=None): if ax is None: fig = plt.figure() ax = fig.add_subplot(111, projection="3d") x = y = gs.arange(-3.0, 3.0, 0.1) X, Y = gs.meshgrid(x, y) zs = gs.array(surface_graph(gs.flatten(X), gs.flatten(Y))) Z = gs.reshape(zs, X.shape) ax.plot_surface(gs.to_numpy(X), gs.to_numpy(Y), gs.to_numpy(Z), alpha=alpha) ax.set_xlabel("X") ax.set_ylabel("Y") ax.set_zlabel("Zm") plt.show() return ax ax = plot_surface() ax.grid(False) plt.axis("off");We use the `PullbackMetric` structure to define the Riemannian metric on the surface from the immersion.surface_metric = PullbackMetric(dim=2, embedding_dim=3, immersion=surface_immersion)Now, we can access the methods from any Riemannian metric, i.e. the Riemannian exp and log maps, the parallel transport, etc. We show the computation of the Riemannian exp map:point_a = gs.array([-2.0, -2.0]) tangent_vec = gs.array([0.0, 1.0]) end_point = surface_metric.exp(tangent_vec=tangent_vec, base_point=point_a) print(end_point)[-1.85193664 -0.8761551 ]And visualize the result:%matplotlib notebook # We first immerse the points and tangent vector into the embedding space R^3 immersed_point_a = surface_immersion(point_a) immersed_tangent_vec = gs.matmul(jac_surface_immersion(point_a), tangent_vec) immersed_end_point = surface_immersion(end_point) two_points = gs.vstack([immersed_point_a, immersed_end_point]) # We plot the results ax = plot_surface(alpha=0.3) ax.plot( immersed_point_a[0], immersed_point_a[1], immersed_point_a[2], label="Initial point", marker="o", linestyle="None", ) arrow = viz.Arrow3D(immersed_point_a, vector=immersed_tangent_vec) arrow.draw(ax, color="black", label="Tangent vector") ax.plot( immersed_end_point[0], immersed_end_point[1], immersed_end_point[2], label="End point", marker="o", linestyle="None", ) ax.set_title("Riemannian exponential map on a surface") ax.legend() ax.grid(False) plt.axis("off");Next, we show the computation of the parallel transport on the surface. 
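As an aside (not in the original notebook), the pull-back metric of this graph surface can also be written in closed form: since $\partial f/\partial x = (1, 0, 2x)$ and $\partial f/\partial y = (0, 1, 2y)$, the metric matrix is$$g(x, y) = \begin{pmatrix} 1 + 4x^2 & 4xy \\ 4xy & 1 + 4y^2 \end{pmatrix},$$which provides a handy reference value when checking the numerical computations below.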
Again, note that `step`, `n_steps`, `tol` and `alpha` are integration parameters that control the efficiency-accuracy tradeoff of the computation.point_a = gs.array([-2.0, -2.0]) tangent_vec = gs.array([0.0, 1.0]) tangent_vec_to_transport = gs.array([-0.6, 0.6]) time_start = time.perf_counter() parallel_transport = surface_metric.ladder_parallel_transport( tangent_vec=tangent_vec_to_transport, direction=tangent_vec, base_point=point_a, step="rk4", n_steps=1, tol=1e-14, alpha=2, ) time_elapsed = time.perf_counter() - time_start print(f"Computing time for parallel transport: {time_elapsed:5.2f} secs") display(parallel_transport) transported_tangent_vec = parallel_transport["transported_tangent_vec"] end_point = parallel_transport["end_point"]Computing time for parallel transport: 4.40 secsWe visualize the result of the parallel transport.%matplotlib notebook # We first immerse the points and tangent vectors into the embedding space R^3 immersed_point_a = surface_immersion(point_a) immersed_tangent_vec = gs.matmul(jac_surface_immersion(point_a), tangent_vec) immersed_tangent_vec_to_transport = gs.matmul( jac_surface_immersion(point_a), tangent_vec_to_transport ) immersed_end_point = surface_immersion(end_point) immersed_transported_tangent_vec = gs.matmul( jac_surface_immersion(end_point), transported_tangent_vec ) # We plot the results ax = plot_surface(alpha=0.3) ax.plot( immersed_point_a[0], immersed_point_a[1], immersed_point_a[2], label="Initial point", marker="o", color="orange", ) arrow = viz.Arrow3D(immersed_point_a, vector=immersed_tangent_vec_to_transport) arrow.draw(ax, color="orange", label="Tangent vector to transport") arrow = viz.Arrow3D(immersed_point_a, vector=immersed_tangent_vec) arrow.draw(ax, color="black", label="Tangent vector") ax.plot( immersed_end_point[0], immersed_end_point[1], immersed_end_point[2], label="End point", marker="o", color="green", ) arrow = viz.Arrow3D(immersed_end_point, vector=immersed_transported_tangent_vec) arrow.draw(ax, color="green", label="Transported tangent vector") ax.set_title("Riemannian parallel transport on a surface") ax.legend() ax.grid(False) plt.axis("off");!pip install deap update_checker tqdm stopit xgboost !pip install dask[delayed] dask[dataframe] dask-ml fsspec>=0.3.3 distributed>=2.10.0 !pip install scikit-mdr skrebate !pip install tpot from tpot import TPOTClassifier from sklearn.model_selection import train_test_split from sklearn.datasets import load_digits digits = load_digits() print(digits.data.shape) import matplotlib.pyplot as plt plt.gray() plt.matshow(digits.images[0]) plt.show() digits = load_digits() X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target,train_size=0.75, test_size=0.25) pipeline_optimizer = TPOTClassifier(generations=5, population_size=20,random_state=42, verbosity=2) pipeline_optimizer.fit(X_train, y_train) print(pipeline_optimizer.fitted_pipeline_) print("TEST:") print(pipeline_optimizer.score(X_test, y_test)) pipeline_optimizer.export('tpot_exported_pipeline.py') from sklearn.datasets import load_iris import numpy as np iris = load_iris() X_train, X_test, y_train, y_test = train_test_split(iris.data.astype(np.float64),iris.target.astype(np.float64), train_size=0.75, test_size=0.25, random_state=42) tpot = TPOTClassifier(generations=20, population_size=50, verbosity=2, random_state=42) tpot.fit(X_train, y_train) print(tpot.fitted_pipeline_) print("TEST:") print(tpot.score(X_test, y_test)) tpot.export('tpot_iris_pipeline.py') from mlxtend.plotting import 
plot_confusion_matrix from sklearn.metrics import confusion_matrix ypred = tpot.predict(X_test) matriz = confusion_matrix(y_test,ypred) plot_confusion_matrix(conf_mat=matriz, figsize=(3,3), show_normed=False) plt.tight_layout()Training on the MNIST dataset with TensorFlow 1. Import dependenciesimport numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data %matplotlib inline2. Define parametersmnist = input_data.read_data_sets('/tmp/', one_hot=True) # Define the number of neurons in each layer """ Choosing the number of layers: use 1 layer for linear data, 2 layers for non-linear data, and 3+ layers for highly non-linear data. Too many layers/neurons will cause overfitting. """ n_input_layer = 28*28 # input layer n_hidden_layer_1 = 30 # hidden layer 1 n_hidden_layer_2 = 20 # hidden layer 2 n_output_layer = 10 # output layer W_xh = tf.Variable(tf.random_normal([n_input_layer, n_hidden_layer_1])) b_h1 = tf.Variable(tf.random_normal([n_hidden_layer_1])) W_hh = tf.Variable(tf.random_normal([n_hidden_layer_1, n_hidden_layer_2])) b_h2 = tf.Variable(tf.random_normal([n_hidden_layer_2])) W_ho = tf.Variable(tf.random_normal([n_hidden_layer_2, n_output_layer])) b_o = tf.Variable(tf.random_normal([n_output_layer])) batch_size = 100 # Define the actual input and output data X = tf.placeholder('float', [None, 28*28]) Y = tf.placeholder('float', [None,10])Extracting /tmp/train-images-idx3-ubyte.gz Extracting /tmp/train-labels-idx1-ubyte.gz Extracting /tmp/t10k-images-idx3-ubyte.gz Extracting /tmp/t10k-labels-idx1-ubyte.gz3. Define the neural network modeldef neural_network(x): hidden_layer_1_output = tf.matmul(x, W_xh) + b_h1 hidden_layer_1_activate = tf.nn.sigmoid(hidden_layer_1_output) # activation function hidden_layer_2_output = tf.matmul(hidden_layer_1_activate, W_hh) + b_h2 hidden_layer_2_output = tf.nn.sigmoid(hidden_layer_2_output) output = tf.matmul(hidden_layer_2_output, W_ho) + b_o output = tf.nn.softmax(output) return output4. Traindef train_neural_network(x, y): predict = neural_network(x) cross_entropy = tf.reduce_mean(- tf.reduce_sum(y*tf.log(predict), axis=1)) train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy) # learning rate set to 0.1 to speed up gradient descent init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) epochs = 20000 for epoch in range(epochs): batch_x, batch_y = mnist.train.next_batch(batch_size) sess.run(train_step, feed_dict={x: batch_x, y: batch_y}) correct = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct, 'float')) print('Accuracy:', sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})) train_neural_network(X, Y)Accuracy: 0.9155Data loading and churn label creation: Demo The raw data contains all the users' activities from March 30 to May 12, 2017. (Fresh data!!) Since the data is far too large for a PC, reading it all into memory is not feasible. On the other hand, it is not necessary to use all the activity information to determine whether a user has churned. To be more specific, whether a user has any activity is what matters, while the details of the activities are not considered for the moment.In this demo, the file operations are walked through, from unzipping the original .tar.gz archives to eventually printing the churned user ids to a .txt file. For simplicity, only the data of the first day and the last day are used. Leaner code for processing all the data is given in another notebook. Procedure: 1. Unzip all the "play" activity data in a batch. 2. Choose a cut-off date for the churn labeling: Any user who is active before this cut-off date but has no activity after the date shall be labeled as a churn. 3.
Read only "user_id","date/time" of each activity log before and after the cut-off date, and save them in two new file respectively. 4. Get two sets of active users, before and after the cut-off date: {active_before}, {active_after} 5. Output the churns to a .txt file. {Churn} = {active_before} - {active_before} & {active_after} 1. Unzip the 400+ .tar.gz compressed files of raw data7z can only unzip the .tar.gz files to .tar, so the complete unzip takes two steps: 1. Batch unzip from .tar.gz --> .tar 2. Batch unzip from .tar --> log files## In windows powershell, run the following iteration commands in the raw data directory: ''' $files = Get-ChildItem "../data/raw/" -Filter *_play.log.tar.gz foreach ($f in $files) {7z e $f -oC../data/raw/unzip} cd unzip $files = Get-ChildItem "../raw/unzip" -Filter *_play.log.tar foreach ($f in $files) {7z e $f} '''References:__[for loop in windows powershell](https://stackoverflow.com/questions/18847145/loop-through-files-in-a-directory-using-powershell)____[Unzip .tar.gz files using 7z commands](https://stackoverflow.com/questions/1359793/programmatically-extract-tar-gz-in-a-single-step-on-windows-with-7zip)__ 2. Cut-off date = April 21st. In this demo, the first file of 3/30 is before the cut-off date, while the second file of 5/12 is after the cut-off. In reality there are three weeks before cut-off, and three weeks after. The cut off might need changing later, depending on the model's performance. 3. File operations on the play logs 1. Open *play.log files one by one 2. "Read - Append" for each line 3. Write the new line to a new file for all lines of all files. 4. Meawhile, extract the user_id (first item) in each line, and save them into two python sets for logs before and after the "snapshot date" 5. Save the sets into .log files. 1. Open all play logs, using * wildcardimport glob filepath = 'C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\*play.log' files = glob.glob(filepath) len(files) files log_amounts = [] for the_file in files: f = open(the_file, 'r') lines = f.readlines() log_amounts.append(len(lines)) f.close() log_amounts2. Read the files, append the date to each linewith open('C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\20170512_3_play.log','r') as f: content = f.readlines() len(content) first_line = content[0] first_line first_line_fields = content[0].strip('\n').split('\t') #first_line_fields.append(f.name.split('\\')[-1][8]) first_line_fields first_line_fields.append(f.name.split('\\')[-1][:8]) '\t'.join(first_line_fields) new_line = '\t'.join(first_line_fields) + '\n' new_line3. write the appended lines into a new file Using the f.write() methodoutput = open('C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\output\\all.log','a') for the_file in files: with open(the_file, 'r') as f: lines = f.readlines() for line in lines: contents = line.strip('\n').split('\t') contents.append(f.name.split('\\')[-1][:8]) output.write('\t'.join(contents)+'\n') output.close() with open('C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\output\\all.log','r') as output: lines = output.readlines() len(lines) sum(log_amounts)4. Save the user_ids into sets Delete the all.log just created, as the procedure can be done at the same timefrom sets import Set list_of_sets = [] # for each day's data, set the active users' user_id into a set. 
with open('C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\output\\all.log','a') as output: for the_file in files: with open(the_file, 'r') as f: lines = f.readlines() list_of_sets.append(Set([line.split('\t')[0] for line in lines])) for line in lines: contents = line.strip('\n').split('\t') contents.append(f.name.split('\\')[-1][:8]) output.write('\t'.join(contents)+'\n') [len(each_set) for each_set in list_of_sets]5. Churn labeling and file saving Save the user_id of churns into a new file.active_before, active_after = list_of_sets[0],list_of_sets[1] #. set method: s.intersection(t) returns to a new set of s & t loyal_users = active_before.intersection(active_after) len(loyal_users) #. set method: s.difference(t) returns to a new set of items in s but not in t churn = active_before.difference(active_after) len(churn) new_users = active_after.difference(active_before) len(new_users) # Use loyal_user as an example with open('C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\loyal.log','a') as loyal_file: loyal_file.write('\n'.join(list(loyal_users))+'\n') # check loyal_file with open('C:\\Users\\Sean\\Documents\\BitTiger\\Capston_music_player_python\\loyal.log','r') as loyal_file: lines = loyal_file.readlines() linesDijkstra's algorithm Dijkstra's algorithm finds the shortest paths between one source and the other nodes in a graph. Packagesimport numpy as np import random import matplotlib.pyplot as plt import matplotlib.collections as collections import networkx as nxUser parametersSTARTING_NODE = 5 NUM_NODES = 17 NUM_EDGES = 16Generate a graph# Create a random graph and extract positions and node_ids np.random.seed(5) num_nodes = NUM_NODES num_edges = NUM_EDGES G = nx.gnm_random_graph(num_nodes, num_edges, seed=np.random) node_positions = nx.spring_layout(G) node_ids = nx.nodes(G) # Prepare the adjacency distances adjacency_list = {} adjacency_distances = {} adjacency_labels = {} for node_id, node_adjacency in G.adjacency(): adjacencies = list(node_adjacency.keys()) adjacency_list[node_id] = adjacencies distances = [] for adj_id in adjacencies: dist = np.linalg.norm(node_positions[node_id]-node_positions[adj_id]) dist *= 10.0 # scale up for the display distances.append(dist) adjacency_labels[(node_id, adj_id)] = "{:.1f}".format(dist) adjacency_distances[node_id] = distances fig, ax = plt.subplots(figsize=(10, 10)) plt.title('Graph') nx.draw(G, pos=node_positions, node_color='lightgreen', with_labels = True,ax=ax) nx.draw_networkx_edge_labels(G, pos=node_positions, edge_labels = adjacency_labels,ax=ax) ax.set_axis_on() ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True) plt.show()Dijkstra's algorithm# Node informations which will be updated distance_from_source = [float('inf')] * num_nodes previous_node = [-1] * num_nodes # next node on the source graph to get the shortest route to the source # Set starting Node distance_from_source[STARTING_NODE] = 0 queue = list(node_ids) # Dijkstra's algorithm while len(queue)>0: # Find the node with smallest distance node_index = 0; node_distance = distance_from_source[queue[node_index]] for i in range(len(queue)): if distance_from_source[queue[i]] < node_distance: node_index = i node_distance = distance_from_source[queue[i]] # Remove selected node from queue node = queue.pop(node_index) # Check all the adjacencies of the selected node for i, adjacency in enumerate(adjacency_list[node]): adjacency_distance = adjacency_distances[node][i] dist = node_distance + adjacency_distance if dist < 
distance_from_source[adjacency]: distance_from_source[adjacency] = dist previous_node[adjacency] = node
Show result
# find the node furthest from the source (largest shortest-path distance)
furthest_node = np.argmax(distance_from_source) longest_distance = distance_from_source[furthest_node] longest_path = [] node = furthest_node while (node != -1): longest_path.append(node) node = previous_node[node] print("the longest distance is {:.1f} ".format(longest_distance)) print("the longest path is ", longest_path)
# TODO - add a heatmap on the nodes to show distance from the source
# TODO - add the path from source to target
# TODO - show the path in the graph
Meta-Learning Shared Hierarchies
The [paper](https://arxiv.org/pdf/1710.09767.pdf) proposes an approach to training a hierarchical policy. The main goal of the method is fast fine-tuning of the policy on a new task (one the model has not seen during training).
Idea
The model has a hierarchical structure and consists of a *master policy*, trained separately for each task, and a set of *sub-policies* shared across the whole set of tasks. The master policy is a policy over the sub-policies, i.e. it is responsible for switching between sub-policies while the algorithm runs. The sub-policies, in turn, learn basic behavioural primitives, i.e. each sub-policy handles a specific scenario.
Formalization
A task is modelled as a Markov process $P(s', r | s, a)$, where $s', s$ are the next and current states, $a$ is the action and $r$ is the reward at the current step. There is a distribution over tasks (over Markov processes) $P_{M}$. The agent is described by two sets of parameters $\theta, \phi$, so the agent's policy is $\pi_{\theta, \phi}(a|s)$. Here
* $\theta$ is the set of master-policy parameters, trained from scratch for each task;
* $\phi$ is the set of sub-policy parameters, shared across all tasks and trained over the whole set of tasks.
The objective in the *meta-learning* setting is to optimize the reward accumulated over the agent's lifetime, i.e. over the course of training the agent on the sampled task.
Architecture
The paper proposes a hierarchical policy structure. The shared parameters are $\phi = (\phi_1, \dots, \phi_k)$, where each parameter vector $\phi_k$ corresponds to a separate sub-policy $\pi_{\phi_k}(a|s)$. The parameter vector $\theta$ defines the master policy $\pi_{\theta}(a|s)$, which gives a distribution over the sub-policies. In the proposed method the active sub-policy is switched every $N$ steps $(0, N, 2N, \dots)$.
![Motivating figure: scheme of the approach](../resources/motivation_picture_1.png)
The actor and the critic of the master policy and of the sub-policies are two-layer feed-forward networks.
Training algorithm
The set of sub-policies is trained iteratively, and the master policy is trained anew at every iteration. Training on each task $m \sim P_M$ proceeds in two stages:
Warmup
Pre-training of the master policy; at this stage only the parameters $\theta$ are updated. The played steps are grouped by $N$: an action is the choice of a sub-policy, and the reward is the total reward collected over those $N$ steps.
Joint
Joint training of the master policy and the sub-policies. The agent takes $T$ steps, then $\theta$ is optimized with the steps grouped by $N$, and then $\phi$ is optimized in the usual way. Optimization was done with A2C.
Experiment: WIP
The experiment checks:
* whether the method is able to learn at all. For this, the mean-reward curve on the sampled task is compared between a trained MLSH and an untrained MLSH (i.e. one retrained from scratch for every task);
* the advantage of the hierarchical approach over a single shared policy. For this, the mean reward of MLSH is compared with the mean reward of a single policy trained in the same way, i.e. task by task.
Testing is done in the Minigrid environments: DoorKey5x5, Empty, FourRoom.
**Plot 1** shows the mean reward as a function of the training iteration (the index of the sampled task). The $x$ axis shows the task index times 100 *(TODO: needs fixing, sorry, a leftover)*.
**Plot 2** shows the mean reward over 5 games after each training iteration.
The yellow line is MLSH, the green one is the Shared Policy, and the beige one is the untrained MLSH. All plots are for the DoorKey environment.
![Plot 1](../resources/mean_rewards.png)![Plot 2](../resources/seen_rewards.png)
At the current stage the experiment confirms only the first point: the mean reward of MLSH grows. Similar curves are obtained on the other small environments, but they are not included here because the results are inconclusive. The reward also fluctuates strongly during training; this may be caused by the chosen training method (A2C) and the small batch size. Still, when an ordinary policy is trained in the ordinary regime (not the "one epoch per task" regime), the algorithm converges. The next steps are to recheck the A2C implementation for bugs and to run experiments with PPO, as in the original paper.
Other
The wandb runs for these experiments:
* [MLSH reset](https://app.wandb.ai/morgachev/mlsh/runs/2d4etdkz?workspace=user-morgachev)
* [MLSH](https://app.wandb.ai/morgachev/mlsh/runs/2jeevlst?workspace=user-morgachev)
* [Shared Policy](https://app.wandb.ai/morgachev/mlsh/runs/2vi3styx?workspace=user-morgachev)
The corresponding notebooks are in the repository.
import sys import wandb import torch from tqdm import tqdm sys.path.append("..") %load_ext autoreload %autoreload 2 import numpy as np from gym import wrappers from torch import nn from matplotlib import pyplot as plt from src import utils as utils
env_name = "MiniGrid-DoorKey-5x5-v0" # env_name = "MiniGrid-Empty-Random-5x5-v0" # env_name = "MiniGrid-DoorKey-8x8-v0" env = utils.make_env(env_name) obs_space_shape = env.observation_space.shape n_actions = env.action_space.n plt.title('Game image') plt.imshow(env.render('rgb_array')) plt.show()
from src.a2c import A2CAlgo config = { "max_reward": 0.99, "device": "cpu", "env": env_name, "hidden_dim": 128, "emb_dim": 128, "n_env": 8, "gamma": 0.99, "max_grad_norm": 0.5, "lr": 0.001, "value_loss_coef": 0.5, "entropy_coef": 0.01, "n_sub": 4, "sub_n_iter": 100, "sub_n_steps": 3, "sub_lr": 1e-4, "master_n_iter": 30, "master_step_size": 3, "master_n_steps": 3, "master_lr": 1e-3, "n_iter_epoch": 50, "n_steps_sub": 16 }
# import os # os.environ["WANDB_MODE"] = "dryrun"
from src.mlsh_model import MLSHAgent from src.env_pool import MLSHPool agent = MLSHAgent( config["n_sub"], n_actions, obs_space_shape[1] ) for p in agent.parameters(): nn.init.uniform_(p, -0.1, 0.1) pool = MLSHPool(agent, lambda : utils.make_env(env_name), config["n_env"], random_reset=False)
wandb.init(project="mlsh", monitor_gym=True, name=f"mlsh_{env_name[9:]}+{config['n_sub']}_fixed", config=config, dir="..", magic=True, group="tests") wandb.watch(agent)
a2c_subpolicies = \ A2CAlgo(agent.subpolicies.parameters(), config["device"], n_actions, config["gamma"], config["max_grad_norm"], config["entropy_coef"], config["sub_lr"], config["value_loss_coef"]) ac2_master = \ A2CAlgo(list(agent.master_policy.parameters()), config["device"], config["n_sub"], config["gamma"], config["max_grad_norm"], config["entropy_coef"], config["master_lr"], config["value_loss_coef"]) from src import mlsh_algo for i in 
tqdm(range(300)): pool.update_seeds() for seed, env in zip(pool.seeds, pool.envs): env.seed(seed) env.reset() for p in agent.master_policy.parameters(): nn.init.uniform_(p, -0.1, 0.1) mlsh_algo.warmup(ac2_master, pool, config["master_n_iter"], config["master_step_size"], config["master_n_steps"], config["n_env"]) epoch_rew = mlsh_algo.joint_train( ac2_master, a2c_subpolicies, pool, config["sub_n_iter"], config["master_step_size"], config["sub_n_steps"], config["n_env"])[0] with torch.no_grad(): wandb.log({ "mean_rewards_epoch": epoch_rew, "seen_evaluate_reward": np.mean(utils.evaluate_mlsh(agent, env, 5, config["master_step_size"], last_env=pool.seeds[0])[0]), "unseen_evaluate_reward": np.mean(utils.evaluate_mlsh(agent, env, 5, config["master_step_size"], last_env=None)[0]) })82%|████████▏ | 245/300 [2:24:47<29:29, 32.17s/it]from google.colab import drive drive.mount('/content/drive')Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code Enter your authorization code: ·········· Mounted at /content/driveimports#For the system import os #Manage of time from datetime import datetime from pytz import timezone import time #Manage of files import pandas as pd import csv #scrap from bs4 import BeautifulSoup from openpyxl.workbook import Workbook import requests %cd 'drive/My Drive/Colab Notebooks/4SS/4SS_db' !ls def centered_len(max_len): def centered_string(string): if type(string) != 'string': string = str(string) rest = max_len - len(string) n_spaces = int(rest/2) string_spaces = ' ' * n_spaces centered_string = string_spaces + string + string_spaces if len(centered_string)Identificador de series extras Se usa parquet por que es más rápido para cargar DF# %cd '..' 
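# (Added sketch, not from the original notebook.) A quick way to check the claim above that
# parquet loads faster than the CSV copy of the same table. The file names below are only
# placeholders following the master_db naming used later in this notebook; point them at any
# existing csv/parquet pair before running.
import time
parquet_path = 'br/parquet/br-master_db_music.parquet'  # placeholder
csv_path = 'br/csv/br-master_db_music.csv'              # placeholder
t0 = time.time(); _ = pd.read_parquet(parquet_path); print('parquet:', round(time.time() - t0, 3), 's')
t0 = time.time(); _ = pd.read_csv(csv_path, sep='|'); print('csv    :', round(time.time() - t0, 3), 's')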
%cd 'br/parquet' !ls databases = os.listdir() n_databases = len(databases) df = [None]*n_databases n_headers = [None]*n_databases headers = [None]*n_databases largest_len_db = 0 n = 0 for db in databases: dataframe = pd.read_parquet(db) df[n] = db headers[n] = list(dataframe.columns) n_headers[n] = len(headers[n]) if len(db) > largest_len_db: largest_len_db = len(db) n = n + 1 # print(largest_df) centered_db = centered_len(largest_len_db) centered_header = centered_db('masterdbs') dashes = '-'*largest_len_db print(f' {centered_header} | len | headers') print(f' {dashes} | --- | ') for d in range(len(df)): print(f'{centered_db(df[d])} | {n_headers[d]} | {headers[d]}') if d < (len(df)-1): if n_headers[d+1]>11: print(f' {dashes} | *** | ------') if n_headers[d]>11: edition = pd.read_parquet(df[d]) edition['time'].fillna(edition['Date'], inplace=True) edition.drop(['Date'],axis=1, inplace=True) file_name = f'{df[d][:(len(df[d])-8)]}' csv_file = f'{file_name}.csv' excel_file = f'{file_name}.xlsx' parquet_file = f'{file_name}.parquet' edition.to_csv(csv_file,sep="|") edition.to_excel(excel_file, index=False) edition.to_parquet(parquet_file) new_headers = list(edition.columns) len_new_headers = len(new_headers) print('check:') print(f'{centered_db(df[d])} | {len_new_headers} | {new_headers}') print(f' {dashes} | *** | ------') def extract_inter_rank(rank): inter_rank = int(rank[1:len(rank)]) return inter_rank ranks = edition['Rank'].str.extract(r'(\d+)') ranks edition = pd.read_parquet('br-master_db_music.parquet') edition.dtypes edition = pd.read_csv('master_db_digital-text.csv', sep="|") right_serie = edition['Date'] right_serie left_serie = edition['time'] right_serie = edition['Date'] # pd.merge(left_serie, right_serie, how="outer") right_serie.fillna(left_serie) edition['time'].fillna(edition['Date'], inplace=True) edition.drop(['Date'],axis=1, inplace=True) edition edition.to_csv("master_db_digital-text_3.csv",sep="|") edition.to_excel("master_db_digital-text_3.xlsx")Laboratorio# #por pais en paises # initial_time = datetime.now() # print(initial_time) # country_counter = 0 # print(' Added date/time | With date/time | csv_files | country | Category ') # centered_missing = centered_len(len('Missing date/time')) # centered_with = centered_len(len('With date/time')) # centered_csv = centered_len(len('csv_files')) # centered_country = centered_len(len('country')) # #largest category name: musical-instruments # centered_category = centered_len(len('musical-instruments')) # total_csv_files = 0 # total_dirs = 0 # total_changed = 0 # error_count = 0 # errors = [] # for country in countries: # country_entries = os.listdir(country) # if country_counter > 0: # print(' ----------------- | -------------- | --------- | ------- | -------------------') # #por carpeta en paises # for entry in country_entries: # files_ok = False # try: # total_dirs = total_dirs + 1 # #Archivos en carpetas # files_dir = f'{country}/{entry}' # folder_files = os.listdir(files_dir) # files_ok = True # except: # total_dirs = total_dirs + 1 # pass # if files_ok == True: # if folder_files[:2] != country: # csv_counter = 0 # with_dt = 0 # changed_dt = 0 # for file_content in folder_files: # total_dirs = total_dirs + 1 # #Archivo csv # if file_content[-3:] == 'csv': # csv_counter = csv_counter + 1 # complete_file = f'{country}/{entry}/{file_content}' # with open(complete_file, mode="r", encoding='utf-8') as csv_file: # csv_reader = csv.reader(csv_file, delimiter='|') # try: # header = next(csv_reader) # if header[0] == 'time' or 
header[0] == 'Date': # with_dt = with_dt + 1 # else: # rename_folders_files(complete_file, ':', '_') # try: # add_date(complete_file) # except: # pass # changed_dt = changed_dt + 1 # total_changed = total_changed + 1 # if header[0] == header[1]: # error_count = error_count + 1 # errors.append(complete_file) # elif header[0] == header[1][:4]: # error_count = error_count + 1 # errors.append(complete_file) # csv_file.close() # #File is empty # except StopIteration: # csv_file.close() # print(f' {centered_missing(changed_dt)}', end=' | ') # print(centered_with(with_dt), end=' | ') # print(centered_csv(csv_counter), end=' | ') # print(centered_country(country), end=' | ') # print(centered_category(entry)) # total_csv_files = total_csv_files + csv_counter # country_counter = country_counter+1 # final_time = datetime.now() # time_elapsed = final_time - initial_time # print(f'\nPasaron {time_elapsed.total_seconds()} segundos') # print(final_time) # print(f'\nChanged to check: {total_changed}') # print(f'Total of csv files = {total_csv_files}') # print(f'Total dirs: {total_dirs}') # print(f'\nErrors: {error_count}') # print(errors)Experimento para unir bases con Pandaspd.options.display.max_rows = 300 country = 'mx' folder = 'handmade' entry = 'mx/handmade/mx-test_handmade_2020-07-31 16_00.csv' master_db_csv = f'{country}/csv/master_db_{folder}.csv' master_db_excel = f'{country}/excel/master_db_{folder}.xlsx' add_1 = 'mx/handmade/mx-test_handmade_2020-07-31 16_28.csv' add_2 = 'mx/handmade/mx-test_handmade_2020-07-31 16_49.csv' add_3 = 'mx/handmade/mx-test_handmade_2020-07-31 17_10.csv' to_add = [add_1, add_2, add_3] main_df = pd.read_csv(entry, sep='|') for csv_file in to_add: add_df = pd.read_csv(csv_file, sep='|') main_df = pd.concat([main_df, add_df]) main_df.reset_index(drop=True, inplace=True) main_df.to_csv(master_db_csv, sep="|", index=False) main_df.to_excel(master_db_excel, index=False) main_dfCentered Lendef centered_len(max_len): def centered_string(string): if type(string) != 'string': string = str(string) rest = max_len - len(string) n_spaces = int(rest/2) string_spaces = ' ' * n_spaces centered_string = string_spaces + string + string_spaces if len(centered_string)DB Unifier%cd 'testing' !ls initial_time = datetime.now() print(initial_time) country_counter = 0 print(' country | Category | csv_files | Created_Master_DB | time') centered_csv = centered_len(len('csv_files')) centered_country = centered_len(len('country')) #largest category name: musical-instruments centered_category = centered_len(len('musical-instruments')) centered_file = centered_len(len('Created_Master_DB')) total_files = 0 total_dirs = 0 # total_folders = 0 total_bases = 0 error_count = 0 total_csv_files = 0 errors = [] error_message = [] countries = ['br'] for country in countries: # total_folders = total_folders + 1 country_entries = os.listdir(country) if country_counter > 0: print(' ------- | ------------------- | --------- | ------------------- |') #por carpeta en paises for entry in country_entries: is_folder = False if entry == 'csv' or entry == 'excel' or entry == 'parquet': pass else: try: # total_folders = total_folders + 1 #Archivos en carpetas files_dir = f'{country}/{entry}' folder_files = os.listdir(files_dir) is_folder = True except: pass if is_folder == True: # total_folders = total_folders + 1 #Archivos en carpetas folder_files = os.listdir(country + '/' + entry) if folder_files[:2] != country: print(f' {centered_country(country)}', end=' | ') print(centered_category(entry), end=' | ') csv_counter = 
0 for file_content in folder_files: total_dirs = total_dirs + 1 #Archivo csv if file_content[-3:] == 'csv': start_master = datetime.now() #here's why csv_counter never enters as 0 in main_df csv_counter = csv_counter + 1 complete_file = country + '/' + entry + '/' + file_content #For a start if csv_counter == 1: # print('entered main') main_df = pd.read_csv(complete_file, sep='|') #Add the rest else: try: add_df = pd.read_csv(complete_file, sep='|') main_df = pd.concat([main_df, add_df]) # print(f'loaded {csv_counter}') except Exception as e: errors.append(complete_file) error_message.append(e) error_count = error_count + 1 pass print(centered_csv(csv_counter), end=' | ') total_csv_files = total_csv_files + csv_counter master_db_csv = f'{country}/csv/{country}-master_db_{entry}.csv' master_db_excel = f'{country}/excel/{country}-master_db_{entry}.xlsx' master_db_parquet = f'{country}/parquet/{country}-master_db_{entry}.parquet' finish_master = datetime.now() time_file_making = finish_master - start_master main_df.reset_index(drop=True, inplace=True) main_df.to_csv(master_db_csv, sep="|", index=False) main_df.to_excel(master_db_excel, index=False) main_df.to_parquet(master_db_parquet) print(centered_file('csv, excel, parquet'),"| ", time_file_making.total_seconds(), " sec") total_bases = total_bases + 1 country_counter = country_counter + 1 final_time = datetime.now() time_elapsed = final_time - initial_time print(f'\nPasaron {time_elapsed.total_seconds()} segundos') print(final_time) print(f'\nBases creadas: {total_bases} en csv y excel') print(f'Total of csv files = {total_csv_files}') print(f'Total dirs: {total_dirs}') if error_count > 0: print(f'Errores en archivos: {error_count}. Estos son los archivos:') for e in range(len(errors)): print(f'Message: {error_message[e]} | file: {errors[e]} ')2020-08-12 04:48:53.964066 country | Category | csv_files | Created_Master_DB | time br | kitchen | 114 | csv, excel, parquet | 0.012722 sec br | hi | 97 | csv, excel, parquet | 0.302133 sec br | electronics | 115 | csv, excel, parquet | 0.296863 sec br | sports | 115 | csv, excel, parquet | 0.37554 sec br | baby-products | 98 | csv, excel, parquet | 0.29418 sec br | amazon-devices | 116 | csv, excel, parquet | 0.17662 sec br | furniture | 92 | csv, excel, parquet | 0.314285 sec br | lawn-and-garden | 92 | csv, excel, parquet | 0.280246 sec br | appliances | 92 | csv, excel, parquet | 0.17013 sec br | home | 92 | csv, excel, parquet | 0.177683 sec br | beauty | 92 | csv, excel, parquet | 0.265231 sec [...]Corrigiendo todoMX__COL_tz = 'America/Mexico_City' timezone_MXCOL = timezone(MX__COL_tz) prueba = datetime.now(timezone_MXCOL) prueba def extract_soup(url, preview=True): response = requests.get(url) status = response.status_code soup = BeautifulSoup(response.text, 'lxml') if preview==True: print(soup.prettify()) return soup, status def top_amazon_boxes(soup): boxes = soup.find_all('div', attrs={'class':"a-section a-spacing-none aok-relative"}) return boxes def scrap_boxes(boxes, domain, dict_to_rows=False): ranks = [None]*50 product_names = [None]*50 image_urls = [None]*50 product_links = [None]*50 star_ratings = [None]*50 reviews = [None]*50 authors_companies = [None]*50 editions_consoles = [None]*50 min_prices = [None]*50 max_prices = [None]*50 time_log = [None]*50 amz_mx_url = 'https://www.amazon.com.mx' n_box = 0 for box in boxes: MX__COL_tz = 'America/Mexico_City' timezone_MXCOL = timezone(MX__COL_tz) time_log[n_box] = datetime.now(timezone_MXCOL) rank_box = box.find_all('span', 
attrs={'class':'zg-badge-text'}) products_and_image_box = box.find_all('div', attrs={'class' : 'a-section a-spacing-small'}) product_links_box = box.find_all('a', attrs={'class' : 'a-link-normal'}) star_ratings_box = box.find_all('span', attrs={'class' : 'a-icon-alt'}) reviews_box = box.find_all('a', attrs={'class' : 'a-size-small a-link-normal'}) authors_company_box = box.find_all('span', attrs={'class' : 'a-size-small a-color-base'}) editions_console_box = box.find_all('span', attrs={'class' : 'a-size-small a-color-secondary'}) prices_box = box.find_all('span', attrs={'class' : "p13n-sc-price"}) ranks[n_box] = int(rank_box[0].get_text()[1:]) #In case the element was removed (yes, it happens) try: product_names[n_box] = products_and_image_box[0].img.get('alt') image_urls[n_box] = products_and_image_box[0].img.get('src') product_links[n_box] = amz_mx_url + product_links_box[0].get('href') except: pass try: star_ratings[n_box] = float(star_ratings_box[0].get_text()[:3]) reviews[n_box] = int(reviews_box[0].get_text().replace(',','')) except: pass #Individual cases try: authors_companies[n_box] = authors_company_box[0].get_text() except:pass try: editions_consoles[n_box] = editions_console_box[0].get_text() except:pass #Courrencies if domain == 'mx': coin_symbol = 1 elif domain == 'br': coin_symbol = 2 try: min_prices[n_box] = float(prices_box[0].get_text()[coin_symbol:].replace(',','')) except: pass try: max_prices[n_box] = float(prices_box[1].get_text()[coin_symbol:].replace(',','')) except: pass n_box = n_box + 1 # Dictionary boxes_dict = { 'time' : time_log, "Rank" : ranks, "Product Names": product_names, "Image urls": image_urls, "Product links": product_links, "Stars": star_ratings, "Reviews": reviews, "Authors/Company": authors_companies, "Edition/Console": editions_consoles, "Price_std_or_min" : min_prices, "Max_prices" : max_prices } if dict_to_rows == True: dict_rows = [None]*50 for n in range(len(dict_rows)): dict_rows[n] = [ boxes_dict["time"][n], boxes_dict["Rank"][n], boxes_dict["Product Names"][n], boxes_dict["Image urls"][n], boxes_dict["Product links"][n], boxes_dict["Stars"][n], boxes_dict["Reviews"][n], boxes_dict["Authors/Company"][n], boxes_dict["Edition/Console"][n], boxes_dict["Price_std_or_min"][n], boxes_dict["Max_prices"][n]] return boxes_dict, dict_rowsExperimento con de Append con reader y writer Desde un archivo csvbox_file = 'box_scraped.csv' with open(box_file, mode="r", encoding="utf-8") as box_file: box_reader = csv.reader(box_file, delimiter='|') header = next(box_reader) box_data = [line for line in box_reader] # print(box_data) # # add row to CSV file database_file = 'Testof master_db_music.csv' with open(database_file, "a", encoding="utf-8") as db_file: db_writer = csv.writer(db_file, delimiter='|') db_writer.writerow('\n') db_writer.writerows(box_data) # # entry = 'mx/handmade/mx-test_handmade_2020-07-31 16_00.csv' main_df = pd.read_csv(database_file, sep='|') main_dfDesde el scraper!ls amz_rows = [None]*50 for n in 49: amz_rows[n] = [ amz_key_top_boxes["time"][n], amz_key_top_boxes["Rank"][n], amz_key_top_boxes["Product Names"][n], amz_key_top_boxes["Image urls"][n], amz_key_top_boxes["Product links"][n], amz_key_top_boxes["Stars"][n], amz_key_top_boxes["Reviews"][n], amz_key_top_boxes["Authors/Company"][n], amz_key_top_boxes["Edition/Console"][n], amz_key_top_boxes["Price_std_or_min"][n], amz_key_top_boxes["Max_prices"][n]] print(amz_rows[n]) url = f'https://www.amazon.com.mx/gp/bestsellers/music/ref=zg_bs_nav_0' soup, status = extract_soup(url, 
preview=False) print(f'status de la solicitud: {status}') boxes = top_amazon_boxes(soup) amz_key_top_boxes, amz_rows = scrap_boxes(boxes, 'mx', dict_to_rows=True) database_file = 'Testof_master_db_music.csv' main_df = pd.read_csv(database_file, sep='|') with open(database_file, "a", encoding="utf-8") as db_file: db_writer = csv.writer(db_file, delimiter='|') for row in amz_rows: db_writer.writerow(row) main_df def update_csv(file, rows, sep=','): with open(file, "a", encoding="utf-8") as csv_file: csv_file = csv.writer(csv_file, delimiter=sep) for row in rows: csv_file.writerow(row) url = f'https://www.amazon.com.mx/gp/bestsellers/music/ref=zg_bs_nav_0' soup, status = extract_soup(url, preview=False) print(f'status de la solicitud: {status}') boxes = top_amazon_boxes(soup) amz_key_top_boxes, amz_rows = scrap_boxes(boxes, 'mx', dict_to_rows=True) database_file = 'Testof_master_db_music.csv' main_df = pd.read_csv(database_file, sep='|') update_csv(database_file, amz_rows, sep='|') main_df.to_parquet(database_file) domain_dict = {'Mexico' : 'mx', 'Brazil' : 'br'} !ls domain_path = '4SS/4SS_db/testing/{}' date = datetime.utcnow() datemain scraperdef scrap_amazon(): MX__COL_tz = 'America/Mexico_City' timezone_MXCOL = timezone(MX__COL_tz) log_status = [None]*45 log_date = [None]*45 log_domain = [None]*45 log_category = [None]*45 n_log = 0 country_count = 0 print(' Log Date | Country | category | status | Loaded |') for country_name in domain_dict: country_count = country_count + 1 if country_count > 1: print(' -------------------------- | ------- | --------------------- | ------ | ------ |') country = domain_dict[country_name] if country == 'mx': categories_dict = mx_dict elif country == 'br': categories_dict = br_dict for key in categories_dict: #Date-time log_date[n_log] = datetime.now(timezone_MXCOL) print(f' {log_date[n_log]}', end=' | ') #Country where you are at log_domain[n_log] = country print(f' {log_domain[n_log]} ', end=' | ') #Category that is scraping category = categories_dict[key] log_category[n_log] = category centered_category = centered_string(log_category[n_log]) print(centered_category, end=' | ') #The main scrap url = f'https://www.amazon.com.{country}/gp/bestsellers/{category}/ref=zg_bs_nav_0' soup, status = extract_soup(url, preview=False) if status == 503: while status == 503: time.sleep(1) soup, status = extract_soup(url, preview=False) log_status[n_log] = status log_date[n_log] = datetime.now() if status ==200: log_status[n_log] = status print(f' {log_status[n_log]} ', end=' | ') boxes = top_amazon_boxes(soup) amz_key_top_boxes, amz_rows = scrap_boxes(boxes, country) add_df = pd.DataFrame.from_dict(amz_key_top_boxes) #Updating csv files csv_file = f'/{country}-master_db_{category}.csv' dir_csv_testing = f'/content/drive/My Drive/Colab Notebooks/4SS/4SS_db/testing/{country}/csv/{csv_file}' update_csv(dir_csv_testing, amz_rows, sep='|') #Updating Parquet files parquet_file = f'/{country}-master_db_{category}.parquet' dir_parquet_maindb = f'/content/drive/My Drive/Colab Notebooks/4SS/4SS_db/testing/{country}/parquet/{parquet_file}' main_df = pd.read_parquet(dir_parquet_maindb) main_df = pd.concat([main_df, add_df]) main_df.to_parquet(dir_parquet_maindb) excel_file = f'/{country}-master_db_{category}.xlsx' dir_excel_maindb = f'/content/drive/My Drive/Colab Notebooks/4SS/4SS_db/testing/{country}/excel/{excel_file}' #For the testing main_df.to_excel(dir_excel_maindb, index=False) print(' Yes |') else: log_status[n_log] == status print(f' {log_status[n_log]} ', end=' | ') 
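# (Added notes on the surrounding code, not in the original.)
# - In the else-branch just above, 'log_status[n_log] == status' is a comparison, not an
#   assignment; its result is discarded, so the line is a no-op and
#   'log_status[n_log] = status' was probably intended.
# - Further down, the per-request log rows are assembled into 'dict_rows' while 'log_rows'
#   (still all None) is what update_csv writes out; the two names should refer to the same list.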
log_date[n_log] = datetime.now() print(' +.No.+ |') n_log = n_log + 1 log_dict = { 'log_date' : log_date, 'category' : log_category, 'country' : log_domain, 'status' : log_status } log_rows = [None]*45 for n in range(len(dict_rows)): dict_rows[n] = [ log_date[n], log_category[n], log_domain[n], log_status[n]] log_file = f'/master_db_logs.csv' dir_log_testing = f'/content/drive/My Drive/Colab Notebooks/4SS/4SS_db/testing/{log_file}' update_csv(dir_log_testing, log_rows, sep='|') log_df = pd.DataFrame.from_dict(log_dict) log_print = """ ---------------------- | Log file loaded | ---------------------- """ print(log_print)NLP Sequence Classification using LSTM Recurrent Neural NetworkThis exercise reproduce lee's post:https://machinelearningmastery.com/sequence-classification-lstm-recurrent-neural-networks-python-keras/Sequence classification is a predictive modeling problem where you have some sequence of inputs over space or time and the task is to predict a category for the sequence.What makes this problem difficult is that the sequences can vary in length, be comprised of a very large vocabulary of input symbols and may require the model to learn the long-term context or dependencies between symbols in the input sequence.This exercise shows: 1. How to develop an LSTM model for a sequence classification problem. 2. How to reduce overfitting in the LSTM models through the use of dropout. 3. How to combine LSTM models with Convolutional Neural Networks that excel at learning spatial relationships. How to modify this code for other datasetTo classify multiple classes of text based data, change dense layer's loss function from *binary_crossentropy* to *categorical_crossentropy*To output probability of each class, instead of 1 or 0, change dense layer's activation function from *sigmoid* to *softmax* Referencehttps://machinelearningmastery.com/sequence-classification-lstm-recurrent-neural-networks-python-keras/import numpy from keras.datasets import imdb from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers.embeddings import Embedding from keras.preprocessing import sequence from keras.utils.vis_utils import plot_model # fix random seed for reproducibility numpy.random.seed(7) output_dir = 'output/lstm/'Step 1. Load the IMDB dataset. This exercise uses IMDB movie review sentiment dataset. Each movie review is a variable sequence of words and the sentiment of each movie review is to be classified. Keras provides access to the IMDB dataset built-in. The imdb.load_data() function allows you to load the dataset in a format that is ready for use in neural network and deep learning models.Constrain the dataset to the top 5,000 words, also split the dataset into train (50%) and test (50%) sets.# load the dataset but only keep the top n words, zero the rest top_words = 5000 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)Step 2. truncate and pad the input sequencesTruncate and pad the input sequences so that they are all the same length for modeling. The model will learn the zero values carry no information so indeed the sequences are not the same length in terms of content, but same length vectors is required to perform the computation in Keras.# truncate and pad input sequences max_review_length = 500 X_train = sequence.pad_sequences(X_train, maxlen=max_review_length) X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)Step 3. 
Define, compile and fit LSTM model.The first layer is the Embedded layer that uses 32 length vectors to represent each word. The next layer is the LSTM layer with 100 memory units (smart neurons). Finally, because this is a classification problem we use a Dense output layer with a single neuron and a sigmoid activation function to make 0 or 1 predictions for the two classes (good and bad) in the problem.Because it is a binary classification problem, log loss is used as the loss function (binary_crossentropy in Keras). The efficient ADAM optimization algorithm is used. The model is fit for only 2 epochs because it quickly overfits the problem. A large batch size of 64 reviews is used to space out weight updates.# create the model embedding_vecor_length = 32 model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(LSTM(100)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=64)_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_1 (Embedding) (None, 500, 32) 160000 _________________________________________________________________ lstm_1 (LSTM) (None, 100) 53200 _________________________________________________________________ dense_1 (Dense) (None, 1) 101 ================================================================= Total params: 213,301 Trainable params: 213,301 Non-trainable params: 0 _________________________________________________________________ None Train on 25000 samples, validate on 25000 samples Epoch 1/3 25000/25000 [==============================] - 240s 10ms/step - loss: 0.4333 - acc: 0.7980 - val_loss: 0.3625 - val_acc: 0.8431 Epoch 2/3 25000/25000 [==============================] - 239s 10ms/step - l[...]Step 4. Estimate the performance of the model on unseen reviews.# Final evaluation of the model scores = model.evaluate(X_test, y_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100))Accuracy: 85.91%Step 5. Solve overfitting problem using dropoutRNN such as LSTM generally have the problem of overfitting. 
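One way to see the overfitting directly is to keep the History object returned by fit() and compare the training and validation loss per epoch. This is a small sketch reusing the model and the padded X_train/X_test from the cells above; it adds nothing new to the architecture.
import matplotlib.pyplot as plt
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=64)
# 'loss' and 'val_loss' are recorded per epoch when validation_data is passed
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('binary cross-entropy')
plt.legend()
plt.show()
A validation curve that flattens or rises while the training curve keeps falling is the sign of overfitting that the dropout variants below try to suppress.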
Dropout can be applied to solve the overfitting problem.The following is an example of applying dropout between layers using Keras Dropout layer: model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(Dropout(0.2)) model.add(LSTM(100)) model.add(Dropout(0.2)) model.add(Dense(1, activation='sigmoid')) To apply the gate specific dropout on the input and recurrent connections of the memory units of the LSTM, use the following: model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid'))# LSTM with Dropout for sequence classification in the IMDB dataset import numpy from keras.datasets import imdb from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import Dropout from keras.layers.embeddings import Embedding from keras.preprocessing import sequence # fix random seed for reproducibility numpy.random.seed(7) # load the dataset but only keep the top n words, zero the rest top_words = 5000 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) # truncate and pad input sequences max_review_length = 500 X_train = sequence.pad_sequences(X_train, maxlen=max_review_length) X_test = sequence.pad_sequences(X_test, maxlen=max_review_length) # create the model embedding_vecor_length = 32 model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(Dropout(0.2)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dropout(0.2)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) model.fit(X_train, y_train, epochs=3, batch_size=64) # Final evaluation of the model scores = model.evaluate(X_test, y_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100))_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_2 (Embedding) (None, 500, 32) 160000 _________________________________________________________________ dropout_1 (Dropout) (None, 500, 32) 0 _________________________________________________________________ lstm_2 (LSTM) (None, 100) 53200 _________________________________________________________________ dropout_2 (Dropout) (None, 100) 0 _________________________________________________________________ dense_2 (Dense) (None, 1) 101 ================================================================= Total params: 213,301 Trainable params: 213,301 Non-trainable params: 0 _________________________________________________________________ None[...]Step 6 CNN + LSTMConvolutional Neural Networks excel at learning the spatial structure in input data. The IMDB review data has a one-dimensional spatial structure in the sequence of workds in reviews, and CNN should be good at picking out the features. This learned spatial features may then be learned as sequences by an LSTM layer.We can easily add a one-dimensional CNN and max pooling layers after the Embedding layer which then feed the consolidated features to the LSTM. We can use a smallish set of 32 features with a small filter length of 3. 
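As a quick sanity check of how cheap this convolutional layer is, the arithmetic below (a sketch only, it adds nothing to the model) counts its parameters: 32 filters of length 3 sliding over 32-dimensional embeddings contribute 3*32*32 weights plus 32 biases.
kernel_size, embedding_dim, n_filters = 3, 32, 32
print(kernel_size * embedding_dim * n_filters + n_filters)  # 3104, matching the conv1d row of the model summary below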
The pooling layer can use the standard length of 2 to halve the feature map size.For example, we would create the model as follows: model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')) model.add(MaxPooling1D(pool_size=2)) model.add(LSTM(100)) model.add(Dense(1, activation='sigmoid'))# LSTM and CNN for sequence classification in the IMDB dataset import numpy from keras.datasets import imdb from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers.convolutional import Conv1D from keras.layers.convolutional import MaxPooling1D from keras.layers.embeddings import Embedding from keras.preprocessing import sequence # fix random seed for reproducibility numpy.random.seed(7) # load the dataset but only keep the top n words, zero the rest top_words = 5000 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) # truncate and pad input sequences max_review_length = 500 X_train = sequence.pad_sequences(X_train, maxlen=max_review_length) X_test = sequence.pad_sequences(X_test, maxlen=max_review_length) # create the model embedding_vecor_length = 32 model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')) model.add(MaxPooling1D(pool_size=2)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) model.fit(X_train, y_train, epochs=3, batch_size=64) # Final evaluation of the model scores = model.evaluate(X_test, y_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100))_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_3 (Embedding) (None, 500, 32) 160000 _________________________________________________________________ conv1d_1 (Conv1D) (None, 500, 32) 3104 _________________________________________________________________ max_pooling1d_1 (MaxPooling1 (None, 250, 32) 0 _________________________________________________________________ lstm_3 (LSTM) (None, 100) 53200 _________________________________________________________________ dense_3 (Dense) (None, 1) 101 ================================================================= Total params: 216,405 Trainable params: 216,405 Non-trainable params: 0 _________________________________________________________________ None[...]Step 7. 
With Dropout layers, and increase epoch# LSTM and CNN for sequence classification in the IMDB dataset import numpy from keras.datasets import imdb from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers.convolutional import Conv1D from keras.layers.convolutional import MaxPooling1D from keras.layers.embeddings import Embedding from keras.preprocessing import sequence # fix random seed for reproducibility numpy.random.seed(7) # load the dataset but only keep the top n words, zero the rest top_words = 5000 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words) # truncate and pad input sequences max_review_length = 500 X_train = sequence.pad_sequences(X_train, maxlen=max_review_length) X_test = sequence.pad_sequences(X_test, maxlen=max_review_length) # create the model embedding_vecor_length = 32 model = Sequential() model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length)) model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')) model.add(Dropout(0.2)) model.add(MaxPooling1D(pool_size=2)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dropout(0.2)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) plot_model(model, show_shapes=True, to_file=output_dir+'lstm.png') model.fit(X_train, y_train, epochs=5, batch_size=64) model.save(output_dir+'lstm_model.h5') # Final evaluation of the model scores = model.evaluate(X_test, y_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100))_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_4 (Embedding) (None, 500, 32) 160000 _________________________________________________________________ conv1d_2 (Conv1D) (None, 500, 32) 3104 _________________________________________________________________ dropout_3 (Dropout) (None, 500, 32) 0 _________________________________________________________________ max_pooling1d_2 (MaxPooling1 (None, 250, 32) 0 _________________________________________________________________ lstm_4 (LSTM) (None, 100) 53200 _________________________________________________________________ dropout_4 (Dropout) (None, 100) 0 _________________________________________________________________ dense_4 (D[...]Predictive maintenance Part 1: Data PreparationThe original data can be [downloaded from this link.](https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/turbofan) Since the content in the train and test datasets is different, we are making it uniform before we start the data exploration and the model buiding process. We will convert the data into a more natural format for Vaex.import vaexRead the dataThe data contains a list of sensors. 
These are their names and meanings:| Name |Description |Unit | |-----------|---------------------------------|---------| | T2 | Total temperature at fan inlet | °R | | T24 | Total temperature at LPC outlet | °R | | T30 | Total temperature at HPC outlet | °R | | T50 | Total temperature at LPT outlet | °R | | P2 | Pressure at fan inlet | psia | | P15 | Total pressure in bypass-duct | psia | | P30 | Total pressure at HPC outlet | psia | | Nf | Physical fan speed | rpm | | Nc | Physical core speed | rpm | | epr | Engine pressure ratio (P50/P2) | -- | | Ps30 | Static pressure at HPC outlet | psia | | phi | Ratio of fuel flow to Ps30 | pps/psi | | NRf | Corrected fan speed | rpm | | NRc | Corrected core speed | rpm | | BPR | Bypass Ratio | -- | | farB | Burner fuel-air ratio | -- | | htBleed | Bleed Enthalpy | -- | | Nf_dmd | Demanded fan speed | rpm | | PCNfR_dmd | Demanded corrected fan speed | rpm | | W31 | HPT coolant bleed | lbm/s | | W32 | LPT coolant bleed | lbm/s |column_names = ['unit_number', 'time_in_cycles', 'setting_1', 'setting_2', 'setting_3', 'T2', 'T24', 'T30', 'T50', 'P2', 'P15', 'P30', 'Nf', 'Nc', 'epr', 'Ps30', 'phi', 'NRf', 'NRc', 'BPR', 'farB', 'htBleed', 'Nf_dmd', 'PCNfR_dmd', 'W31', 'W32'] # The training data train_data = vaex.read_csv("./data/train_FD001.txt", sep='\s+', names=column_names) # The testing data test_data = vaex.read_csv("./data/test_FD001.txt", sep='\s+', names=column_names) # The "answer" to the test data y_test = vaex.read_csv('./data/RUL_FD001.txt', names=['remaining_cycles']) y_test['unit_number'] = vaex.vrange(1, 101) y_test['unit_number'] = y_test.unit_number.astype('int')Create proper train and test datasets- in the training set, the engines are run until failure occurs, so we can calculate the target varuable, i,e, the RUL (Remaining Useful Life) based on when a particular engines running;- in the test set the engines are run for some time, and our goal is to predict their RULs. Their RUL are provided in a separate file, so we need to join it so it can be made available for evaluating scores and estimateing model performancedef prepare_data(data, y=None): df = data.copy() # As to not modify the underlying dataframe # Count how many cycles each unit is run for - groupby and count g = df.groupby('unit_number').agg({'max_cycles': vaex.agg.count('time_in_cycles')}) # Join to the main data - basically adds the "max_cycle" column df = df.join(other=g, on='unit_number', how='left') # Calculate the RUL: if y is None: # This is for the train data -> last point is the point of failure # Calculate the RUL df['RUL'] = df.max_cycles - df.time_in_cycles # Drop the column that is not needed anymore df = df.drop(columns=['max_cycles']) else: # This is for the test data -> add the answer to calculate the RUL # Join the answers df = df.join(y, on='unit_number', how='left') # Calculate the RUL df['RUL'] = df.max_cycles + df.remaining_cycles - df.time_in_cycles # Drop the columns that are not needed anymore df = df.drop(columns=['remaining_cycles', 'max_cycles']) # Done return df # Add the RUL to the train and test sets df_train = prepare_data(train_data) df_test = prepare_data(test_data, y=y_test)Quick preview of the datasetsdf_train df_testExport the datasets to HDF5df_train.export_hdf5('./data/data_train.hdf5') df_test.export_hdf5('./data/data_test.hdf5')AssumptionsTransaction cost Total Cost: 0.05% on large portfolioFrazzini, Israel, Moskowitz, ., Trading Costs (April 7, 2018). 
Available at SSRN: https://ssrn.com/abstract=3229719 or http://dx.doi.org/10.2139/ssrn.3229719import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # plt.style.use('ggplot') # # import matplotlib # # matplotlib.style.use('classic') # # sns.set_style("whitegrid") # # %matplotlib inline pd.set_option('display.max_columns', None) import sys positions = pd.read_csv("../../Predictions/Metalearner/df_test_AAPL_predictions.csv") positions.Date = pd.to_datetime(positions.Date, dayfirst=False) # Create extra column of long only returns positions["long_only"] = 1 # Need to read in prices prices = pd.read_csv("../../Raw Data/Price/price_labels.csv") prices["Date"] = pd.to_datetime(prices["Date"], dayfirst=False) # We use adjusted close. This accounts for dividends stock splits (if we're holding and we get dividends, this # will be reflected in the adj close) prices = prices.pivot(columns="Ticker", index="Date", values="Adj Close") returns = np.log(prices).diff() # log returns so that we can add # Index and columns same as positions returns = returns[returns.columns[returns.columns.isin(positions.Ticker.unique())]] returns = returns.loc[positions.Date.unique()] col_order = returns.columns positions = positions[positions.Date < "2019-11-05"] returns = returns.loc[:"2019-11-04"] from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, recall_score, precision_scoreObtain scoring metrics across different predictionsperformance_results = pd.DataFrame() for pred_type in ['Predictions_8k', 'Predictions_10kq', 'Predictions_tweets', 'Predictions_news', 'Predictions_amxn', 'ensemble_pred', 'xgb_pred', 'logreg_pred', 'RF_pred', 'long_only']: preds = positions[~(positions[pred_type] == 0)][pred_type] y_actual = positions[~(positions[pred_type] == 0)].Label performance_results[pred_type] = [ accuracy_score(y_actual, preds), f1_score(y_actual, preds), recall_score(y_actual, preds), precision_score(y_actual, preds) ] plt.figure(figsize=(20,6)) sns.heatmap(performance_results, cmap="Blues", annot=True, cbar_kws = dict(use_gridspec=False,location="bottom")) plt.title("Performance Metrics")Preprocess Data to obtain model returns Pivot positions to get time series of positions of companies by datesec_8k = positions.pivot(columns="Ticker", index="Date", values="Predictions_8k")[col_order] sec_10k10q = positions.pivot(columns="Ticker", index="Date", values="Predictions_10kq")[col_order] tweets = positions.pivot(columns="Ticker", index="Date", values="Predictions_tweets")[col_order] news = positions.pivot(columns="Ticker", index="Date", values="Predictions_news")[col_order] amzn = positions.pivot(columns="Ticker", index="Date", values="Predictions_amxn")[col_order] meta_ensemble = positions.pivot(columns="Ticker", index="Date", values="ensemble_pred")[col_order] meta_xgb = positions.pivot(columns="Ticker", index="Date", values="xgb_pred")[col_order] meta_logreg = positions.pivot(columns="Ticker", index="Date", values="logreg_pred")[col_order] meta_rf = positions.pivot(columns="Ticker", index="Date", values="RF_pred")[col_order] long_only = positions.pivot(columns="Ticker", index="Date", values="long_only")[col_order] perfect_pred = positions.pivot(columns="Ticker", index="Date", values="Label")[col_order] sec_8k.head()Scale all values, absolute sum of all values should = 1epsilon = sys.float_info.epsilon epsilon # for adding small values sec_8k = sec_8k.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) sec_10k10q = sec_10k10q.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) 
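# (Added note on this block of .apply calls, not in the original.) Each date's positions are
# divided by the sum of their absolute values, so every prediction type is rescaled to a gross
# exposure of 1 per day before returns are applied; the epsilon added inside the sum only keeps
# the division defined on dates where a model takes no position at all.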
tweets = tweets.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) news = news.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) amzn = amzn.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) meta_ensemble = meta_ensemble.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) meta_xgb = meta_xgb.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) meta_logreg = meta_logreg.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) meta_rf = meta_rf.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) long_only = long_only.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) perfect_pred = perfect_pred.apply(lambda x: x/sum(abs(x)+epsilon), axis = 1) sec_8k.head()Pre-TC Model Returns Calculationsec_8k_pretc = sec_8k * returns # no tc subtracted sec_10k10q_pretc = sec_10k10q * returns tweets_pretc = tweets * returns news_pretc = news * returns amzn_pretc = amzn * returns meta_ensemble_pretc = meta_ensemble * returns meta_xgb_pretc = meta_xgb * returns meta_logreg_pretc = meta_logreg * returns meta_rf_pretc = meta_rf * returns long_only_pretc = long_only * returns perfect_pred_pretc = perfect_pred * returnsSum up row-wise to obtain model returnssec_8k_pretc = sec_8k_pretc.apply(lambda x: sum(x), axis = 1) sec_10k10q_pretc = sec_10k10q_pretc.apply(lambda x: sum(x), axis = 1) tweets_pretc = tweets_pretc.apply(lambda x: sum(x), axis = 1) news_pretc = news_pretc.apply(lambda x: sum(x), axis = 1) amzn_pretc = amzn_pretc.apply(lambda x: sum(x), axis = 1) meta_ensemble_pretc = meta_ensemble_pretc.apply(lambda x: sum(x), axis = 1) meta_xgb_pretc = meta_xgb_pretc.apply(lambda x: sum(x), axis = 1) meta_logreg_pretc = meta_logreg_pretc.apply(lambda x: sum(x), axis = 1) meta_rf_pretc = meta_rf_pretc.apply(lambda x: sum(x), axis = 1) long_only_pretc = long_only_pretc.apply(lambda x: sum(x), axis = 1) perfect_pred_pretc = perfect_pred_pretc.apply(lambda x: sum(x), axis = 1) data = {"sec_8k":sec_8k_pretc, "sec_10k10q":sec_10k10q_pretc, "tweets":tweets_pretc, "news":news_pretc, "amzn": amzn_pretc, "meta_ensemble":meta_ensemble_pretc, "meta_xgb":meta_xgb_pretc, "meta_logreg":meta_logreg_pretc, "meta_rf":meta_rf_pretc, "long_only":long_only_pretc, "perfect_pred":perfect_pred_pretc} model_returns_pretc = pd.DataFrame(data).iloc[1:] model_returns_pretc.head()Post-TC Model Returns Calculation Get transaction costsec_8k_tc = abs(sec_8k.diff()) * 0.05 / 100 sec_10k10q_tc = abs(sec_10k10q.diff()) * 0.05 / 100 tweets_tc = abs(tweets.diff()) * 0.05 / 100 news_tc = abs(news.diff()) * 0.05 / 100 amzn_tc = abs(amzn.diff()) * 0.05 / 100 meta_ensemble_tc = abs(meta_ensemble.diff()) * 0.05 / 100 meta_xgb_tc = abs(meta_xgb.diff()) * 0.05 / 100 meta_logreg_tc = abs(meta_logreg.diff()) * 0.05 / 100 meta_rf_tc = abs(meta_rf.diff()) * 0.05 / 100 long_only_tc = abs(long_only.diff()) * 0.05 / 100 perfect_pred_tc = abs(perfect_pred.diff()) * 0.05 / 100 sec_8k = sec_8k * returns - sec_8k_tc sec_10k10q = sec_10k10q * returns - sec_10k10q_tc tweets = tweets * returns - tweets_tc news = news * returns - news_tc amzn = amzn * returns - amzn_tc meta_ensemble = meta_ensemble * returns - meta_ensemble_tc meta_xgb = meta_xgb * returns - meta_xgb_tc meta_logreg = meta_logreg * returns - meta_logreg_tc meta_rf = meta_rf * returns - meta_rf_tc long_only = long_only * returns - long_only_tc perfect_pred = perfect_pred * returns - perfect_pred_tc sec_8k.head()Lastly, sum up all the returns from different assets to obtain model returnssec_8k = sec_8k.apply(lambda x: sum(x), axis = 1) sec_10k10q = sec_10k10q.apply(lambda x: sum(x), axis = 1) tweets = tweets.apply(lambda x: 
sum(x), axis = 1) news = news.apply(lambda x: sum(x), axis = 1) amzn = amzn.apply(lambda x: sum(x), axis = 1) meta_ensemble = meta_ensemble.apply(lambda x: sum(x), axis = 1) meta_xgb = meta_xgb.apply(lambda x: sum(x), axis = 1) meta_logreg = meta_logreg.apply(lambda x: sum(x), axis = 1) meta_rf = meta_rf.apply(lambda x: sum(x), axis = 1) long_only = long_only.apply(lambda x: sum(x), axis = 1) perfect_pred = perfect_pred.apply(lambda x: sum(x), axis = 1) sec_8k.head() data = {"sec_8k":sec_8k, "sec_10k10q":sec_10k10q, "tweets":tweets, "news":news, "amzn":amzn, "meta_ensemble":meta_ensemble, "meta_xgb":meta_xgb, "meta_logreg":meta_logreg, "meta_rf":meta_rf, "long_only":long_only, "perfect_pred":perfect_pred} model_returns = pd.DataFrame(data).iloc[1:] model_returns.head()Visualisations with Perfect Predictionsharpe_ratios = round(model_returns_pretc.mean() / model_returns_pretc.std() * 252 ** 0.5, 2) ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,5)) for p in ax.patches: if p.get_height() <= 0: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01)) else: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005)) plt.title("Sharpe Ratios by NLP (Pre TC)") plt.ylabel("Annualised Sharpe Ratio") sharpe_ratios = round(model_returns.mean() / model_returns.std() * 252 ** 0.5, 2) ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,5)) for p in ax.patches: if p.get_height() <= 0: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01)) else: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005)) plt.title("Sharpe Ratios by NLP (Post-TC)") plt.ylabel("Annualised Sharpe Ratio") model_returns_pretc.expanding(2).sum().plot(figsize=(15,5)) plt.title("Model Returns (Pre-TC)") model_returns.expanding(2).sum().plot(figsize=(15,5)) plt.title("Model Returns (Post-TC)") transaction_cost = round((model_returns_pretc.expanding(2).sum().iloc[-1] - model_returns.expanding(2).sum().iloc[-1]) * 100, 2) ax = transaction_cost.plot(kind="bar", figsize=(15, 5), rot=45) for p in ax.patches: if p.get_height() <= 0: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, 0.01)) else: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, p.get_height() * 1.005)) plt.title("Total Transaction Cost") plt.ylabel("Transaction Cost (%)") plt.xlabel("Prediction Type")Visualisationsplot_cols = ['amzn', 'meta_ensemble', 'meta_xgb', 'meta_logreg', 'meta_rf', 'long_only'] model_returns_pretc[plot_cols].expanding(2).sum().plot(figsize=(15,8)) plt.title("Model Returns (Pre-TC)") model_returns[plot_cols].expanding(2).sum().plot(figsize=(15,8)) plt.title("Model Returns (Post-TC)") model_returns[["amzn", "long_only"]].expanding(2).sum().plot(figsize=(15,8)) plt.title("Model Returns (Post-TC)") plt.axhline(0, color='blue', ls="--") sharpe_ratios = round(model_returns_pretc[plot_cols].mean() / model_returns_pretc[plot_cols].std() * 252 ** 0.5, 2) ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,5)) for p in ax.patches: if p.get_height() <= 0: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01)) else: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005)) plt.title("Sharpe Ratios by NLP (Pre TC)") plt.ylabel("Annualised Sharpe Ratio") plt.xlabel("Prediction Type") sharpe_ratios = round(model_returns[plot_cols].mean() / model_returns[plot_cols].std() * 252 ** 0.5, 2) ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,7)) for p in ax.patches: if 
p.get_height() <= 0: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, 0.01)) else: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, p.get_height() * 1.005)) plt.title("Sharpe Ratios by NLP (After Accouting for Transaction Cost)") plt.ylabel("Annualised Sharpe Ratio") plt.xlabel("Prediction Type") plt.axhline(0, color='black') plt.axhline(0.82, color='orange', ls="--") a_returns = model_returns[['amzn', 'long_only']].expanding(2).sum().iloc[-1] / 501 * 252 annual_rtns = round(pd.DataFrame(a_returns) * 100, 2) annual_rtns.columns = ["Annual Returns"] plt.figure(figsize=(1.5,2)) ax = sns.heatmap(annual_rtns, cmap = sns.diverging_palette(10, 150, n=9), annot=True) ax.xaxis.set_ticks_position('top') ax.set_yticklabels(ax.get_yticklabels(), rotation = 0, fontsize = 8) vol = pd.DataFrame(round(model_returns[['amzn', 'long_only']].std() * 252 ** 0.5 * 100, 2)) vol.columns = ["Annual Volatility"] plt.figure(figsize=(1.5,2)) ax = sns.heatmap(vol, cmap = sns.diverging_palette(150, 10, n=9), annot=True) ax.xaxis.set_ticks_position('top') ax.set_yticklabels(ax.get_yticklabels(), rotation = 0, fontsize = 8) cumulative_returns = model_returns[['amzn', 'long_only']].expanding(2).sum() drawdowns = dict() for col in cumulative_returns.columns: maximum_drawdown = [] highest = cumulative_returns[col].values[1] lowest = cumulative_returns[col].values[1] for cum_rtn in cumulative_returns[col].values: if cum_rtn >= highest: maximum_drawdown.append(highest - lowest) highest = cum_rtn lowest = cum_rtn continue if cum_rtn <= lowest: lowest = cum_rtn drawdowns[col] = [max(maximum_drawdown)] drawdowns plt.figure(figsize=(1.5,2)) dd_df = round(pd.DataFrame.from_dict(drawdowns).T * 100, 2) dd_df.columns = ["Max Drawdown"] ax = sns.heatmap(dd_df, cmap = sns.diverging_palette(150, 10, n=9), annot=True) ax.xaxis.set_ticks_position('top') ax.set_yticklabels(ax.get_yticklabels(), rotation = 0, fontsize = 8) no_amzn = pd.read_csv("../../Predictions/Metalearner/appl_no_amzn.csv")[["Date", "meta_ensemble", "meta_xgb", "meta_logreg","meta_rf"]] no_amzn.Date = pd.to_datetime(no_amzn.Date) no_amzn.set_index("Date", inplace=True) no_amzn = pd.DataFrame(round(no_amzn.mean() / no_amzn.std() * 252 ** 0.5, 2)) no_amzn = no_amzn.reset_index() no_amzn["type"] = "No Amzn" no_amzn.columns = ["Model", "Sharpe", "type"] no_amzn sharpe_ratios = round(model_returns[["meta_ensemble","meta_xgb", "meta_logreg", "meta_rf"]].mean() / model_returns[["meta_ensemble","meta_xgb", "meta_logreg", "meta_rf"]].std() * 252 ** 0.5, 2) sharpe_ratios = pd.DataFrame(sharpe_ratios).reset_index() sharpe_ratios["type"] = "Amzn" sharpe_ratios.columns = ["Model", "Sharpe", "type"] sharpe_ratios plt_sharpes = pd.concat([sharpe_ratios, no_amzn]) plt.style.use('ggplot') import matplotlib matplotlib.style.use('classic') sns.set_style("whitegrid") %matplotlib inline plt.figure(figsize=(15, 7)) ax = sns.barplot(x="Model", y="Sharpe", hue="type", data=plt_sharpes, color="blue") for p in ax.patches: if p.get_height() <= 0: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01)) else: ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005)) plt.ylabel("Annualised Sharpe Ratio") plt.xlabel("Prediction Type") plt.ylim([0, 1.65]) plt.title("Sharpe Ratio with and Without Amazon")Now You Code 3: Shopping CartIn this program you will implement an online shopping cart using a Python list of dictionary. The dictionary will contain the product name, price and quantity. 
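For example (the key names here are only illustrative, the exercise leaves them to you), one cart entry could be a dictionary like this, and the cart a list of such dictionaries:
item = {'name': 'pencil', 'price': 0.99, 'quantity': 10}
cart = [item]
total = sum(d['price'] * d['quantity'] for d in cart)  # running order total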
The program should loop continually, asking the user to enter - Product name - Product price - Product quantityuntil the user enters a product name of `'checkout'` at which time the loop should break.Each time through the loop you should create a dictionary of product name, product price and product quantity then add the dictionary to a list.After you enter `'checkout'` the program should show: - all the items in the cart, including their quantity and price - and the total amount of the order, a running sum of quantity times priceNOTE: Don't worry about handling bad inputs for this exercise.Example Run:```E-Commerce Shopping CartEnter product name or 'checkout':pencilEnter pencil Price:0.99Enter pencil Quantity:10Enter product name or 'checkout':calculatorEnter calculator Price:9.99Enter calculator Quantity:1Enter product name or 'checkout':checkoutpencil 10 $0.99calculator 1 $9.99TOTAL: $19.89```Start out your program by writing your TODO list of steps you'll need to solve the problem! Step 1: Problem AnalysisInputs:- stuff i want to buy- price of stuff- quantity of stuffOutputs:- list consists of item i buy and corrisponding price Algorithm (Steps in Program):- create list- start loop - create dictionary - input item - input price - quatity - assign item to key - assign price to value - put stuff to dictionary - put dicitonary to list- print lsit# STEP 2: Write code print("E-Commerce Shopping Cart") #set up an empty list ShoppingList = [] while True: ShoppingCart = {} product = input("Enter product name or 'checkout': ") if product == 'checkout': for items in ShoppingCart: #total = total + price*quantity print(item, total) print("%s %.2f %d"%(product, price, quantity)) print(total) break price = float(input("Enter %s Price: "% product)) quantity = int(input("Enter %s quantity: " % product)) ShoppingCart ['product'] = product ShoppingCart ['price and quantity'] = price, quantity ShoppingList.append(ShoppingCart) total = total + price*quantity #debug print(ShoppingList) print(total) for items in ShoppingList: print("")Read in federal level datafiscal = pd.read_sas('../../data/fiscal2018', format = 'sas7bdat', encoding='iso-8859-1')Generate list of districts in the state in the federal datastate_fiscal = fiscal[(fiscal['STABBR'] == abbr) & (fiscal['GSHI'] == '12')] len(state_fiscal) state_fiscal.head()Read in state level datastate_grads = pd.read_excel('../../data/state_data_raw/' + file) state_grads.head()Reset columns.state_grads.columns = state_grads.iloc[2] state_grads = state_grads.iloc[3:] state_gradsSelect and rename columns.state_grads['Total'] = np.full_like(state_grads['Overall Cohort Grad Rate'], '') state_grads = state_grads[['School/School System Name', 'Total', 'Overall Cohort Grad Rate']] state_grads.columns = ['District Name', 'Total', 'Graduation Rate'] state_grads state_grads.info() RangeIndex: 419 entries, 3 to 421 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 District Name 419 non-null object 1 Total 419 non-null object 2 Graduation Rate 419 non-null object dtypes: object(3) memory usage: 9.9+ KBConvert data types.state_grads['Graduation Rate'] = state_grads['Graduation Rate'].astype(str).str.replace('>', '') state_grads['Graduation Rate'] = state_grads['Graduation Rate'].astype(str).str.replace('<', '') state_grads['Graduation Rate'] = state_grads['Graduation Rate'].astype(str).str.replace('~', '') state_grads['Total'] = pd.to_numeric(state_grads['Total']) state_grads['Graduation Rate'] = 
pd.to_numeric(state_grads['Graduation Rate']) / 100/opt/anaconda3/envs/dsi/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy """Entry point for launching an IPython kernel. /opt/anaconda3/envs/dsi/lib/python3.7/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copyCheck for matches and non-matches in the two listsMatches = [name for name in list(state_grads['District Name']) if name in list(state_fiscal['NAME'])] Matches.sort() len(Matches) A = [name for name in list(state_grads['District Name']) if name not in list(state_fiscal['NAME'])] A.sort() A B = [name for name in list(state_fiscal['NAME']) if name not in list(state_grads['District Name'])] B.sort() BRename the samples I can find matches for.state_grads_rename = { #'Algiers Technology Academy', #'Community School for Apprenticeship Learning Inc.', 'Delta Charter Group' : 'Delta Charter School MST', 'Dr Martin Luther King Charter School for Sci/Tech' : 'Dr. Martin Luther King Charter School for Sci Tech', #'Howard School', 'JCFA Lafayette' : 'JCFA', 'KIPP Renaissance High School' : 'KIPP Renaissance', #'LA Schools for the Deaf and Visually Impaired', #'Lake Area New Tech Early College High School', #'Louisiana Special Education Center', #'The NET2 High School', 'University View Academy Inc. (FRM LA Connections)' : 'University View Academy, Inc. (FRM LA Connections)', #'Voices for International Business & Education' } state_fiscal = state_fiscal.replace(state_grads_rename)Merge federal and state data, keeping only matches between the two.state_grads_merged = pd.merge(state_fiscal, state_grads, how='inner', left_on='NAME', right_on='District Name')Save cleaned data.state_grads_merged.to_csv('../../data/state_data_merged/' + abbr + '.csv', index=False)Creating a new card deck![](https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcRyukGiG5BM_zAFGS0qVvvKH-J3uLQeBQOOaS9OnMs59IaSVyxQ)def new_card_deck(lowest=9): ''' Creates an returns a brand new card deck. The card deck is made of a list which contains all cards. Each card is tuple (rank, suit) where rank goes from the lowest (defaut 9) to the Ace (rank = 14). There is four suits: Pikes (P), Hearts (H), Tiles (T), Clovers (C) ''' deck = [] ranks = list(range(lowest,15)) # suits = list(['P', 'H', 'T', 'C']) for rank in ranks: for suit in suits: deck.append((rank, suit)) return deckShuffling and splitting the deck![](http://www.pokerology.com/wp-content/uploads/card-shuffling1.jpg)from random import shuffle def shuffle_cards(list_of_cards): ''' Shuffle a list of cards. Returns the shuffled list. ''' shuffle(list_of_cards) # returns None and shuffle the list in place return list_of_cards def split_cards(list_of_cards): ''' Split a list of cards into two decks A and B. Returns A and B ''' A, B = [], [] if len(list_of_cards) % 2 == 0: # splitting only works for even number A = list_of_cards[::2] B = list_of_cards[1::2] return A, BWeight of a deckdef weight(list_of_cards, lowest_rank = 9): ''' Calculates the strength of a deck. 
The strength is defined by mapping each rank to a signed value: In case of 24 cards : ( 9, 10, V, D, R, A) --> (-3, -2, -1, 1, 2, 3) In case of 56 cards : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, V, D, R, A) --> (-7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7) NB: The strength of a complete deck is 0. ''' if lowest_rank == 1: table = {1:-7, 2:-6, 3:-5, 4:-4, 5:-3, 6:-2, 7:-1, 8:+1, 9:+2, 10:+3, 11:4, 12:5, 13:6, 14:7} elif lowest_rank == 9: table = {9:-3, 10:-2, 11:-1, 12:1, 13:2, 14:3} else: raise ValueError('Bad deck size') weight = 0 for card in list_of_cards: rank = card[0] weight += table[rank] return weight Playing War (_bataille_)![](https://upload.wikimedia.org/wikipedia/commons/thumb/1/13/Wojna_gra_karciana.jpg/220px-Wojna_gra_karciana.jpg)def play_bataille(A,B): ''' Plays a game of War and returns the evolution of the number of cards per player over the course of the game. ''' manche_A, manche_B = [], [] taille_A, taille_B = [], [] weight_A, weight_B = [], [] nb_war = 0 winner = '' lowest_rank = min(A+B)[0] continuer = True while continuer: taille_A.append(len(A)) taille_B.append(len(B)) weight_A.append(weight(A, lowest_rank)) weight_B.append(weight(B, lowest_rank)) if len(A) == 0: # B wins the game #print('### B wins the game ###') continuer = False winner = 'B' elif len(B) == 0: # A wins the game #print('### A wins the game ###') continuer = False winner = 'A' else: manche_A.append(A.pop()) manche_B.append(B.pop()) if manche_A[-1][0] == manche_B[-1][0]: #print('War!!') nb_war += 1 pass elif manche_A[-1][0] > manche_B[-1][0]: #print('A wins the round') A = manche_A + manche_B + A manche_A.clear() manche_B.clear() else: #print('B wins the round') B = manche_B + manche_A + B manche_A.clear() manche_B.clear() return taille_A, taille_B, weight_A, weight_B, nb_war, winner deck = shuffle_cards(new_card_deck()) A, B = split_cards(deck) nb_A, nb_B, wgt_A, wgt_B, nb_war, winner = play_bataille(A,B) print(winner) plot(nb_A, lw=2) plot(nb_B, lw=2) xlabel('Number of turns', fontsize=14) grid(True) legend(('A','B')) plot(wgt_A, lw=2) plot(wgt_B, lw=2) grid(True) xlabel('Number of turns') ylabel('Deck weight') # play N games and collect the statistics def play_N_batailles(N, lowest_rank=9): nb_manches = [] winners = [] initial_weights = [] for idx in range(N): A, B = split_cards(shuffle_cards(new_card_deck(lowest=lowest_rank))) nb_A, nb_B, wgt_A, wgt_B, nb_war, winner = play_bataille(A,B) nb_manches.append(len(nb_A)) winners.append(winner) initial_weights.append((wgt_A[0], wgt_B[0])) return nb_manches, winners, initial_weights N = 10000 nb_manches1, winners1, initial_weights1 = play_N_batailles(N, lowest_rank=9) nb_manches2, winners2, initial_weights2 = play_N_batailles(N, lowest_rank=1) ax2=hist(nb_manches2, bins=N/100, color='r', alpha=0.7, range=(0,2000)) ax1=hist(nb_manches1, bins=N/100, color='b', alpha=0.7) xlabel('Number of rounds per game', fontsize=14) ylabel('Occurrences', fontsize=14) xlim(1, 1200) legend(('56 cards','24 cards'))C:\Users\JH218595\Documents\Anaconda3\lib\site-packages\numpy\lib\function_base.py:564: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future n = np.zeros(bins, ntype) C:\Users\JH218595\Documents\Anaconda3\lib\site-packages\numpy\lib\function_base.py:611: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)Probability fitting import scipy.stats as stats # returns shape, loc, scale parameters params1 = 
stats.lognorm.fit(nb_manches1, scale=N/10) print(params1) params2 = stats.lognorm.fit(nb_manches2, scale=N/10) print(params2) x = arange(1,N) pdf1 = stats.lognorm.pdf(x, *params1[:-2], loc=params1[-2], scale=params1[-1]) pdf2 = stats.lognorm.pdf(x, *params2[:-2], loc=params2[-2], scale=params2[-1]) plot(x, pdf1, lw=3, color='b') plot(x, pdf2, lw=3, color='r') ax2=hist(nb_manches2, bins=N/100, color='r', alpha=0.3, range=(0,2000), normed=True) ax1=hist(nb_manches1, bins=N/100, color='b', alpha=0.3, normed=True) xlim(0,1000) grid(True) #ylim(0,300) # not normalized histogram and fit ax2=hist(nb_manches2, bins=N/100, color='r', alpha=0.3) ax1=hist(nb_manches1, bins=N/100, color='b', alpha=0.3) # in order to scale the pdf, must multiply by data_length*bins_width plot(x, len(nb_manches1)*(ax1[1][2]-ax1[1][1])*pdf1, lw=3, color='b') plot(x, len(nb_manches2)*(ax2[1][2]-ax2[1][1])*pdf2, lw=3, color='r') grid(True) xlim(1,1000)C:\Users\JH218595\Documents\Anaconda3\lib\site-packages\numpy\lib\function_base.py:564: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future n = np.zeros(bins, ntype) C:\Users\JH218595\Documents\Anaconda3\lib\site-packages\numpy\lib\function_base.py:611: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)Gradient Boosting for Prediction and Inference Lesson 2In this notebook, we will walk through the details of how Gradient Boosting Works, and demonstrate their use via the scikit-learn `GradientBoostingClassifier` and `GradientBoostingRegressor`. We will also begin to discuss how to approach setting parameters and using early stopping.import pandas as pd import numpy as np import matplotlib.pyplot as pltGradient Boosting: History and Terminology- Generally, when people refer to "Gradient Boosting" what they really mean is "Gradient Boosted Decision Trees"- "Boosting" in general, originally referred to a process where you fit a model, and then gave more weight to the examples in the training data that it got wrong, refit the model, and then added the new model to the previous one. - AdaBoost (Freund and Schapire) was an early example of this, and using trees as the base learner was shown to be quite effective- Later, it was shown that AdaBoost was equivalent to fitting the base learner to the *gradient* of a particular loss function (exponential). - The idea was then generalized to any particular loss function, and referred to as *gradient boosting*. Additionally, other loss functions were shown to be better in practice than the exponential loss.- Though the idea can apply to any base learner, decision trees have been most effective. So, what we are learning about today is *gradient boosting with decision trees as the base learner* or *gradient boosted decision trees* (but usually just called *gradient boosting*) Gradient boosting: rough pass- Consider a regression problem. Suppose you have a model $M_1$ that already performs reasonably well. But you would like to improve it if possible.- You use your model to make predictions on the training data and measure the *residuals*: i.e. the difference between the true answer and the prediction.- Next, you build a decision tree to try and predict the *residuals* given your predictors. 
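In code, this residual-fitting step might look like the following minimal sketch (synthetic data and scikit-learn's `DecisionTreeRegressor`; the variable names, the 50 rounds and the 0.1 learning rate are illustrative assumptions, not part of the examples later in this lesson):
```
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 3))
y = X[:, 0] ** 2 + X[:, 1] + rng.normal(scale=0.1, size=500)

pred = np.zeros(500)               # start from a "nothing" model
for _ in range(50):
    residuals = y - pred           # how far off is the current model on each point?
    tree = DecisionTreeRegressor(max_depth=2).fit(X, residuals)
    pred += 0.1 * tree.predict(X)  # take a small step toward the residuals

print(np.mean((y - pred) ** 2))    # training MSE shrinks as rounds accumulate
```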
For example you want to find a tree that can distinguish between the cases where your previous model underpredicted vs overpredicted (and also distinguish between a small under/overprediction and a large under/overprediction) - In theory, if your new tree does a great job at predicting the residuals, you can add the result of the tree to the previous model, and voila, you will have a better model.- In reality we need to be concerned with overfitting: Suppose the residuals are truly random noise. With enough predictors, you may find an incidental pattern that works on your training data, but is worthless on new cases.- Therefore, we do a few things to try to prevent this from happening (i.e. to help *regularize*)- Most importantly, rather than directly add the new model to the old model, we first multiply it by a small fraction (say .1 or even .01 or .001). We will do this process many times, so it doesn't (generally) hurt to take small steps.- It also doesn't matter whether we start with a "pretty good" model. We can start with a "nothing" model (predict 0 for everything) and still iterate through this process. Wait! This makes sense, but I still don't get a few things...- Where does the loss function come in?- Where does the gradient come in?- What if I am doing a classification problem? Good questions!- What I just described was specific to regression.- In fact, it was specific to regression with a mean squared error loss function- When using mean squared error, the derivative of the loss function is the residual (distance from the true answer) (times a constant of 2)- $\frac{\partial}{\partial y} (y -y_t)^2 = 2(y-y_t)$- So fitting a model to the residuals is *equivalent* to fitting a model to the *gradient of the loss function*- So the notion of predicting small/large under/overpredictions can be thought of as predicting where my loss is increasing/decreasing by a small/large amount.- The latter notion will even generalize to cases where the loss function has a weirder form. Reiteration of Gradient Boosting- In general, (for classification, regression with any loss function) we take the current model, use it to make predictions on the training data, and then find the *derivative of the loss function* for each point *at its current predicted value*.- For most loss functions, the gradient is "steeper" the further you are from the right answer, so the idea of predicting the "degree" of over/underprediction is still the right one. Example of a regression loss functiontvec1 = np.linspace(-20,20,201) plt.figure(figsize=(8,4)) plt.subplot(1,2,1) plt.plot(tvec1, (tvec1-2)**2) plt.title('Loss fn when true value = 2') plt.subplot(1,2,2) plt.plot(tvec1, (tvec1-(-7))**2) plt.title('Loss fn when true value = -7');Example of classification (cross-entropy) loss functiontvec = np.linspace(.01,.99,99) plt.figure(figsize=(8,4)) plt.subplot(1,2,1) plt.plot(tvec, np.log(1/tvec)) plt.title('Loss fn when true value = 1') plt.subplot(1,2,2) plt.plot(tvec, np.log(1/(1-tvec))) plt.title('Loss fn when true value = 0');Detailed steps- Let $M_0$ be the current model- Let $y_0$ be the current model's predictions on the training set - These will be point estimates for regression - These will be predicted probabilities for classification- Let $y_t$ be the true answers (numbers for regression, 0/1 for classification)- Let $L(y_{true}, y_{pred})$ be the loss function.- Let $L^{\prime}_{y_{true}}(y_{pred}) = \frac{\partial}{\partial y_{pred}}(L(y_{true}, y_{pred}))$- Let $z = L^{\prime}_{y_t}(y_1)$ -- i.e. 
the derivative of the loss function (with respect to the prediction) evaluated at each pair of (true, predicted) values.- Finally, fit a decision tree that tries to predict $z$ using your predictors $X$. - Call the resulting tree $T_1$.- Let $M_1 = M_0 + \epsilon T_1$ (epsilon is the "learning rate")- Repeat with $M_1$ as the current model to build $T_2$ (etc. etc.) Alternate interpretation: - We are doing "gradient descent" by building trees Let's play with some dataWe'll return to the games from the NBA, predicting the winner from team statistics (not including points).from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from sklearn.metrics import log_loss, roc_auc_score, accuracy_score from sklearn.model_selection import train_test_split df_nba = pd.read_csv('data/games.csv') df_nba.dropna(inplace=True) df_nba.shape df_nba.sample(5) df_nba.columns feat_1 = ['FG_PCT_home','FT_PCT_home', 'FG_PCT_away', 'FT_PCT_away'] feat_2 = ['FG_PCT_home','FT_PCT_home', 'FG3_PCT_home', 'AST_home', 'REB_home', 'FG_PCT_away', 'FT_PCT_away', 'FG3_PCT_away', 'AST_away', 'REB_away'] X = df_nba.iloc[:,:-1] # everything except winner y = df_nba.HOME_TEAM_WINS X_train_full, X_test, y_train_full, y_test = train_test_split(X,y,test_size = 2000, random_state=0) X_train_full.columns n_train_pts = 2000 X_train_1 = X_train_full.iloc[:n_train_pts].loc[:, feat_1] y_train_1 = y_train_full.iloc[:n_train_pts] X_test_1 = X_test.loc[:, feat_1] ## For reference, lets redo the random forest rf1 = RandomForestClassifier(n_estimators=1000) rf1 rf1.fit(X_train_1, y_train_1) prob_preds_rf1 = rf1.predict_proba(X_test_1)[:,1] log_loss(y_test, prob_preds_rf1), roc_auc_score(y_test, prob_preds_rf1) hard_preds_rf1 = (prob_preds_rf1 > .5).astype(int) accuracy_score(y_test, hard_preds_rf1) ## now let's try a gb model gb1 = GradientBoostingClassifier(max_depth=2, n_estimators=100, learning_rate=.1) gb1 gb1.fit(X_train_1, y_train_1) prob_preds_gb1 = gb1.predict_proba(X_test_1)[:,1] log_loss(y_test, prob_preds_gb1), roc_auc_score(y_test, prob_preds_gb1) hard_preds_gb1 = (prob_preds_gb1 > .5).astype(int) accuracy_score(y_test, hard_preds_gb1)Now let's repeat with the bigger feature setX_train_2 = X_train_full.iloc[:n_train_pts].loc[:, feat_2] y_train_2 = y_train_full.iloc[:n_train_pts] X_test_2 = X_test.loc[:, feat_2] rf2 = RandomForestClassifier(n_estimators=1000) rf2.fit(X_train_2, y_train_2) prob_preds_rf2 = rf2.predict_proba(X_test_2)[:,1] log_loss(y_test, prob_preds_rf2), roc_auc_score(y_test, prob_preds_rf2) hard_preds_rf2 = (prob_preds_rf2>.5).astype(int) accuracy_score(y_test, hard_preds_rf2) gb2 = GradientBoostingClassifier(max_depth=2, n_estimators=100, learning_rate=.1) gb2.fit(X_train_2, y_train_2) prob_preds_gb2 = gb2.predict_proba(X_test_2)[:,1] log_loss(y_test, prob_preds_gb2), roc_auc_score(y_test, prob_preds_gb2) hard_preds_gb2 = (prob_preds_gb2>.5).astype(int) accuracy_score(y_test, hard_preds_gb2)Gradient Boosting vs Random Forest- Typically, Gradient Boosting can outperform Random Forests by a small, but not insignificant amount.- However, Gradient Boosting requires much more "parameter tuning" to get the best performance- For Random Forest, can usually use 1K or 2K trees and do well. The only "major" parameter is the `max_features` and the default is usually reasonable.- For Gradient Boosting, parameter tuning is considerably more complicated.- Random Forests tend to be less "well-calibrated" than GB. 
That means that the log_loss of random forests might be bad even when the AUROC or Accuracy are still reasonably good. Major parameters to tune for Gradient Boosting- `max_depth`: How deep to build the trees. This is a very important parameter and performance may change dramatically for different values. Larger values are more likely to overfit, smaller values more likely to underfit. I've worked on problems where max_depth was 10 or even higher for best performance.- `learning_rate`, `n_estimators`: These parameters are also very important and interact highly with one another (and with `max_depth`). Typically, the smaller your "step_size" (learning_rate), the more steps you will need to take to reach maximum performance. However, unlike random forests, if you continue to build trees in boosting, you will start overfitting, and performance (measured on the test set) will get worse.The best way to deal with `learning_rate` and `n_estimators` is to use a "validation set". That is, take some data (that is not in the training set) and evaluate the performance of the model periodically as it trains. When performance starts getting worse on the validation set, then stop.In theory, you should not use the test set as your validation set, because you will be overstating your performance a bit. (Since you will be choosing the moment of peak performance on that test set to stop). In practice, this is usually not such a big deal. It is also fine just to set aside some data out of the training set. The validation set does not need to be huge, and it is not crucial to stop at the exact perfect moment -- you just want to be in the right neighborhood. Sklearn early stoppingin version 0.20 (Sep 2018), sklearn added support for early stopping via `validation_fraction` and `n_iter_no_change`. Unfortunately, it does not output the loss values on the validation set, so it is hard to truly see it in action. Also, it does not support passing a separate validation set. We will revisit early stopping when we explore the other boosting packages.- `validation_fraction`: fraction of data to use for validation- `n_iter_no_change`: Compare current iteration to the value $x$ trees ago to decide whether to stop or not# Let's run with early stopping # We can make the number of trees large and just rely on early stopping # We can also make the learning rate small to be on the safe side gb3 = GradientBoostingClassifier(max_depth=2, n_estimators=5000, learning_rate=.01, validation_fraction=.1, n_iter_no_change=20, verbose=2) gb3.fit(X_train_2, y_train_2) prob_preds_gb3 = gb3.predict_proba(X_test_2)[:,1] log_loss(y_test, prob_preds_gb3), roc_auc_score(y_test, prob_preds_gb3)NOTE: When comparing boosting models, I put more credence in the log_loss values then the ROC or Accuracy scoreshard_preds_gb3 = (prob_preds_gb3>.5).astype(int) accuracy_score(y_test, hard_preds_gb3)Caution !!! I heve not checked bugs yet. Two-leg ladder spin-$1/2$ Heisenberg modelThis program uses the single-site unit cell VUMPS algorithm [1].This program corresponds to Fig. 5 in Ref. [3]. Reference[1] Phys. Rev. B 97, 045145 (2018)https://journals.aps.org/prb/abstract/10.1103/PhysRevB.97.045145https://arxiv.org/abs/1701.07035[2] Phys. Rev. B 86, 125441 (2012)https://journals.aps.org/prb/abstract/10.1103/PhysRevB.86.125441https://arxiv.org/abs/1204.0704[3] Phys. Rev. 
B 85, 075125 (2012)https://journals.aps.org/prb/abstract/10.1103/PhysRevB.85.075125https://arxiv.org/abs/0909.4059import numpy as np import scipy as sp from scipy.sparse.linalg import LinearOperator import sys sys.path.append('Library') import MathFunctions as MF import MPSOperators as MO import SingleVUMPS as SV dtype = np.dtype("float") #dtype = np.dtype("complex") D = 2 # physical bond D = 2 corresponds to spin-1/2 M = 30 # virtual bond max_iter = 500 # the number of the VUMPS steps D2 = D * D Sx,Sy,Sz,Su,Sd = MF.Spin(D) Sz2 = np.einsum("ab,bc -> ac",Sz,Sz) Jleg = 1.0; Jrung = 1.0; h_xxx = ( np.kron(Sx,Sx) + np.kron(Sy,Sy) + np.kron(Sz,Sz) ).real I = np.eye(D*D) h = 0.5 * Jrung * np.kron(h_xxx,I).reshape(D2,D2,D2,D2) \ + 0.5 * Jrung * np.kron(I,h_xxx).reshape(D2,D2,D2,D2) \ + Jleg * np.kron(h_xxx,I).reshape(D,D,D,D,D,D,D,D).transpose(0,2,1,3,4,6,5,7).reshape(D**4,D**4).reshape(D2,D2,D2,D2) \ + Jleg * np.kron(I,h_xxx).reshape(D,D,D,D,D,D,D,D).transpose(0,2,1,3,4,6,5,7).reshape(D**4,D**4).reshape(D2,D2,D2,D2) # initial state np.random.seed(3162277) A = np.random.rand(M,D2,M) HR = np.random.rand(M,M); HL = np.random.rand(M,M) tol = 1e-10 AC,C,AR,AL = MO.MixedCanonicalForm(A,dtype) for i in range (max_iter): HR,er = SV.Calc_HR(AR,HR,h,dtype,tol=tol) HL,el = SV.Calc_HL(AL,HL,h,dtype,tol=tol) AC = SV.Next_AC(AC,AR,AL,HR,HL,h,dtype,tol=tol) C = SV.Next_C(C,AR,AL,HR,HL,h,dtype,tol=tol) #AR = SV.Next_AR_SVD(AC,C) AR = SV.Next_AR_PolarDecomposition(AC,C) #AL = SV.Next_AL_SVD(AC,C) AL = SV.Next_AL_PolarDecomposition(AC,C) B = SV.Calc_B(AC,C,AR,AL) tol = B / 100 if B < 1e-12: print ("Converged!") break E = 0.5 * ( er + el ).real print ("step {}, E {}, |B| {}".format(i,E/2,B)) #Tensors = np.empty(4, dtype=object) #Tensors[0] = AC; Tensors[1] = C; Tensors[2] = AR; Tensors[3] = AL #np.save("Data/Tensors_Heisenberg_{}_{}_{}_{}_{}.npy".format(D,M,JJ,BB,DD),Tensors)step 0, E 0.3747957383451404, |B| 0.6901806392842745 step 1, E -0.4612725447683895, |B| 0.06960191599807383 step 2, E -0.577781576589631, |B| 0.0009100835238643438 step 3, E -0.5780400087111095, |B| 0.000246792654496597 step 4, E -0.5780420727775445, |B| 0.0001562412785329729 step 5, E -0.5780421957134088, |B| 0.00016645682931763095 step 6, E -0.5780421952743988, |B| 0.00018238592750027382 step 7, E -0.5780421870690309, |B| 0.00018652472074347367 step 8, E -0.5780421977346266, |B| 0.00018773207064901848 step 9, E -0.5780422158046312, |B| 0.00017708586219277044 step 10, E -0.5780422368773837, |B| 0.00016986518790324093 step 11, E -0.5780422548967421, |B| 0.00016569172879399196 step 12, E -0.5780422695940132, |B| 0.0001646029161318995 step 13, E -0.5780422811436825, |B| 0.0001668291579029103 step 14, E -0.578042289845023, |B| 0.00017163751353287062 step 15, E -0.5780422962500299, |B| 0.00016765130351065126 step 16, E -0.5780423329285013, |B| 0.0001534460008433905 step 17, E -0.5780423571[...]ExperimentsIn this notebook we will cover some advanced configurations for Experiment. 1. **Experiment Creation**: Creating an Experiment from scratch.2. **Experiment Running**: Running an experiment and evaluates its learners on its environments.5. **Experiment Multiprocessing**: Controlling execution to manage resources and optimize runtime.6. **Experiment Restoration**: Saving results to file as an Experiment evaluates so that work isn't loss to unexpected failures. Experiment CreationBefore you do anything with an **Experiment** you have to create one. 
To create an **Experiment** we need a list of Environments and Learners:from coba.environments import Environments from coba.learners import RandomLearner, VowpalEpsilonLearner from coba.experiments import Experiment #this creates our experiment. There are more options that can be passed here. These are discussed further down. experiment = Experiment(Environments.from_linear_synthetic(500),[VowpalEpsilonLearner(),RandomLearner()])Experiment EvaluationOnce an **Experiment** has been created the next step is to use it to evaluate it. This looks like this:experiment.run().plot_learners()2022-06-08 13:50:28 -- Processing chunk... 2022-06-08 13:50:28 -- * Recording Learner 0 parameters... (0.0 seconds) (completed) 2022-06-08 13:50:28 -- * Recording Learner 1 parameters... (0.0 seconds) (completed) 2022-06-08 13:50:28 -- * Loading LinearSynth(A=5,c=5,a=5,R=['a', 'xa'],seed=1)... (0.11 seconds) (completed) 2022-06-08 13:50:28 -- * Creating Environment 0 from Loaded Source... (0.0 seconds) (completed) 2022-06-08 13:50:28 -- * Recording Environment 0 statistics... (0.0 seconds) (completed) 2022-06-08 13:50:28 -- * Evaluating Learner 0 on Environment 0... (0.15 seconds) (completed) 2022-06-08 13:50:28 -- * Evaluating Learner 1 on Environment 0... (0.01 seconds) (completed)Experiment MultiprocessingThe **Experiment** class comes with highly configurable multiprocessing functionality. There are three parameters which control how **Experiment** orchestrates tasks across process:1. `processes` Determines how many processes to use when executing an experiment.2. `maxtasksperchild` Determines how many "tasks" processes should complete before being replaced with new processes3. `chunk_by` Determines how tasks (i.e., environment/learner pairs) are chunked when passed to processeses for evaluationThese parameters can be set in one of three way: 1. A coba configuration file containing the following (more information about that in the configurations notebook)```json{ "experiment": {"processes":1, "maxtasksperchild":0, "chunk_by":"source" } }```2. Programatically using the **CobaContext** interface```python from coba.contexts import CobaContext CobaContext.experiment.processes=1 CobaContext.experiment.maxtasksperchild= 0 CobaContext.experiment.chunk_by = 'source'```3. Directly on an **Experiment**```python Experiment(environments,learners).config(processes=1,chunk_by='source',maxtasksperchild=1).evaluate()```By default **Experiment** multiprocessing settings are `processes=1, maxtasksperchild=0, chunk_by='source'`, which means only one process will be used. Multiple processes will only be used if `processes>1` or `maxtasksperchild>0`. The `chunk_by` parameter is only relevant when multiple processes are used. Otherwise it is ignored. Below are a few examples.%%time from coba.environments import Environments from coba.learners import RandomLearner, VowpalEpsilonLearner from coba.experiments import Experiment environments = Environments.from_linear_synthetic(2000).shuffle([1,2,3,4,5]) learners = [VowpalEpsilonLearner(),RandomLearner()] #single processing, chunk_by is ignored (i.e., everything is in one chunk) Experiment(environments, learners).run().plot_learners(err='se')2022-06-08 13:50:38 -- Processing chunk... 2022-06-08 13:50:38 -- * Recording Learner 0 parameters... (0.0 seconds) (completed) 2022-06-08 13:50:38 -- * Recording Learner 1 parameters... (0.0 seconds) (completed) 2022-06-08 13:50:38 -- * Loading LinearSynth(A=5,c=5,a=5,R=['a', 'xa'],seed=1)... 
(0.43 seconds) (completed) 2022-06-08 13:50:38 -- * Creating Environment 0 from Loaded Source... (0.0 seconds) (completed) 2022-06-08 13:50:38 -- * Recording Environment 0 statistics... (0.0 seconds) (completed) 2022-06-08 13:50:39 -- * Evaluating Learner 0 on Environment 0... (0.56 seconds) (completed) 2022-06-08 13:50:39 -- * Evaluating Learner 1 on Environment 0... (0.03 seconds) (completed) 2022-06-08 13:50:39 -- * Creating Environment 1 from Loaded Source... (0.0 seconds) (completed) 2022-06-08 13:50:39 -- * Recording Environment 1 statistics... (0.0 seconds) (completed) 2022-06-08 13:50:39 -- * Evaluating Learner 0 on Environment 1... (0.54 seconds) (completed) 2022-06-0[...]And below is an example of a Experiment that processes source chunks in parallel. Notice that when multiple processesors are executing the logging output now contains which processor sent the message in addition to when the message was sent. Also notice the difference in execution time between the single process and multiprocess runs. This example still only has one evaluation chunk because the three shuffle environments all come from the same "source".%%time from coba.environments import Environments from coba.learners import RandomLearner, VowpalEpsilonLearner from coba.experiments import Experiment environments = Environments.from_linear_synthetic(2000).shuffle([1,2,3,4,5]) learners = [VowpalEpsilonLearner(),RandomLearner()] #processes=2, chunk_by='source', #There are 10 tasks in this experiment (2 learner tasks, 3 environment tasks, 6 evaluation tasks) #There is only 1 source, Environments.from_linear_synthetic(2000), which we filter to 3 environments #This means there will be one chunk containing the 3 environment and 6 evaluation tasks Experiment(environments, learners).config(processes=2, chunk_by='source').run().plot_learners(err='se')2022-06-08 13:50:54 -- pid-8016 -- Processing chunk... 2022-06-08 13:50:54 -- pid-8016 -- * Recording Learner 0 parameters... (0.0 seconds) (completed) 2022-06-08 13:50:54 -- pid-8016 -- Processing chunk... 2022-06-08 13:50:54 -- pid-8016 -- * Recording Learner 1 parameters... (0.0 seconds) (completed) 2022-06-08 13:50:54 -- pid-8016 -- Processing chunk... 2022-06-08 13:50:55 -- pid-8016 -- * Loading LinearSynth(A=5,c=5,a=5,R=['a', 'xa'],seed=1)... (0.45 seconds) (completed) 2022-06-08 13:50:55 -- pid-8016 -- * Creating Environment 0 from Loaded Source... (0.0 seconds) (completed) 2022-06-08 13:50:55 -- pid-8016 -- * Recording Environment 0 statistics... (0.0 seconds) (completed) 2022-06-08 13:50:55 -- pid-8016 -- * Evaluating Learner 0 on Environment 0... (0.66 seconds) (completed) 2022-06-08 13:50:55 -- pid-8016 -- * Evaluating Learner 1 on Environment 0... (0.03 seconds) (completed) 2022-06-08 13:50:55 -- pid-8016 -- * Creating Environment 1 fro[...]Finally, if one wants to evaluate an Experiment in full parallel `chunk_by` can be set to `'task'`. This means each chunk will be a single task. This can drastically speed up evaluations when Learners are the processing bottleneck. Unfortunately this also means we'll end up doing some duplicate work. 
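If worker memory growth is a concern, the `maxtasksperchild` setting described earlier can be combined with these options; a minimal sketch following the same config pattern used above (the value 4 is only illustrative):
```
from coba.environments import Environments
from coba.learners import RandomLearner, VowpalEpsilonLearner
from coba.experiments import Experiment

environments = Environments.from_linear_synthetic(2000).shuffle([1,2,3,4,5])
learners     = [VowpalEpsilonLearner(), RandomLearner()]

# Recycle each worker process after it has completed 4 tasks.
Experiment(environments, learners).config(
    processes=2, chunk_by='task', maxtasksperchild=4
).run().plot_learners(err='se')
```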
Notice that our environment has to be reloaded every time a task is processed.%%time from coba.environments import Environments from coba.learners import RandomLearner, VowpalEpsilonLearner from coba.experiments import Experiment environments = Environments.from_linear_synthetic(2000).shuffle([1,2,3,4,5]) learners = [VowpalEpsilonLearner(),RandomLearner()] #processes=2, chunk_by='task', #There are 10 separate tasks (2 learner tasks, 3 environment tasks, 6 evaluation tasks) #Each task will be chunked into its own process making 11 total chunks Experiment(environments, learners).config(processes=2, chunk_by='task').run().plot_learners(err='se')2022-06-08 13:51:06 -- pid-19376 -- Processing chunk... 2022-06-08 13:51:06 -- pid-19376 -- * Recording Learner 0 parameters... (0.0 seconds) (completed) 2022-06-08 13:51:06 -- pid-19376 -- Processing chunk... 2022-06-08 13:51:06 -- pid-19376 -- * Recording Learner 1 parameters... (0.0 seconds) (completed) 2022-06-08 13:51:06 -- pid-19376 -- Processing chunk... 2022-06-08 13:51:06 -- pid-14576 -- Processing chunk... 2022-06-08 13:51:07 -- pid-19376 -- * Loading LinearSynth(A=5,c=5,a=5,R=['a', 'xa'],seed=1)... (0.44 seconds) (completed) 2022-06-08 13:51:07 -- pid-19376 -- * Creating Environment 0 from Loaded Source... (0.0 seconds) (completed) 2022-06-08 13:51:07 -- pid-19376 -- * Recording Environment 0 statistics... (0.0 seconds) (completed) 2022-06-08 13:51:07 -- pid-19376 -- Processing chunk... 2022-06-08 13:51:07 -- pid-14576 -- * Loading LinearSynth(A=5,c=5,a=5,R=['a', 'xa'],seed=1)... (0.44 seconds) (completed) 2022-06-08 13:51:07 -- pid-14576 -- * Creat[...]Experiment RestorationExperiments come with one final bit of functionality: if provided a file path Experiments will write their results to file as they run. This can be handy in the case of something interrupting an **Experiment** during evaluation (e.g., the internet going down or a computer being forced to restart) because the **Experiment** will be able to resume at the point of interuption. The **Experiment** restoration file is also useful for later analysis since a **Result** object can be created from it. Here's what it looks like to create a restoration/result file. Notice that if you run this code block twice the second time there will be no output due to restoration. (If you are runing this code on mybinder this may or may not work as file write permissions can be blocked)from coba.environments import Environments from coba.learners import RandomLearner, VowpalEpsilonLearner from coba.experiments import Experiment environments = Environments.from_linear_synthetic(2000).shuffle([1,2,3,4,5]) learners = [VowpalEpsilonLearner(),RandomLearner()] Experiment(environments, learners).run('example.log').plot_learners()2022-06-08 13:51:23 -- Processing chunk... 2022-06-08 13:51:23 -- * Recording Learner 0 parameters... (0.0 seconds) (completed) 2022-06-08 13:51:23 -- * Recording Learner 1 parameters... (0.0 seconds) (completed) 2022-06-08 13:51:24 -- * Loading LinearSynth(A=5,c=5,a=5,R=['a', 'xa'],seed=1)... (0.43 seconds) (completed) 2022-06-08 13:51:24 -- * Creating Environment 0 from Loaded Source... (0.0 seconds) (completed) 2022-06-08 13:51:24 -- * Recording Environment 0 statistics... (0.0 seconds) (completed) 2022-06-08 13:51:24 -- * Evaluating Learner 0 on Environment 0... (0.57 seconds) (completed) 2022-06-08 13:51:25 -- * Evaluating Learner 1 on Environment 0... (0.05 seconds) (completed) 2022-06-08 13:51:25 -- * Creating Environment 1 from Loaded Source... 
(0.0 seconds) (completed) 2022-06-08 13:51:25 -- * Recording Environment 1 statistics... (0.01 seconds) (completed) 2022-06-08 13:51:25 -- * Evaluating Learner 0 on Environment 1... (0.56 seconds) (completed) 2022-06-[...]Analyzing the UncertaintyForest Class by Reproducing Figure 2This set of three tutorials (`uncertaintyforest_running_example.ipynb`,`uncertaintyforest_fig1.ipynb`, and `uncertaintyforest_fig2.ipynb`) will explain the UncertaintyForest class. After following these tutorials, you should have the ability to run UncertaintyForest on your own machine and generate Figures 1 and 2 from [this paper](https://arxiv.org/pdf/1907.00325.pdf). If you haven't seen it already, take a look at other tutorials to setup and install the ProgLearn package: `installation_guide.ipynb`.*Goal: Run the UncertaintyForest class to produce the results from Figure 2**Note: Figure 2 refers to Figure 2 from [this paper](https://arxiv.org/pdf/1907.00325.pdf)* Import Required Packagesimport numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.calibration import CalibratedClassifierCV from proglearn.forest import UncertaintyForest from functions.unc_forest_tutorials_functions import generate_data_fig2, cart_estimate, true_cond_entropy, format_func, estimate_ce, get_cond_entropy_vs_n, get_cond_entropy_vs_mu, plot_cond_entropy_by_n, plot_cond_entropy_by_mu, plot_fig2Using TensorFlow backend.Specify Parameters# The following are two sets of parameters. # The first are those that were actually used to produce figure 2. # These take a long time to actually run since there are up to 6000 data points. # Below those, you'll find some testing parameters so that you can see the results more quickly. # Here are the "Real Parameters" # mus = [i * 0.5 for i in range(1, 11)] # effect_size = 1 # d1 = 1 # d2 = 20 # n1 = 3000 # n2 = 6000 # num_trials = 20 # num_plotted_trials = 10 # sample_sizes_d1 = range(300, 1201, 90) # sample_sizes_d2 = range(500, 3001, 250) # Here are the "Test Parameters" mus = [i * 0.5 for i in range(1, 3)] # range of means of the data (x-axis in right column) effect_size = 1 # mu for left column d1 = 1 # data dimensions = 1 d2 = 3 # data dimensions = 1, noise dimensions = 19 n1 = 100 # number of data points for top row, right column (d1) n2 = 110 # number of data points for bottom row, right column (d2) num_trials = 2 # number of trials to run num_plotted_trials = 2 # the number of "fainter" lines to be displayed on the figure sample_sizes_d1 = range(100, 120, 10) # range of data points for top row, left column (d1) sample_sizes_d2 = range(100, 130, 10) # range of data points for bottom row, left column (d1)Specify LearnersNow, we'll specify which learners we'll compare (by label). Figure 2 uses three different learners, which are further specified in the function `estimate_ce`, which returns estimates of conditional entropy for a given dataset (X, y) and type of learner.# Algorithms used to produce Figure 2 algos = [ { 'label': 'CART', 'title': 'CART Forest', 'color': "#1b9e77", }, { 'label': 'IRF', 'title': 'Isotonic Reg. Forest', 'color': "#fdae61", }, { 'label': 'UF', 'title': 'Uncertainty Forest', 'color': "#F41711", }, ]Plot Figure 2Finally, we'll run the code to obtain and plot the estimated conditional entropy vs. 
means and sample sizes (4 subplots).plot_fig2(num_plotted_trials, d1, d2, n1, n2, effect_size, algos, num_trials, sample_sizes_d1, sample_sizes_d2, mus)/home/ubuntu/EvaPL/ProgLearn/docs/tutorials/functions/unc_forest_tutorials_functions.py:192: RuntimeWarning: divide by zero encountered in log entropies = -np.sum(np.log(probs)*probs, axis = 1) /home/ubuntu/EvaPL/ProgLearn/docs/tutorials/functions/unc_forest_tutorials_functions.py:192: RuntimeWarning: invalid value encountered in multiply entropies = -np.sum(np.log(probs)*probs, axis = 1) /home/ubuntu/EvaPL/ProgLearn/docs/tutorials/functions/unc_forest_tutorials_functions.py:192: RuntimeWarning: divide by zero encountered in log entropies = -np.sum(np.log(probs)*probs, axis = 1) /home/ubuntu/EvaPL/ProgLearn/docs/tutorials/functions/unc_forest_tutorials_functions.py:192: RuntimeWarning: invalid value encountered in multiply entropies = -np.sum(np.log(probs)*probs, axis = 1) /home/ubuntu/EvaPL/ProgLearn/docs/tutorials/functions/unc_forest_tutorials_functions.py:192: RuntimeWarning: divide by zero encountered in log entropies = -np.sum(np.log(probs)*probs, axis = 1) /home/ubuntu[...]https://www.analyticsvidhya.com/blog/2018/08/dimensionality-reduction-techniques-python/import pandas as pd import numpy as np import matplotlib.pyplot as plt # get data from google.colab import files uploaded = files.upload()3.1 Missing Value Ratiotrain = pd.read_csv("Train_UWu5bXk.csv") # nuls ? train.isnull().sum()/len(train)*100 # bo % nuls = train.isnull().sum()/len(train)*100 vars = train.columns var = [] for i in range(0,12): if (nuls[i]<=20): var.append(vars[i]) varSo the variables to be used are stored in “variable”, which contains only those features where the missing values are less than 20%. 3.2 Low Variance Filter Let’s first impute the missing values in the Item_Weight column using the median value of the known Item_Weight observationstrain['Item_Weight'].fillna(train['Item_Weight'].median(), inplace=True) train['Outlet_Size'].fillna(train['Outlet_Size'].mode()[0], inplace=True) train.isnull().sum()/len(train)*100calculate all varible variance :train.var()As the above output shows, the variance of Item_Visibility is very less as compared to the other variables.# get numeric columns num = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] numeric = train.select_dtypes(include=num) var = numeric.var() numeric = numeric.columns vars = [] for i in range(0, len(var)): if (var[i] >= 10): vars.append(numeric[i]) vars3.3 High Correlation filter High correlation between two variables means they have similar trends and are likely to carry similar information. This can bring down the performance of some models drastically (linear and logistic regression models, for instance). We can calculate the correlation between independent numerical variables that are numerical in nature. If the correlation coefficient crosses a certain threshold value, we can drop one of the variables (dropping a variable is highly subjective and should always be done keeping the domain in mind).train.corr()**As a general guideline, we should keep those variables which show a decent or high correlation with the target variable.** Wonderful, we don’t have any variables with a high correlation in our dataset. Generally, if the correlation between a pair of variables is greater than 0.5-0.6, we should seriously consider dropping one of those variables. 3.4 Random Forest Random Forest is one of the most widely used algorithms for feature selection. 
It comes packaged with in-built feature importance so you don’t need to program that separately. This helps us select a smaller subset of features.We need to convert the data into numeric form by applying one hot encoding, as Random Forest (Scikit-Learn Implementation) takes only numeric inputs. Let’s also drop the ID variables (Item_Identifier and Outlet_Identifier) as these are just unique numbers and hold no significant importance for us currently.from sklearn.ensemble import RandomForestRegressor df=train.drop('Item_Outlet_Sales', 1) df = df.drop(['Item_Identifier', 'Outlet_Identifier'], axis=1) model = RandomForestRegressor(random_state=1, max_depth=10) # one hot encoding df = pd.get_dummies(df) model.fit(df, train.Item_Outlet_Sales) features = df.columns importances = model.feature_importances_ indices = np.argsort(importances)[-9:] # top 10 features plt.title('Feature Importances') plt.barh(range(len(indices)), importances[indices], color='b', align='center') plt.yticks(range(len(indices)), [features[i] for i in indices]) plt.xlabel('Relative Importance') plt.show()Based on the above graph, we can hand pick the top-most features to reduce the dimensionality in our dataset. Alernatively, we can use the SelectFromModel of sklearn to do so. It selects the features based on the importance of their weights.from sklearn.feature_selection import SelectFromModel feature = SelectFromModel(model) fit = feature.fit_transform(df, train.Item_Outlet_Sales)3.5 Backward Feature Elimination -We first take all the n variables present in our dataset and train the model using them -We then calculate the performance of the model -Now, we compute the performance of the model after eliminating each variable (n times), i.e., we drop one variable every time and train the model on the remaining n-1 variables -We identify the variable whose removal has produced the smallest (or no) change in the performance of the model, and then drop that variable -Repeat this process until no variable can be droppedfrom sklearn.linear_model import LinearRegression from sklearn.feature_selection import RFE from sklearn import datasets lreg = LinearRegression() rfe = RFE(lreg, 10) rfe = rfe.fit_transform(df, train.Item_Outlet_Sales) rfe3.6 Forward Feature Selection -We start with a single feature. Essentially, we train the model n number of times using each feature separately -The variable giving the best performance is selected as the starting variable -Then we repeat this process and add one variable at a time. The variable that produces the highest increase in performance is retained -We repeat this process until no significant improvement is seen in the model’s performancefrom sklearn.feature_selection import f_regression ffs = f_regression(df,train.Item_Outlet_Sales ) ffsThis returns an array containing the F-values of the variables and the p-values corresponding to each F value. For our purpose, we will select the variables having F-value greater than 1variable = [ ] for i in range(0,len(df.columns)-1): if ffs[0][i] >=10: variable.append(df.columns[i])**NOTE : Both Backward Feature Elimination and Forward Feature Selection are time consuming and computationally expensive.They are practically only used on datasets that have a small number of input variables.** 3.7 Factor Analysis Suppose we have two variables: Income and Education. 
These variables will potentially have a high correlation as people with a higher education level tend to have significantly higher income, and vice versa.In the Factor Analysis technique, variables are grouped by their correlations, i.e., all variables in a particular group will have a high correlation among themselves, but a low correlation with variables of other group(s). Here, each group is known as a factor. These factors are small in number as compared to the original dimensions of the data. However, these factors are difficult to observe.import pandas as pd import numpy as np from glob import glob import cv2 import tensorflow as tf (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train.shape image = [] for i in range(0,60000): img = x_train[i].flatten() image.append(img) x_train = np.array(image) x_train.shape y_train.shape feat_cols = [ 'pixel'+str(i) for i in range(x_train.shape[1]) ] df = pd.DataFrame(x_train, columns=feat_cols) df['label'] = y_train from sklearn.decomposition import FactorAnalysis FA = FactorAnalysis(n_components = 3).fit_transform(df[feat_cols].values)Here, n_components will decide the number of factors in the transformed data. After transforming the data, it’s time to visualizeimport matplotlib.pyplot as plt plt.figure(figsize=(12,8)) plt.title('Factor Analysis Components') plt.scatter(FA[:,0], FA[:,1]) plt.scatter(FA[:,1], FA[:,2]) plt.scatter(FA[:,2],FA[:,0])3.8 Principal Component Analysis (PCA) PCA is a technique which helps us in extracting a new set of variables from an existing large set of variables. These newly extracted variables are called Principal Components. -A principal component is a linear combination of the original variables -Principal components are extracted in such a way that the first principal component explains maximum variance in the dataset -Second principal component tries to explain the remaining variance in the dataset and is uncorrelated to the first principal component -Third principal component tries to explain the variance which is not explained by the first two principal components and so onrndperm = np.random.permutation(df.shape[0]) plt.gray() fig = plt.figure(figsize=(20,10)) for i in range(0,15): ax = fig.add_subplot(3,5,i+1) ax.matshow(df.loc[rndperm[i],feat_cols].values.reshape((28,28)).astype(float)) from sklearn.decomposition import PCA pca = PCA(n_components=50) pca_result = pca.fit_transform(df[feat_cols].values)In this case, n_components will decide the number of principal components in the transformed data. Let’s visualize how much variance has been explained using these 10 components.plt.plot(range(50), pca.explained_variance_ratio_) plt.plot(range(50), np.cumsum(pca.explained_variance_ratio_)) plt.title("Component-wise and Cumulative Explained Variance")In the above graph, the blue line represents component-wise explained variance while the orange line represents the cumulative explained variance. We are able to explain around 80% variance in the dataset using just 50 components. Each additional dimension we add to the PCA technique captures less and less of the variance in the model. The first component is the most important one, followed by the second, then the third, and so on. **SDV** We can also use Singular Value Decomposition (SVD) to decompose our original dataset into its constituents, resulting in dimensionality reduction.SVD decomposes the original variables into three constituent matrices. It is essentially used to remove redundant features from the dataset. 
It uses the concept of Eigenvalues and Eigenvectors to determine those three matrices.from sklearn.decomposition import TruncatedSVD svd = TruncatedSVD(n_components=3, random_state=42).fit_transform(df[feat_cols].values) plt.figure(figsize=(12,8)) plt.title('SVD Components') plt.scatter(svd[:,0], svd[:,1]) plt.scatter(svd[:,1], svd[:,2]) plt.scatter(svd[:,2],svd[:,0])The above scatter plot shows us the decomposed components very neatly. As described earlier, there is not much correlation between these components. 3.9 Independent Component Analysis Independent Component Analysis (ICA) is based on information-theory and is also one of the most widely used dimensionality reduction techniques. The major difference between PCA and ICA is that PCA looks for uncorrelated factors while ICA looks for independent factors. If two variables are uncorrelated, it means there is no linear relation between them. If they are independent, it means they are not dependent on other variables. For example, the age of a person is independent of what that person eats, or how much television he/she watches. This algorithm assumes that the given variables are linear mixtures of some unknown latent variables. It also assumes that these latent variables are mutually independent, i.e., they are not dependent on other variables and hence they are called the independent components of the observed data.from sklearn.decomposition import FastICA ICA = FastICA(n_components=3, random_state=12) X=ICA.fit_transform(df[feat_cols].values) plt.figure(figsize=(12,8)) plt.title('ICA Components') plt.scatter(X[:,0], X[:,1]) plt.scatter(X[:,1], X[:,2]) plt.scatter(X[:,2], X[:,0])The data has been separated into different independent components which can be seen very clearly in the above image. X-axis and Y-axis represent the value of decomposed independent components. 3.10 Methods Based on Projections In projection techniques, multi-dimensional data is represented by projecting its points onto a lower-dimensional space. We will perform non-linear dimensionality reduction through Isometric Mapping. For visualization, we will only take a subset of our dataset as running it on the entire dataset will require a lot of time.from sklearn import manifold trans_data = manifold.Isomap(n_neighbors=5, n_components=3, n_jobs=-1).fit_transform(df[feat_cols][:6000].values)n_neighbors decides the number of neighbors for each point n_components decides the number of coordinates for manifold n_jobs = -1 will use all the CPU cores availableplt.figure(figsize=(12,8)) plt.title('Decomposition using ISOMAP') plt.scatter(trans_data[:,0], trans_data[:,1]) plt.scatter(trans_data[:,1], trans_data[:,2]) plt.scatter(trans_data[:,2], trans_data[:,0])3.11 t- Distributed Stochastic Neighbor Embedding (t-SNE)from sklearn.manifold import TSNE tsne = TSNE(n_components=3, n_iter=300).fit_transform(df[feat_cols][:6000].values) plt.figure(figsize=(12,8)) plt.title('t-SNE components') plt.scatter(tsne[:,0], tsne[:,1]) plt.scatter(tsne[:,1], tsne[:,2]) plt.scatter(tsne[:,2], tsne[:,0])3.12 UMAPimport umap umap_data = umap.UMAP(n_neighbors=5, min_dist=0.3, n_components=3).fit_transform(df[feat_cols][:6000].values) plt.figure(figsize=(12,8)) plt.title('Decomposition using UMAP') plt.scatter(umap_data[:,0], umap_data[:,1]) plt.scatter(umap_data[:,1], umap_data[:,2]) plt.scatter(umap_data[:,2], umap_data[:,0])As mentioned in UMAP’s GitHub repository, it often performs better at preserving aspects of the global structure of the data than t-SNE. 
This means that it can often provide a better “big picture” view of the data as well as preserving local neighbor relations.My first model is overfitting. New ModelY = titanic_df['Survived'] X = titanic_df[['age*fare', 'Parch', 'Embarked', 'Fare']] lrm = linear_model.LinearRegression() lrm.fit(X, Y) print('\nCoefficients: \n', lrm.coef_) print('\nIntercept: \n', lrm.intercept_) X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 465) print("The number of observations in training set is {}".format(X_train.shape[0])) print("The number of observations in test set is {}".format(X_test.shape[0])) # We add constant to the model as it's a best practice # to do so everytime! X_train = sm.add_constant(X_train) # We fit an OLS model using statsmodels results = sm.OLS(y_train, X_train).fit() # We print the summary results display(results.summary()) X_test = sm.add_constant(X_test) # We are making predictions here y_preds = results.predict(X_test) plt.scatter(y_test, y_preds) plt.plot(y_test, y_test, color="red") plt.xlabel("true values") plt.ylabel("predicted values") plt.title("Charges: true and predicted values") plt.show() print("Mean absolute error of the prediction is: {}".format(mean_absolute_error(y_test, y_preds))) print("Mean squared error of the prediction is: {}".format(mse(y_test, y_preds))) print("Root mean squared error of the prediction is: {}".format(rmse(y_test, y_preds))) print("Mean absolute percentage error of the prediction is: {}".format(np.mean(np.abs((y_test - y_preds) / y_test)) * 100)) from sklearn.linear_model import LogisticRegression log_reg = LogisticRegression(solver='lbfgs', multi_class="ovr") from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=111) log_reg.fit(X_train, y_train) train_accuracy = log_reg.score(X_train, y_train) test_accuracy = log_reg.score(X_test, y_test) print('One-vs.-Rest', '-'*30, 'Accuracy on Train Data : {:.2f}'.format(train_accuracy), 'Accuracy on Test Data : {:.2f}'.format(test_accuracy), sep='\n') log_reg_mnm = LogisticRegression(multi_class='multinomial', solver='lbfgs') log_reg_mnm.fit(X_train, y_train) train_accuracy = log_reg_mnm.score(X_train, y_train) test_accuracy = log_reg_mnm.score(X_test, y_test) print('Multinomial (Softmax)', '-'*20, 'Accuracy on Train Data : {:.2f}'.format(train_accuracy), 'Accuracy on Test Data : {:.2f}'.format(test_accuracy), sep='\n')Multinomial (Softmax) -------------------- Accuracy on Train Data : 0.66 Accuracy on Test Data : 0.65Now my train and test scores came out as I wantedC_values = [0.001,0.01, 0.1,1,10,100, 1000] accuracy_values = pd.DataFrame(columns=['C_values', 'Train Accuracy', 'Test Accuracy']) for c in C_values: # Apply logistic regression model to training data lr = LogisticRegression(penalty = 'l2', C = c, random_state = 0, solver='lbfgs', multi_class='multinomial') lr.fit(X_train, y_train) accuracy_values = accuracy_values.append({'C_values': c, 'Train Accuracy': lr.score(X_train, y_train), 'Test Accuracy': lr.score(X_test, y_test) }, ignore_index=True) display(accuracy_values)It looks like C=0.001 is the best option for this model. 
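Rather than eyeballing the train/test accuracies for each C, the same search can be run with cross-validation; here is a minimal sketch using scikit-learn's GridSearchCV (it assumes the X_train/y_train split created above, and the 5-fold accuracy scoring is an illustrative choice):
```
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
grid = GridSearchCV(
    LogisticRegression(penalty='l2', solver='lbfgs', multi_class='multinomial'),
    param_grid, cv=5, scoring='accuracy'
)
grid.fit(X_train, y_train)

# Best regularization strength and its cross-validated accuracy
print(grid.best_params_, grid.best_score_)
```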
Performance Metricsfrom sklearn import datasets titanic = titanic_df X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=111) titanic_prediction_model = LogisticRegression() titanic_prediction_model.fit(X_train, y_train) train_prediction = titanic_prediction_model.predict(X_train) test_prediction = titanic_prediction_model.predict(X_test) test_prediction_probability = titanic_prediction_model.predict_proba(X_test)[:,1] from sklearn.metrics import confusion_matrix confusion_matrix_train = confusion_matrix(y_train, train_prediction) confusion_matrix_test = confusion_matrix(y_test, test_prediction) print("Confusion Matrix (Train Data)", "-"*30, confusion_matrix_train, sep="\n") print("\n\nConfusion Matrix (Test Data)", "-"*30, confusion_matrix_test, sep="\n") TN = confusion_matrix_test[0][0] TP = confusion_matrix_test[1][1] FN = confusion_matrix_test[1][0] FP = confusion_matrix_test[0][1] print("True negative amount :", TN) print("True positive amount :", TP) print("False positive amount :", FP) print("False negative amount :", FN)True negative amount : 100 True positive amount : 16 False positive amount : 13 False negative amount : 50Accuracyfrom sklearn.metrics import accuracy_score print("Accuracy value by the model : ", titanic_prediction_model.score(X_test, y_test)) print("Calculated accuracy value : ", (TN + TP)/(FN + FP + TN + TP)) print("accuracy_score() value : ", accuracy_score(y_test, test_prediction))Accuracy value by the model : 0.6480446927374302 Calculated accuracy value : 0.6480446927374302 accuracy_score() value : 0.6480446927374302Error Rateprint("Error rate value by the model : ", 1 - titanic_prediction_model.score(X_test, y_test)) print("Calculated error rate : ", 1 - (TN + TP)/(FN + FP + TN + TP)) print("error_rate_score() value : ", 1 - accuracy_score(y_test, test_prediction))Error rate value by the model : 0.35195530726256985 Calculated error rate : 0.35195530726256985 error_rate_score() value : 0.35195530726256985Precisionfrom sklearn.metrics import precision_score print("Calculated precision value : ", (TP)/(FP + TP)) print("precision_score() value : ", precision_score(y_test, test_prediction))Calculated precision value : 0.5517241379310345 precision_score() value : 0.5517241379310345Recallfrom sklearn.metrics import recall_score print("Calculated recall value : ", (TP)/(TP + FN)) print("recall_score() value : ", recall_score(y_test, test_prediction))Calculated recall value : 0.24242424242424243 recall_score() value : 0.24242424242424243Specificityprint("Calculated specificity value : ", (TN)/(TN + FP)) from sklearn.metrics import f1_score precision = precision_score(y_test, test_prediction) recall = recall_score(y_test, test_prediction) print("Calculated F1 score : ", 2*((recall*precision)/(recall + precision))) print("f1_score() value : ", f1_score(y_test, test_prediction))Calculated F1 score : 0.33684210526315794 f1_score() value : 0.33684210526315794General Viewfrom sklearn.metrics import classification_report, precision_recall_fscore_support print(classification_report(y_test, test_prediction)) print("f1_score() value : {:.2f}".format(f1_score(y_test, test_prediction))) print("recall_score() value : {:.2f}".format(recall_score(y_test, test_prediction))) print("precision_score() value : {:.2f}".format(precision_score(y_test, test_prediction))) print('\n') metrics = precision_recall_fscore_support(y_test, test_prediction) print("Precision :" , metrics[0]) print("Recall :" , metrics[1]) print("F1 Score :" , metrics[2])precision 
recall f1-score support 0 0.67 0.88 0.76 113 1 0.55 0.24 0.34 66 accuracy 0.65 179 macro avg 0.61 0.56 0.55 179 weighted avg 0.62 0.65 0.60 179 f1_score() value : 0.34 recall_score() value : 0.24 precision_score() value : 0.55 Precision : [0.66666667 0.55172414] Recall : [0.88495575 0.24242424] F1 Score : [0.76045627 0.33684211]I messed up while predicting the survivors. Fortunately, I can better guess who died :) ROC / AUCtest_prediction_proba = titanic_prediction_model.predict_proba(X_test)[:,1] from sklearn.metrics import roc_curve, roc_auc_score fpr, tpr, thresholds = roc_curve(y_test, test_prediction_proba) import matplotlib.pyplot as plt # Plot ROC curve plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr, tpr) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC Curve') plt.show() print('AUC value : ', roc_auc_score(y_test, test_prediction_proba))AUC value : 0.6924108340037544This AUC score is far better than I expected. Precision/Recall Curvesfrom sklearn.metrics import precision_recall_curve precision, recall, _ = precision_recall_curve(y_test, test_prediction_proba) plt.plot(recall, precision) plt.xlabel('Recall') plt.ylabel('Precision') plt.title('Precision / Recall Curve') plt.show()I made my first mistake at the very beginning, and that's not good. Logarithmic Lossfrom sklearn.metrics import log_loss print("Logarithmic Loss (log-loss) : " , log_loss(y_test, test_prediction_proba)) print("Error Rate : " , 1- accuracy_score(y_test, test_prediction))Logarithmic Loss (log-loss) : 0.6230488109026635 Error Rate : 0.35195530726256985My model is pretty good at making mistakes :) Anyway, I like my model. The results are better than I expected. Different C valueslr = LogisticRegression(penalty = 'l2', C = 0.001, random_state = 0, solver='lbfgs', multi_class='multinomial') lr.fit(X_train, y_train) Y = titanic_df['Survived'] X = titanic_df[['age*fare', 'Parch', 'Embarked', 'Fare']] lrm = linear_model.LinearRegression() lrm.fit(X, Y) print('\nCoefficients: \n', lrm.coef_) print('\nIntercept: \n', lrm.intercept_)Coefficients: [-1.88622862e-05 1.53275069e-02 6.18025050e-02 3.13192177e-03] Intercept: 0.29094274854881813Toy demoThis notebook shows a simple demo to play with _Mint_. A CNN model with one convolutional layer and two linear layers is trained on around 8,000 samples from the MNIST dataset.
The modules available up to now:- **Basic**: Conv2d, Linear, Sequential- **Pooling**: MaxPool2d- **Activation**: ReLU- **Transform**: Flat- **Loss**: CrossEntropy- **Optimizer**: SGD Importsimport time import numpy as np import mint.modules as mo import mint.optim as optim from mint.data.utils import * from mint.data.mnist import load_mnist_datasets import matplotlib.pyplot as pltLoad datatrain_set, val_set, test_set = load_mnist_datasets() ## reshape data train_x, val_x = np.reshape(train_set[0],(-1,1,28,28)), np.reshape(val_set[0],(-1,1,28,28)), test_x = np.reshape(test_set[0],(-1,1,28,28)) ## map labels to categories train_y, val_y, test_y = train_set[1], val_set[1], test_set[1]Hyper-parametersbatch_size = 64 epochs = 1 lr = 1e-2 moment = 0.9 weight_decay = 1e-4Model designmodel = mo.Sequential( mo.Conv2d(in_channels=1, out_channels=5, kernel_size=3), mo.ReLU(in_place=True), mo.MaxPool2d(kernel_size=4, stride=4), # mo.Conv2d(in_channels=5, out_channels=5, kernel_size=3), # mo.ReLU(in_place=True), # mo.MaxPool2d(kernel_size=2, stride=2), mo.Flat(), mo.Linear(5*7*7, 64), mo.ReLU(in_place=True), mo.Linear(64, 10), )Objevtice function & optimizerobjective = mo.CrossEntropy() optimizer = optim.SGD(model.modules, lr=lr, moment=moment, weight_decay=weight_decay)Trainingloss = list() for epoch in range(epochs): random_idxs = get_random_idxs(batch_size, len(train_y)) current = time.time() model.train() for batch in range(len(random_idxs)): data, label = next_batch((train_x, train_y), batch, random_idxs) output = model.forward(data) optimizer.zero_grad() loss.append(objective.forward(output, label)) model.backward(objective.backward()) optimizer.step() if (batch+1) % 10 == 0: _correct = correct(output, label) print("[epoch %d][%d, %d] loss=%.2f, accuracy=%.2f, elapse time=%.2fs." % (epoch+1, batch+1, len(random_idxs), loss[-1], _correct * 1. / label.shape[0], time.time() - current)) current = time.time() if (batch+1) % 50 == 0: optimizer.defaults['lr'] *= 0.7 if batch == 256: ## eval model.eval() _correct = 0. idxs = get_deter_idxs(batch_size, len(test_y)) current = time.time() for batch in range(len(idxs)): data, label = next_batch((test_x, test_y), batch, idxs) output = model.forward(data) _correct += correct(output.data, label) _accuracy = _correct / len(test_y) print("[epoch %d]Test accuracy=%.2f, elapse time=%.2fs." % (epoch+1, _accuracy, time.time() - current)) break # too slow....[epoch 1][10, 782] loss=2.19, accuracy=0.27, elapse time=37.46s. [epoch 1][20, 782] loss=2.20, accuracy=0.16, elapse time=38.06s. [epoch 1][30, 782] loss=1.89, accuracy=0.53, elapse time=37.84s. [epoch 1][40, 782] loss=1.63, accuracy=0.56, elapse time=37.81s. [epoch 1][50, 782] loss=1.15, accuracy=0.66, elapse time=37.66s. [epoch 1][60, 782] loss=0.82, accuracy=0.77, elapse time=37.71s. [epoch 1][70, 782] loss=0.80, accuracy=0.80, elapse time=37.86s. [epoch 1][80, 782] loss=0.48, accuracy=0.84, elapse time=37.69s. [epoch 1][90, 782] loss=0.57, accuracy=0.80, elapse time=37.87s. [epoch 1][100, 782] loss=0.57, accuracy=0.81, elapse time=37.79s. [epoch 1][110, 782] loss=0.66, accuracy=0.78, elapse time=38.34s. [epoch 1][120, 782] loss=0.50, accuracy=0.86, elapse time=37.72s. [epoch 1][130, 782] loss=0.62, accuracy=0.81, elapse time=37.79s. [epoch 1][140, 782] loss=0.63, accuracy=0.83, elapse time=38.11s. [epoch 1][150, 782] loss=0.73, accuracy=0.75, elapse time=37.66s. 
[epoch 1][160, 782][...]Loss curvebatches = np.arange(0, 257) plt.plot(batches, loss) plt.show()Random testidx = np.random.choice(test_x.shape[0], 1) x, y = test_x[idx], test_y[idx] model.eval() y_predict = model.forward(x) plt.figure(figsize=(3,3)) plt.imshow(np.reshape(x,(28,28))) plt.show() print("ground truth:{}, prediction:{}".format(y, np.argmax(y_predict)))Plotting Tools# read in data event_dir = '../tests/data/cwb' streams = [] for filename in glob.glob(event_dir + '/*'): streams += [read_data(filename)]Variations of Arias Intensity Plots Full default plot:axes = plot_arias(streams[3])Combining multiple plots:fig, axs = plt.subplots(len(streams), 3, figsize=(15,10)) axs = axs.flatten() idx = 0 for stream in streams: axs = plot_arias(stream, axes=axs, axis_index=idx, minfontsize=15, show_maximum=False, title="18km NNE of Hualian, Taiwan") idx += 3Variations of Duration Plots Full default plot:durations = [(0.05, 0.75), (0.2, 0.8), (0.05, .95)] axes = plot_durations(streams[3], durations)Combining multiple plots:fig, axs = plt.subplots(len(streams), 3, figsize=(15,10)) axs = axs.flatten() idx = 0 for stream in streams: axs = plot_durations(stream, durations, axes=axs, axis_index=idx, minfontsize=15, title="18km NNE of Hualian, Taiwan") idx += 3Moveout plots Full default plot:epicenter_lat = 24.14 epicenter_lon = 121.69 fig, ax = plot_moveout(streams, epicenter_lat, epicenter_lon, 'BN1')Moveout with scaling and specified colormap:The data is automatically normalized if a scaling factor is specified.fig, ax = plot_moveout(streams, epicenter_lat, epicenter_lon, 'BN1', cmap='nipy_spectral_r', figsize=(15, 10), minfontsize=16, normalize=True, scale=10)TnTimport nltk nltk.download('treebank') train_data = nltk.corpus.treebank.tagged_sents()[:30] test_data = nltk.corpus.treebank.tagged_sents()[3000:] TnT = nltk.tag.tnt.TnT() TnT.train(train_data) 'accuracy: ' + str(round(TnT.evaluate(test_data), 3)) TnT.tag(['the', 'men', 'attended', 'to', 'the', 'meetings'])Import pose estimation model Define output formatLet's load the JSON file which describes the human pose task. This is in COCO format, it is the category descriptor pulled from the annotations file. We modify the COCO category slightly, to add a neck keypoint. We will use this task description JSON to create a topology tensor, which is an intermediate data structure that describes the part linkages, as well as which channels in the part affinity field each linkage corresponds to.import os os.environ['MPLCONFIGDIR'] = os.getcwd() + "/configs/" # Specify MatplotLib config folder import json import numpy as np # Requiere https://github.com/NVIDIA-AI-IOT/trt_pose import trt_pose.coco from trt_pose.draw_objects import DrawObjects from trt_pose.parse_objects import ParseObjects with open('human_pose.json', 'r') as f: human_pose = json.load(f) topology = trt_pose.coco.coco_category_to_topology(human_pose) parse_objects = ParseObjects(topology) draw_objects = DrawObjects(topology)Matplotlib is building the font cache; this may take a moment.Import TensorRT optimized modelNext, we'll load our model. It has been optimized using another Notebook and saved so that we do not need to perform optimization again, we can just load the model. 
Please note that TensorRT has device specific optimizations, so you can only use an optimized model on similar platforms.import torch # Requiere https://github.com/NVIDIA-AI-IOT/torch2trt from torch2trt import TRTModule OPTIMIZED_MODEL = 'resnet18_baseline_att_224x224_A_epoch_249_trt.pth' model_trt = TRTModule() model_trt.load_state_dict(torch.load(OPTIMIZED_MODEL))Define video-processing pipeline Pre-process image for TRT_PoseNext, let's define a function that will preprocess the image, which is originally in BGR8 / HWC format. It is formated to the default Torch format.import cv2 import torchvision.transforms as transforms import PIL.Image mean = torch.Tensor([0.485, 0.456, 0.406]).cuda() std = torch.Tensor([0.229, 0.224, 0.225]).cuda() device = torch.device('cuda') def preprocess(image): global device device = torch.device('cuda') image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = PIL.Image.fromarray(image) image = transforms.functional.to_tensor(image).to(device) image.sub_(mean[:, None, None]).div_(std[:, None, None]) return image[None, ...]Access video feedAccess images streamed by a WiFi camera on the local network.WIDTH_INPUT, HEIGHT_INPUT = 224, 224 # Imposed by the model import ipywidgets from IPython.display import display import urllib.request image_w = ipywidgets.Image(format='jpeg') display(image_w) url_esp32 = 'http://192.168.0.163/capture' url_IPcam = 'http://192.168.0.244:8080/photo.jpg' def fetch_image(url): imgResp = urllib.request.urlopen(url) imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8) img = cv2.imdecode(imgNp,-1) img_height, img_width, img_channel = img.shape if img_width>img_height: img = cv2.resize(img, (int((HEIGHT_INPUT/img_height)*img_width), HEIGHT_INPUT), interpolation = cv2.INTER_AREA) img = img[:, img.shape[1]//2 - WIDTH_INPUT//2 : img.shape[1]//2 + WIDTH_INPUT//2] else: img = cv2.resize(img, (WIDTH_INPUT, int((WIDTH_INPUT/img_width)*img_height)), interpolation = cv2.INTER_AREA) img = img[img.shape[0] - HEIGHT_INPUT//2 : img.shape[0] + HEIGHT_INPUT//2,:] return imgGet keypoints with TRT-Posedef get_keypoints(counts, objects, peak, indexBody=0): #if indexBody= 0: peak = peaks[0][j][k] # peak[1]:width, peak[0]:height kpoint.append([float(peak[1]),float(peak[0])]) #print('indexBody:%d : success [%5.3f, %5.3f]'%(j, peak[1], peak[2]) ) else: kpoint.append([None, None]) #print('indexBody:%d : None'%(j) ) return np.array(kpoint) def get_cmap_paf(image): data = preprocess(image) cmap, paf = model_trt(data) cmap, paf = cmap.detach().cpu(), paf.detach().cpu() return cmap, pafGet label with pose-classification-kitfrom tensorflow import keras from keras.utils.data_utils import get_file classificationModelURL = "https://github.com/ArthurFDLR/pose-classification-kit/blob/master/pose_classification_kit/models/Body/CNN_BODY18_1/CNN_BODY18_1.h5?raw=true" classificationModelPath = get_file( "CNN_BODY18_1", classificationModelURL ) classificationModel = keras.models.load_model(classificationModelPath) classificationModel.summary() classificationLabelsURL = "https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/pose_classification_kit/models/Body/CNN_BODY18_1/class.json" classificationLabelsPath = get_file( "CNN_BODY18_1_Info", classificationLabelsURL ) with open(classificationLabelsPath) as f: classificationLabels = json.load(f)['labels'] print("labels:", classificationLabels) def getLengthLimb(data, keypoint1: int, keypoint2: int): if type(data[keypoint1, 0]) != type(None) and type(data[keypoint2, 0]) != type(None): return 
np.linalg.norm([data[keypoint1, 0:2] - data[keypoint2, 0:2]]) return 0 def preprocess_keypoints(keypoints:np.ndarray): if type(keypoints) != type(None): assert keypoints.shape == (18,2) # Find bounding box min_x, max_x = float("inf"), 0.0 min_y, max_y = float("inf"), 0.0 for k in keypoints: if type(k[0]) != type(None): # If keypoint exists min_x = min(min_x, k[0]) max_x = max(max_x, k[0]) min_y = min(min_y, k[1]) max_y = max(max_y, k[1]) # Centering np.subtract( keypoints[:, 0], (min_x + max_x) / 2., where=keypoints[:, 0] != None, out=keypoints[:, 0], ) np.subtract( (min_y + max_y) / 2., keypoints[:, 1], where=keypoints[:, 0] != None, out=keypoints[:, 1], ) # Scaling normalizedPartsLength = np.array( [ getLengthLimb(keypoints, 6, 12) * (16.0 / 5.2), # Torso right getLengthLimb(keypoints, 5, 11) * (16.0 / 5.2), # Torso left getLengthLimb(keypoints, 0, 17) * (16.0 / 2.5), # Neck getLengthLimb(keypoints, 12, 14) * (16.0 / 3.6), # Right thigh getLengthLimb(keypoints, 14, 16) * (16.0 / 3.5), # Right lower leg getLengthLimb(keypoints, 11, 13) * (16.0 / 3.6), # Left thigh getLengthLimb(keypoints, 13, 15) * (16.0 / 3.5), # Left lower leg ] ) # Mean of non-zero lengths normalizedPartsLength = normalizedPartsLength[normalizedPartsLength > 0.0] if len(normalizedPartsLength)>0: scaleFactor = np.mean(normalizedPartsLength) else: return None # Populate None keypoints with 0s keypoints[keypoints == None] = 0.0 # Normalize np.divide(keypoints, scaleFactor, out=keypoints[:, 0:2]) if np.any((keypoints > 1.0) | (keypoints < -1.0)): #print("Scaling error") return None return keypoints.astype('float32') else: return NoneModel: "CNN_BODY18_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv1d_3 (Conv1D) (None, 16, 16) 112 _________________________________________________________________ dropout_8 (Dropout) (None, 16, 16) 0 _________________________________________________________________ conv1d_4 (Conv1D) (None, 14, 32) 1568 _________________________________________________________________ dropout_9 (Dropout) (None, 14, 32) 0 _________________________________________________________________ conv1d_5 (Conv1D) (None, 12, 32) 3104 _________________________________________________________________ dropout_10 (Dropout) (None, 12, 32) 0 ______________________________________________________[...]Main Processing loop- Read image- Infere keypoints- Infere label- Send MQTT update- Draw skeleton on the input image- Update in output window.try: while True: # Get image image = fetch_image(url_esp32) # TRT-Pose inference cmap, paf = get_cmap_paf(image) counts, objects, peaks = parse_objects(cmap, paf) keypoints = get_keypoints(counts, objects, peaks) # Classification inference label_pose = None keypoints = preprocess_keypoints(keypoints) if type(keypoints) != type(None): prediction = classificationModel.predict(x=np.array([keypoints])) label_pose = classificationLabels[np.argmax(prediction)] # Display image locally draw_objects(image, counts, objects, peaks) if label_pose: image = cv2.putText(image, label_pose, (10,20), cv2.FONT_HERSHEY_SIMPLEX, .7, (255, 0, 0), 1, cv2.LINE_AA) image_w.value = bytes(cv2.imencode('.jpg', image[:, :, :])[1]) except KeyboardInterrupt: print('Video processing stopped')Video processing stoppedAgora vamos falar de aprendizado supervisionado, usando um algoritmo clássico: Support Vector Machine (SVM).#Importando bibliotecas básicas import numpy as np import pandas as pd import 
matplotlib.pyplot as pltSegue o link para visualizar os dados: https://github.com/gilvandrocesardemedeiros/GEDataScience/raw/master/Codes/Data/INMET-Dados_Diarios_PortoAlegre_1980-2017.csv#Lendo os dados dataSet = pd.read_csv("https://github.com/gilvandrocesardemedeiros/GEDataScience/raw/master/Codes/Data/INMET-Dados_Diarios_PortoAlegre_1980-2017.csv", skiprows = 16, sep=';') #Transformando data em variável do tipo datetime dataSet["Data"] = pd.to_datetime(dataSet["Data"], format = "%d/%m/%Y") #Visualizando cabeçalho dos dados dataSet.head() #Visualizando descrição resumida dos dados dataSet.describe()Esse problema encontrado agora na base de dados já foi enfrentado anteriormente, portanto, vamos agilizar o processo com a função "OrganizarDados"def OrganizarDados(dataSet): #Transformando data em variável do tipo datetime dataSet["Data"] = pd.to_datetime(dataSet["Data"], format = "%d/%m/%Y") #Atribuindo "Data" como índice para a base de dados dataSet = dataSet.set_index("Data") #Separando os dados em dois dataframes, um para as 00:00 h e outro para as 12:00 h dataSet00, dataSet12 = dataSet[dataSet["Hora"] == 0], dataSet[dataSet["Hora"] == 1200] #Descartando as colunas "Hora", "Estacao" e "Unnamed: 11" dataSet00, dataSet12 = dataSet00.drop(columns = ["Hora", "Estacao", "Unnamed: 11"]), dataSet12.drop(columns = ["Hora", "Estacao", "Unnamed: 11"]) #Eliminando colunas completas por "NaN" em cada uma das bases de dados dataSet00 = dataSet00.drop(columns = ["Precipitacao","TempMinima"]) dataSet12 = dataSet12.drop(columns = ["TempMaxima","Insolacao","Evaporacao Piche","Temp Comp Media","Umidade Relativa Media","Velocidade do Vento Media"]) #Criando o intervalo completo de tempo de 01-01-2005 à 31-12-2017 dataInicial = '2005-01-01' dataFinal = '2017-12-31' tempo = pd.date_range(dataInicial, dataFinal) #Atribuindo este intervalo de tempo à um dataSet provisório dataSetProv = pd.DataFrame() dataSetProv["Data"] = tempo #Atribuindo o índice para o dataSet provisório como sendo a coluna de datas dataSetProv = dataSetProv.set_index("Data") #Mesclando o dataSet provisório como sendo o resultado da junção dos dataSet00 e dataSet12 dataSetProv = dataSetProv.join(dataSet00).join(dataSet12) #Tornando o dataSet como sendo o dataSet provisório dataSet = dataSetProv return dataSet #Aplicando a função ao dataSet dataSet = OrganizarDados(dataSet) #Tornando a informação de mês uma variável do dataSet dataSet["Mes"] = dataSet.index.month #Eliminando eventos inconsistentes da base de dados dataSet = dataSet.dropna() #Resentando o índice do dataSet dataSet = dataSet.reset_index(drop = True) #Visualizando cabeçalho dos dados dataSet.head() #Visualizando descrição resumida dos dados dataSet.describe() #Separando a coluna "Mes" do DataFrame mes = dataSet["Mes"].values dataSet = dataSet.drop(columns = "Mes") from sklearn.model_selection import train_test_split #Separando os conjuntos de treino e teste com os dados embaralhados, sendo 80% para treino e 20% para teste X_train, X_test, y_train, y_test = train_test_split(dataSet, mes, test_size = 0.2, random_state = 9) from sklearn.preprocessing import StandardScaler #Efetuando uma transformação no conjunto de dados - também já estudamos isso sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test)Vamos agora utilizar o algoritmo SVM para Classificação, disponibilizado na biblioteca Scikit-Learn. 
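Before fixing C and gamma by hand (as in the next cell), it can help to estimate how such a model generalises with cross-validation. This is only a sketch (not the notebook's original code), assuming the `dataSet` and `mes` variables prepared above; wrapping the scaler in a Pipeline refits it inside each fold so the validation fold never leaks into the scaling step.

```python
# Sketch, assuming `dataSet` (features) and `mes` (month labels) from above.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

svm_pipe = make_pipeline(
    StandardScaler(),                     # refit on each training fold
    SVC(C=10000000.0, kernel='rbf', gamma=0.0001, random_state=0),
)

scores = cross_val_score(svm_pipe, dataSet, mes, cv=5)
print("Fold accuracies :", scores.round(3))
print("Mean CV accuracy:", scores.mean().round(3))
```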
Segue link para a documentação:https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.htmlsklearn.svm.LinearSVCfrom sklearn.svm import SVC #Criação do objeto SVC como "classifier" classifier = SVC(C = 10000000.0, kernel = 'rbf', random_state = 0, gamma = 0.0001) #Treinando o modelo classificador classifier.fit(X_train, y_train) #Previsão para os valores de teste y_pred = classifier.predict(X_test) from sklearn.metrics import confusion_matrix #Avaliando o modelo com matriz de confusão confusion_matrix(y_test, y_pred) #Convertendo a matriz de confusão para um DataFrame, para facilitar a visualização cm = pd.DataFrame(confusion_matrix(y_test, y_pred)) #Declarando títulos para as colunas e os índices da matriz de confusão, para facilitar a interpretação cm.columns = ["Jan","Fev","Mar","Abr","Mai","Jun","Jul","Ago","Set","Out","Nov","Dez"] cm.index = ["Jan","Fev","Mar","Abr","Mai","Jun","Jul","Ago","Set","Out","Nov","Dez"] #Imprimindo a matriz de confusão cmSVCclf1 = SVC(C=10,gamma=10, kernel="rbf") ypred1 = clf1.predict(X) s1 = f1_score(y1, ypred1, average="weighted") clf2 = SVC(C=10,gamma=10, kernel="rbf").fit(X, y2) ypred2 = clf2.predict(X) s2 = f1_score(y2, ypred2, average="weighted") scores = cross_val_score(clf1, X, y2, cv=5, scoring="f1_weighted") print(scores) score = 100 * (s1+s2) / 2 print(s1,s2) print("Training f1 score: ", score) # ypred1 = clf1.predict(X_train) # s1 = f1_score(y_train1, ypred1, average="weighted") # ypred2 = clf2.predict(X_train) # s2 = f1_score(y_train2, ypred2, average="weighted") 0.9138437651219301 0.8973397476796859 Training f1 score: 90.55917564008081 X2 = df2.drop(columns=["pet_id"]) ypred11 = clf1.predict(X2) ypred22 = clf2.predict(X2) submission = pd.DataFrame({"pet_id": df2["pet_id"], "breed_category": ypred11, "pet_category": ypred22}) print(submission.shape) submission.to_csv("submission.csv", index=False)XGBoost# D_train1 = xgb.DMatrix(X, label=y1) # D_train2 = xgb.DMatrix(X, label=y2) # D_test1 = xgb.DMatrix(X_test, label=y_test1) # D_test2 = xgb.DMatrix(X_test, label=y_test2) # param1 = { # 'eta': 0.3, # 'max_depth': 6, # "gamma": 0.1, # 'objective': 'multi:softmax', # "nthread": 5, # # "lambda": 20, # # "alpha": 0.1, # "eval_metric": "mlogloss", # 'num_class': 5} # param2 = { # 'eta': 0.3, # 'max_depth': 6, # "gamma": 0.01, # # "lambda": 10, # # "alpha": 0.1, # 'objective': 'multi:softmax', # "eval_metric": "mlogloss", # 'num_class': 5} # steps = 20 # model1 = xgb.train(param1, D_train1, steps) # model2 = xgb.train(param2, D_train2, steps) model1 = xgb.XGBClassifier(learning_rate=0.1, objective='multi:softmax', n_estimators=1000, gamma=10, max_depth=6).fit(X,y1) model2 = xgb.XGBClassifier(learning_rate=0.1, objective='multi:softmax', n_estimators=1000, gamma=10, max_depth=6).fit(X,y2) train_preds1 = model1.predict(X) train_preds2 = model2.predict(X) s1 = f1_score(y1, train_preds1, average="weighted") s2 = f1_score(y2, train_preds2, average="weighted") score = 100 * (s1+s2) / 2 print(s1,s2) print("Training f1 score: ", score) pd.DataFrame(preds1, columns=['Value'])['Value'].value_counts().plot(kind='barh') test_preds1 = model1.predict(X2) test_preds2 = model2.predict(X2) # s1 = f1_score(y_test1, preds1, average="weighted") # s2 = f1_score(y_test2, preds2, average="weighted") # score = 100 * (s1+s2) / 2 # print(s1,s2) # print("Test f1 score: ", score) # pd.DataFrame(test_preds1, columns=['Value'])['Value'].value_counts().plot(kind='barh') # D_train3 = xgb.DMatrix(X2) ypred11 = model1.predict(X2) ypred22 = model2.predict(X2) submission 
= pd.DataFrame({"pet_id": df2["pet_id"], "breed_category": ypred11, "pet_category": ypred22}) print(submission.shape) submission.to_csv("submission.csv", index=False)LGBMfrom lightgbm import LGBMClassifier from sklearn.model_selection import RandomizedSearchCV from scipy.stats import uniform as sp_uniform from scipy.stats import randint as sp_randint lgb1 = LGBMClassifier( reg_lambda=57, refit=True, # reg_alpha=10, # learning_rate=0.1, max_depth=-1, n_estimators=200).fit(X,y1) lgb2 = LGBMClassifier( # reg_lambda=0.1, # reg_alpha=0.01, learning_rate=0.1, refit=True, n_estimators=200).fit(X,y2) # param_test ={'num_leaves': sp_randint(6, 50), # 'min_child_samples': sp_randint(100, 500), # 'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4], # 'subsample': sp_uniform(loc=0.2, scale=0.8), # 'colsample_bytree': sp_uniform(loc=0.4, scale=0.6), # 'reg_alpha': [40,42,44,56,57, 50, 60, 55], # 'reg_lambda': [10,12,13, 15, 20,25], # 'max_depth': [1,2,3,4,5,10,-1]} # clf1 = RandomizedSearchCV(lgb1, param_test, random_state=0) # clf2 = RandomizedSearchCV(lgb2, param_test, random_state=0) # search1 = clf1.fit(X,y1) # search2 = clf2.fit(X,y2) # print(search1.best_params_) # print(search2.best_params_) train_preds1 = lgb1.predict(X) train_preds2 = lgb2.predict(X) s1 = f1_score(y1, train_preds1, average="weighted") s2 = f1_score(y2, train_preds2, average="weighted") score = 100 * (s1+s2) / 2 print(s1,s2) print("Training f1 score: ", score) ypred11 = lgb1.predict(X2) ypred22 = lgb2.predict(X2) submission = pd.DataFrame({"pet_id": df2["pet_id"], "breed_category": ypred11, "pet_category": ypred22}) print(submission.shape) submission.to_csv("submission3.csv", index=False)(8072, 3)Random Forestfrom sklearn.ensemble import RandomForestClassifier model1 = RandomForestClassifier(n_estimators=1000, max_depth=10, random_state=0).fit(X, y1) model2 = RandomForestClassifier(n_estimators=1000, max_depth=10, random_state=0).fit(X, y2) train_preds1 = model1.predict(X) train_preds2 = model2.predict(X) s1 = f1_score(y1, train_preds1, average="weighted") s2 = f1_score(y2, train_preds2, average="weighted") score = 100 * (s1+s2) / 2 print(s1,s2) print("Training f1 score: ", score) ypred11 = model1.predict(X2) ypred22 = model2.predict(X2) submission = pd.DataFrame({"pet_id": df2["pet_id"], "breed_category": ypred11, "pet_category": ypred22}) print(submission.shape) submission.to_csv("submission10.csv", index=False)io-lulc-9-class dataset issueThe end goal of this workflow is to get the percentage of crop land for a set of points associated with the surrounding 1 $km^2$ regions. The percentage of crop land will then be added as a column to the features created with the mosaiks notebook at the same points. The percentage crop land of a point will be used as a wieght for the weighted average of features inside of an administrative boundary, in this example, the districts inside the country of Zambia. This general workflow is ideal for our use only in the sense that it matches well with the mosaiks example notebook and would integrate nicely into the work we have done to build from that baseline. That being said, we are open to other methods which give us our desired reults. This notebook reproduces an issue where the workflow produces empty values at the UTM zones delineations at every $6^o$ longitude. It also causes empty values at every $8^o$ latitude. 
We were able to solve the issue with the vertical lines of empty values but not with the horizontal lines.!pip install -q git+https://github.com/geopandas/dask-geopandas RASTERIO_BEST_PRACTICES = dict( # See https://github.com/pangeo-data/cog-best-practices CURL_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt", GDAL_DISABLE_READDIR_ON_OPEN="EMPTY_DIR", AWS_NO_SIGN_REQUEST="YES", GDAL_MAX_RAW_BLOCK_CACHE_SIZE="200000000", GDAL_SWATH_SIZE="200000000", VSI_CURL_CACHE_SIZE="200000000", ) import os os.environ.update(RASTERIO_BEST_PRACTICES) import numpy as np import pandas as pd import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader import rasterio import rasterio.warp import rasterio.mask import shapely.geometry import geopandas import dask_geopandas from dask.distributed import Client from pystac.extensions.item_assets import ItemAssetsExtension import pystac_client import planetary_computer as pcCreate a sparse grid of points over Zambia# Set params country_code = 'ZMB' year = 2018 geodetic_epsg = 'EPSG:4326' # get country shape world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres')) country = world.query(f'iso_a3 == "{country_code}"') # Create grid of points cell_size = .1 # Very roughly 10 km xmin, ymin, xmax, ymax = country.total_bounds xs = list(np.arange(xmin, xmax + cell_size, cell_size)) ys = list(np.arange(ymin, ymax + cell_size, cell_size)) def make_cell(x, y, cell_size): ring = [ (x, y), (x + cell_size, y), (x + cell_size, y + cell_size), (x, y + cell_size) ] cell = shapely.geometry.Polygon(ring).centroid return cell points = [] for x in xs: for y in ys: cell = make_cell(x, y, cell_size) points.append(cell) # Put grid into a GeDataFrame gdf = geopandas.GeoDataFrame({'geometry': points}, crs = geodetic_epsg) gdf['lon'], gdf['lat'] = gdf.geometry.x, gdf.geometry.y # subset to country gdf = gdf[gdf.within(country.unary_union)] gdf = gdf[['lon', 'lat', 'geometry']].reset_index(drop = True)Sort by hilbert distance# sort by hilbert distance NPARTITIONS = 250 ddf = dask_geopandas.from_geopandas(gdf, npartitions=1) hd = ddf.hilbert_distance().compute() gdf["hd"] = hd gdf = gdf.sort_values("hd") dgdf = dask_geopandas.from_geopandas(gdf, npartitions=NPARTITIONS, sort=False)Find the value for Crops in the io-lulc-9-class dataset# Get the value for Crop land in the data catalog = pystac_client.Client.open( "https://planetarycomputer.microsoft.com/api/stac/v1" ) collection = catalog.get_collection("io-lulc-9-class") ia = ItemAssetsExtension.ext(collection) x = ia.item_assets["data"] class_names = {x["summary"]: x["values"][0] for x in x.properties["file:values"]} values_to_classes = {v: k for k, v in class_names.items()} crop_value = class_names['Crops']Create the querydef query(points): """ Find a STAC item for points in the `points` DataFrame Parameters ---------- points : geopandas.GeoDataFrame A GeoDataFrame Returns ------- geopandas.GeoDataFrame A new geopandas.GeoDataFrame with a `stac_item` column containing the STAC item that covers each point. 
""" intersects = shapely.geometry.mapping(points.unary_union.convex_hull) catalog = pystac_client.Client.open( "https://planetarycomputer.microsoft.com/api/stac/v1" ) search = catalog.search( collections=["io-lulc-9-class"], intersects=intersects, datetime=["2018-01-01", "2018-12-31"], ) ic = search.get_all_items_as_dict() features = ic["features"] features_d = {item["id"]: item for item in features} data = {"geometry": [],} index = [] for item in features: data["geometry"].append(shapely.geometry.shape(item["geometry"])) index.append(item["id"]) items = geopandas.GeoDataFrame(data, index=index, geometry="geometry") point_list = points.geometry.tolist() point_items = [] for point in point_list: covered_by = items[items.covers(point)] if len(covered_by): point_items.append(features_d[covered_by.index[0]]) else: # There weren't any scenes matching our conditions for this point (too cloudy) point_items.append(None) return points.assign(stac_item=point_items)Match points to imageswith Client(n_workers=16) as client: meta = dgdf._meta.assign(stac_item=[]) df2 = dgdf.map_partitions(query, meta=meta).compute() df3 = df2.dropna(subset=["stac_item"]).reset_index() matching_urls = [ pc.sign(item["assets"]["data"]["href"]) for item in df3.stac_item.tolist() ] # Define the points list points = df3[["lon", "lat"]].to_numpy()Define the custom dataset and data loaderclass CustomDataset(Dataset): def __init__(self, points, fns, buffer=500): self.points = points self.fns = fns self.buffer = buffer def __len__(self): return self.points.shape[0] def __getitem__(self, idx): lon, lat = self.points[idx] fn = self.fns[idx] if fn is None: return None else: point_geom = shapely.geometry.mapping(shapely.geometry.Point(lon, lat)) with rasterio.Env(): with rasterio.open(fn, "r") as f: point_geom = rasterio.warp.transform_geom( "epsg:4326", f.crs.to_string(), point_geom ) point_shape = shapely.geometry.shape(point_geom) mask_shape = point_shape.buffer(self.buffer).envelope mask_geom = shapely.geometry.mapping(mask_shape) try: out_image, out_transform = rasterio.mask.mask( f, [mask_geom], crop=True ) except ValueError as e: if "Input shapes do not overlap raster." in str(e): return None out_image = out_image.squeeze() out_image = torch.from_numpy(out_image).float() return out_image # Create the dataset dataset = CustomDataset(points, matching_urls) dataloader = DataLoader( dataset, batch_size=8, shuffle=False, num_workers=os.cpu_count()*2, collate_fn=lambda x: x, pin_memory=False, )/srv/conda/envs/notebook/lib/python3.8/site-packages/torch/utils/data/dataloader.py:478: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 4, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary. 
warnings.warn(_create_warning_msg(Calculate the percentage of cropped area around each 1 km regionx_all = np.zeros((points.shape[0], 1), dtype=float) i = 0 for images in dataloader: for image in images: if type(image) == torch.Tensor: #### This was added because of the empty points crops = (image == crop_value).sum() / image.numel() x_all[i] = crops.item() else: x_all[i] = float("NaN") i += 1Plot the resultscrops = pd.DataFrame({'crop_perc': x_all.squeeze()}) crops[["lon", "lat"]] = points.tolist() plt.figure(figsize = (10,10)) plt.scatter(crops.lon, crops.lat, c=crops.crop_perc, s=22)Manually fix the vertical lines caused by the UTM zone delineationclass CustomDataset(Dataset): def __init__(self, points, fns, buffer=500): self.points = points self.fns = fns self.buffer = buffer def __len__(self): return self.points.shape[0] def __getitem__(self, idx): lon, lat = self.points[idx] fn = self.fns[idx] if lon >= 18.0 and lon < 24.0: ##### This fix does not scale well globally epsg = 32734 elif lon >= 24.0 and lon < 30.0: epsg = 32735 else: epsg = 32736 if fn is None: return None else: point_geom = shapely.geometry.mapping(shapely.geometry.Point(lon, lat)) with rasterio.Env(): with rasterio.open(fn, "r") as f: point_geom = rasterio.warp.transform_geom( "epsg:4326", epsg, ###### If I set this manually based on the if statement above it # f.crs.to_string(),###### solves the vertical gaps, but not the horizontal gaps point_geom ) point_shape = shapely.geometry.shape(point_geom) mask_shape = point_shape.buffer(self.buffer).envelope mask_geom = shapely.geometry.mapping(mask_shape) try: out_image, out_transform = rasterio.mask.mask( f, [mask_geom], crop=True ) except ValueError as e: if "Input shapes do not overlap raster." in str(e): return None out_image = out_image.squeeze() out_image = torch.from_numpy(out_image).float() return out_image # Create the dataset dataset = CustomDataset(points, matching_urls) dataloader = DataLoader( dataset, batch_size=8, shuffle=False, num_workers=os.cpu_count()*2, collate_fn=lambda x: x, pin_memory=False, ) # Calculate the percentage of cropped area around each 1 km region x_all = np.zeros((points.shape[0], 1), dtype=float) i = 0 for images in dataloader: for image in images: if type(image) == torch.Tensor: #### This was added because of the empty points crops = (image == crop_value).sum() / image.numel() x_all[i] = crops.item() else: x_all[i] = float("NaN") i += 1 crops = pd.DataFrame({'crop_perc': x_all.squeeze()}) crops[["lon", "lat"]] = points.tolist() plt.figure(figsize = (10,10)) plt.scatter(crops.lon, crops.lat, c=crops.crop_perc, s=22)/srv/conda/envs/notebook/lib/python3.8/site-packages/torch/utils/data/dataloader.py:478: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 4, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary. warnings.warn(_create_warning_msg((10분 지각 ppt로 수업하고 있었음 ) 1 산업문제 소개 - 박윤영 주택 용도별 차이점과 기준단독, 다가구, 다세대하고싶은 것 경기도 연면적 300 대지 250 건축연도 2010 도로조건 8 가격 6억원 이런 식으로 조건을 입력했을 떄 이것과 유사한 집들을 보여주는게 가능한지. 유사군 추출어떻게 집들 사이에 유사도를 정의할지. 
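One simple way to make "show me similar houses" concrete, as asked above, is to standardise the numeric features and query nearest neighbours in that scaled space. The sketch below is purely illustrative: the column names and values are made up for the example and are not the course data.

```python
# Illustrative sketch only: "similar houses" as nearest neighbours in
# standardised feature space. Columns and values are hypothetical.
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors

houses = pd.DataFrame({
    "floor_area":    [300, 280, 150, 320, 90],
    "land_area":     [250, 240, 120, 260, 80],
    "year_built":    [2010, 2008, 1995, 2012, 1988],
    "road_width":    [8, 8, 4, 10, 3],
    "price_10k_krw": [60000, 58000, 25000, 65000, 18000],
})

scaler = StandardScaler()
X_scaled = scaler.fit_transform(houses)

nn = NearestNeighbors(n_neighbors=3).fit(X_scaled)

# Query: the example conditions discussed above
# (floor area 300, land 250, built 2010, road 8 m, price 600M KRW).
query = scaler.transform(
    pd.DataFrame([[300, 250, 2010, 8, 60000]], columns=houses.columns)
)
distances, indices = nn.kneighbors(query)
print(houses.iloc[indices[0]])  # the three most similar listings
```

Standardising first matters here because the raw features are on very different scales (years vs. prices in 10k KRW).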
2 데이터 전처리import pandas as pd import numpy as np import os import glob #파일 한꺼번에 부르기 위해서 사용 import re glob.glob('*.*') #현재 작업하고 있는 파일명 모두 glob.glob(os.path.join("gh-data","*.*")) filenames = glob.glob(os.path.join("gh-data","단독*.xlsx")) filenames re.split('([0-9]+)',filenames[1]) #숫자 연속된거까지 포함해서 숫자 있는 것에서 스플릿 filenames = sorted (filenames, key=lambda s: int(re.split('([0-9]+)',s)[1]),) filenames #잘 불러와지나 테스트 df0=pd.read_excel('gh-data/단독다가구(매매)_실거래가_ (1).xlsx',header = 16,encoding='utf-8').fillna(0) #header : 앞에 16줄 제외하려고. fillna(0)은 빈칸을 0으로 만들기 위해서 df0.head() len(df0) df0[['시군구','번지']] df_raw=pd.DataFrame() #빈 데이터 프레임 만들기 for f in filenames: data = pd.read_excel(f,header=16,encoding='utf-8').fillna(0) df_raw = df_raw.append(data) len(df_raw) df_raw.tail() df_raw.reset_index(inplace=True,drop=True) #인덱스 정리 df_raw.tail() df_raw.to_excel('gh-data/2018_raw_data_my.xlsx',index=None, encoding='utf-8') #파일 저장 glob.glob(os.path.join('gh-data','*.*')) df_raw=pd.read_excel('gh-data/2018_raw_data.xlsx',index_col=None,header=0,encoding='utf-8') #파일 불러오기 df=df_raw.copy() df.head() df.describe() #내용중 숫자로 된 데이터의 성질을 말해준다. 거래금액, 계약일이 안나오는 이유는 str으로 저장되어있어서 일 수 있음 df.dtypes #각각 column의 값들의 타잎 #연면적 : 층별로 다 바닥면적 합친 것/ 대지면적 : 토지면적을 수평에 정사영한 면적/ 토지면적 : 실제 땅의 면적 = 잔디 깔때 필요한 면적 df['거래금액(만원)']=df['거래금액(만원)'].str.replace(',','').astype(float) #,를 공배으로 바꾸고 float으로 형태 바꾸기 df.head(10) df['도로조건'].unique() #데이터 어떤 것들 들어가있는지 df['도로조건']=df['도로조건'].replace(['-','m미만','25m이상'],['0','','26'],regex=True).astype(float) df['도로조건'].unique() s=df['계약년월'].astype(str) df['연도']=s.str[0:4].astype(int) #없는 칼럼도 이렇게 바로 만들 수 있음 df['월']=s.str[-2:].astype(int) # del df['계약년월'] df.head() #여기에 오류나 있는 이유는 한번 돌아간게 다시 돌리는 바람에 '계약년월'칼럼이 없기 때문에 del df['번지'] del df['계약일'] # del df['도로명'] df.head()지도 정보 데이터 파일은 오픈소스. 서울지역, 전국지역 각각 있다.#지도 데이터 파일과 형식 맞춰주기 위한 과정 : 지역_ID.txt SiDo = [] SiGunGu = [] DongLi = [] for i in range(0,len(df['시군구'])): tmp = df['시군구'][i].split() # 띄어쓰기로 split SiDo.append(tmp[0]) # 시/도 SiGunGu.append(tmp[1]) # 시/군/구 if len(tmp) > 3: DongLi.append(tmp[-1]+'('+tmp[2]+')') else: DongLi.append(tmp[-1]) df['SiDo'] = SiDo df['SiGunGu'] = SiGunGu df['DongLi'] = DongLi def cut_char_sigu(name): return name if len(name)==2 else name[:-1] sido_candi = df['SiDo'] #파일에서 특별시에는 '관악' 이런식이 아니라 '서울 관악' 이렇게 되어있기 떄문에 맞춰주려는 것 sido_candi = [name[:2] if name[:2] in ['서울','부산','대구','광주','인천','대전','울산','세종'] else '' for name in sido_candi] sigun_candi = ['']*len(df) for n in df.index: # print(n) each = df['SiGunGu'][n] if each[:2] in ['수원', '성남','안양','안산','고양','용인','청주','천안','전주','포항','창원']: #여기도 '분당' 아니라 '성남 분당' 이렇게 되어있는 곳들 sigun_candi[n] = each[:2] + ' ' + each[2:-1] #re.split('시', each)[0]+' '+ cut_char_sigu(re.split('시', each)[1]) else: sigun_candi[n] = cut_char_sigu(each) ID_candi = [sido_candi[n]+' '+sigun_candi[n] for n in range(0,len(sigun_candi))] ID_candi = [name[1:] if name[0]==' ' else name for name in ID_candi] ID_candi = [name[:2] if name[:2]=='세종' else name for name in ID_candi] df['ID'] = ID_candi df.head() #박사님이 빠트린 부분 있다고 내일 다른 박사님이 얘기해줄거라고 함 df.to_excel('gh-data/2018_modified_data_my.xlsx',index=None,encoding='utf-8') house_data=pd.read_excel('gh-data/2018_modified_data.xlsx', index_col=None, header=0,encoding='utf-8') house_data.head()zillow.com 이라는 해외 사이트가 있는데. 사는건지 렌탈인건지 금액 방개수 설정해서 집 타입 화장실 개수 등등 조건 넣어서 조건에 해당되는 집 추천해주고 있다.정보가 꽤나 다양히 있다. 네이버 부동산에는 꽤나 정보가 다양하게 있긴 함. 직방은 한 세대 단위로만 매매하기 떄문에 건물 매매는 안나와있다. 건물안에 있었던 매매 정보라든지 학교정보 쭉 나와있다.서울 열린 데이터 광장, 등등 추천. 조금 쉬었다가 끝내겠따. 
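A quick aside on the file-reading loop above that appends each Excel sheet to `df_raw`: collecting the frames in a list and concatenating once is the usual pandas pattern (`DataFrame.append` was removed in pandas 2.0). A minimal sketch, assuming the same `gh-data` folder layout as above:

```python
# Sketch, assuming the same "gh-data" folder: read every matching file and
# concatenate in one call instead of appending inside the loop.
import glob
import os
import re
import pandas as pd

filenames = glob.glob(os.path.join("gh-data", "단독*.xlsx"))
# Natural sort by the number embedded in each filename, as done above.
filenames = sorted(filenames, key=lambda s: int(re.split('([0-9]+)', s)[1]))

frames = [pd.read_excel(f, header=16).fillna(0) for f in filenames]
df_raw = pd.concat(frames, ignore_index=True)  # ignore_index resets the index in one step
```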
Data cleaning and Visualizationimport seaborn as sns from pandas.plotting import scatter_matrix import matplotlib.pyplot as plt %matplotlib inline del house_data['시군구'] import matplotlib as mpl mpl.rc('font',family='Malgun Gothic') house_type=house_data['주택유형'].unique() num1=house_data['주택유형'][house_data['주택유형']=='단독'].count() num2=house_data['주택유형'][house_data['주택유형']=='다가구'].count() fig_pie=plt.figure(figsize=(8,8)) num_type=[num1,num2] plt.rcParams['text.color']='Navy' a, b=[plt.cm.Blues, plt.cm.Reds] plt.pie(num_type, labels=house_type, #그림은 여기줄만 해도 된다고 함 colors=[a(0.6),b(0.6)], autopct='%.2f', #자동으로 퍼센트 계산해서 해주라는 것 wedgeprops={'linewidth':7,'edgecolor':'white'}, textprops={'fontsize':18}) plt.show()NOTE:-----Please run the below cells first before proceeding- you'll need them soon!%load_ext sql %sql sqlite:// %%sql DROP TABLE IF EXISTS Movies; CREATE TABLE Movies(title VARCHAR(50), year INT, director VARCHAR(50), length INT); INSERT INTO Movies VALUES('Database Wars', 1967, '', 123); INSERT INTO Movies VALUES('The Databaser', 1992, '', 190); INSERT INTO Movies VALUES('Database Wars', 1998, '', 176); %sql DROP TABLE IF EXISTS A; DROP TABLE IF EXISTS B; %sql CREATE TABLE A (x int, y int); CREATE TABLE B (x int, y int); for i in range(1,6): %sql INSERT INTO A VALUES (:i, :i+1) for i in range(1,11,3): %sql INSERT INTO B VALUES (:i, :i+2)Done. Done. Done. Done. 1 rows affected. 1 rows affected. 1 rows affected. 1 rows affected. 1 rows affected. 1 rows affected. 1 rows affected. 1 rows affected. 1 rows affected.Activity 3-1:------------ORDER BY semantics, set operators & nested queries%sql SELECT * FROM moviesDone.Exercise 1-----------**Can you write the movie query from lecture as a single SFW query?**Recall that we are trying to find **all movie titles that were used for more than one movie.** You may assume that no two movies in the same year have the same title. Our schema for the `movies` table is:> * title STRING> * year INT> * director STRING> * length INTLet's try to write the nested query that solves this from lecture:%%sql SELECT m.title FROM Movies m WHERE m.year <> ANY(SELECT year FROM Movie WHERE title = m.title);(sqlite3.OperationalError) near "SELECT": syntax error [SQL: u'SELECT m.title \nFROM Movies m\nWHERE m.year <> ANY(SELECT year FROM Movie WHERE title = m.title);']What? This doesn't work? Why?**ANY doesn't exist in SQLite!** Can we do this query without nesting? Write your query here:%%sql SELECT DISTINCT a.title FROM Movies a, Movies b WHERE a.title = b.title AND a.year <> b.year;Done.Exercise 2--------------------Consider the two relations $A$ and $B$ below:%sql SELECT * FROM A; %sql SELECT * FROM B;Done.Assuming no duplicates, can you write an `INTERSECT` query, **just over the $x$ attribute**, without using `INTERSECT` OR nested queries? Write your query here:%%sql SELECT A.x FROM A, B Where A.x = B.xDone.What is this operation called?Next, using set operators again as well, can you return all the _full_ tuples in $A$ and $B$ that overlap in $x$ attributes? Write your query here:%%sql SELECT x, y FROM ( SELECT A.x, A.y FROM A, B WHERE A.x = B.x UNION SELECT B.x, B.y FROM A, B WHERE A.x = B.x );Done.Proving that Greater Metro Sydney was a Coronavirus Hotspot via Binomial Regression and Bayesian inference of NSW Covid test dataThis notebook explores the NSW Covid 19 test data to look for patterns ... 
URL: https://data.nsw.gov.au/data/dataset/5424aa3b-550d-4637-ae50-7f458ce327f4/resource/227f6b65-025c-482c-9f22-a25cf1b8594f/download/covid-19-tests-by-date-and-location-and-result.csvThe data are categorised by date and postcode, local health district, local government area, and result. Updated daily. Fetched on Thu 15 Oct 2020 at 5:00pm.A surge on a given day does not indicate a surge in tests for that day. Test data are updated in batches after a weekend as normal work hours commence, and also as laboratories gain new testing capacity.The NSW government provides assurances that the data are anonymised and no single person can be identified from the dataset. Understanding NSW districts![image of nsw regional districts](files/nsw.jpg)The above shows all the regional districts of NSW as they appear in a geographic space.![image of nsw metro districts](files/metro.jpg)The above shows the Metropolitan districts of NSW.What we learn from this is that places like South Eastern Sydney, Sydney, and Northern Sydney are the closest to the airport, whereas regional NSW is far away from population centres.According to https://profile.id.com.au the following is true about regional vs. metro NSW:- The 2019 Estimated Resident Population for Regional NSW is 2,777,654, with a population density of 0.04 persons per hectare.- The 2019 Estimated Resident Population for Greater Sydney is 5,312,163, with a population density of 4.29 persons per hectare.Woah, that's a pretty big difference!We hypothesise that, given metro NSW's higher population density and its closeness to the airport, there will be a higher positive test rate compared to regional NSW. We don't know whether the positive test rate is directly related to population density, so we do not expect a 100-fold difference exactly.In order to prove this, we will compare Far West NSW, the Nepean Blue Mountains, and South Eastern Sydney. We'll first assess the binomial distribution to validate positive test cases, and then we'll use naive Bayes algorithms to determine which region a positive test will most likely originate from.First, it's time to load in our data.# load the relevant libraries import pandas as pd import numpy as np import seaborn as sns from scipy.stats import binom import matplotlib.pyplot as plt from statistics import median from math import factorial from statistics import mean def n_choose_m(n, m): return factorial(n)/(factorial(n-m)*factorial(m)) # load in the dataset # make relevant adjustments to the datatype after inspecting in excel df = pd.read_csv( 'covid-19-tests-by-date-and-location-and-result.csv', #dtype={'postcode': 'Int64', 'lga_code19': 'Int64'}, parse_dates=['test_date'] ) df[['postcode', 'lga_code19']] = df[['postcode', 'lga_code19']].fillna(0) df[['postcode', 'lga_code19']] = df[['postcode', 'lga_code19']].apply(pd.to_numeric, downcast='integer') print(set(df.lhd_2010_name)) df['region'] = df.lhd_2010_name.fillna('').apply(lambda x: 'regional' if x in [ 'Murrumbidgee', 'Far West', 'Western NSW', 'Hunter New England', 'Northern NSW', 'Mid North Coast', 'Illawarra Shoalhaven', 'Southern NSW', 'Network with Vic' ] else 'metro' if x != '' else np.nan) # inspect the data visually df.head() df.tail()The data appear normal, but it looks like there are sections where the data are completely anonymised.
In this case we will assign a new flag in a new column: `anon`.Note that anonymised data are not people choosing to remain anonymous; rather this is a technique known as differential privacy.df['anon'] = np.where(df.postcode == 0, True, False) df.head() df.tail()The binomial distributionCurrent knowledge is that Melbourne and Sydney have had the most positive confirmed cases. We will find the probability of whether a person is likely to test positive based on the region in which they live in NSW, then use the binomial distribution to determine the likelihood that probability is indicative of the realworld; i.e. true.We do this because we need to ensure that the testing methodology is accurate before we can make any inferences from Bayes' theorem.df.groupby('lhd_2010_name')['result'].value_counts() df.groupby('region')['result'].value_counts()This gives us an idea of the values to expect, but we're interested in the probability. In any case, the probability is the total positive results divided by total tests, for each region. We wish to know how likely it is that the following probabilities are true.We'll compute this for our three selected sample regions, and then the aggregate regions (metro & regional).probs = df.result.eq('Case - Confirmed').groupby(df['lhd_2010_name']).mean() print(probs) probs = df.result.eq('Case - Confirmed').groupby(df['region']).mean() print(probs)region metro 0.006486 regional 0.004131 Name: result, dtype: float64We have now ascertained the probability that you will test positive in NSW based on the region that you test in. Eyeballing it, if you tested in South Eastern Sydney, you were most likely to test positive, but if you tested in Far West you were least likely to test positive. Hypothetically, this is an indication that the virus, at least as of today, has not penetrated the Far West of NSW as much as other regions *or* that the Far West of NSW has suboptimal conditions for viral spread.For each region, we assert that `n` is the total number of tests, and `p` is the probability of success in each trial. We are interested in finding the value of `n`, given `k` positive outcomes, for each region.What can we say about the probability of these probabilities?sns.distplot(probs, kde=False, color='skyblue')C:\ProgramData\Anaconda3\lib\site-packages\scipy\stats\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result. return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumvalLet's first take Far West, where `n` is equal to `1388` and `p` is equal to `2/1388`, or `0.001441`. Let's substitute these into our binomial function and see what it looks like.fig, ax = plt.subplots(1,1) n, p = 1388, 0.001441 mean, var, skew, kurt = binom.stats(n, p, moments='mvsk') x = np.arange( binom.ppf(0.01, n, p), binom.ppf(0.99, n, p)) ax.plot(x, binom.pmf(x, n, p), 'bo', ms=8, label='binom pmf for far west') ax.vlines(x, 0, binom.pmf(x, n, p), colors='r', lw=5, alpha=.5)We do not have a lot of data points, but what we see is that we should expect 1 or 2 out of our total sample to test positive, which lines up with our empirical data.Let's run a quick `np.allclose` check to see if we are accurate. This function checks to see that the cumulative density function is equal to the percentage point function in element-wise order. 
Essentially: we construct a confidence interval and assert that it is satisfactory. If satisfactory, assert that the result is accurate.prob = binom.cdf(x, n, p) np.allclose(x, binom.ppf(prob, n, p)) print('median:', median(x)) print('discrete probability:', max(binom.pmf(x, n, p)))median: 2.5 discrete probability: 0.2708657608253548Success! There is a 27% chance that 2.5 people will test positive, which is very close to our real world results of 2 positive tests.We'll use a mathematical prove just to demonstrate that the function above is doing what we expect.Recall the binomial function:\begin{equation*}P(E) = {n \choose k} p^k (1-p)^{ n-k}\end{equation*}Substituting in our values:\begin{equation*}P(E) = {1388 \choose 2} 0.001441^2 (1-0.001441)^{ 1388-2}\end{equation*}Thus manually calculating this results in:\begin{equation*}P(E) = 962578 \cdot 2.076481e^{-06} \times 0.1355158 = 0.27086\end{equation*}Let's check for South Eastern Sydney, which we know has a much larger dataset...fig, ax = plt.subplots(1,1) n, p = 72758, 0.009429 mean, var, skew, kurt = binom.stats(n, p, moments='mvsk') x = np.arange( binom.ppf(0.01, n, p), binom.ppf(0.99, n, p)) ax.plot(x, binom.pmf(x, n, p), 'bo', ms=8, label='binom pmf for far west') ax.vlines(x, 0, binom.pmf(x, n, p), colors='r', lw=5, alpha=.5) prob = binom.cdf(x, n, p) np.allclose(x, binom.ppf(prob, n, p)) print('median:', median(x)) print('discrete probability:', max(binom.pmf(x, n, p)))median: 686.0 discrete probability: 0.015302131865897095Also success! And we get a much prettier graph.We know that 686 positive tests were recorded in the real world for GES, so this is promising. Though the probability is smaller, since there is a much larger number of tests.Still, we can be confident this is accurate. Last, let's check for the Nepean Blue Mountains.fig, ax = plt.subplots(1,1) n, p = 31901+178, 0.005549 mean, var, skew, kurt = binom.stats(n, p, moments='mvsk') x = np.arange( binom.ppf(0.01, n, p), binom.ppf(0.99, n, p)) ax.plot(x, binom.pmf(x, n, p), 'bo', ms=8, label='binom pmf for far west') ax.vlines(x, 0, binom.pmf(x, n, p), colors='r', lw=5, alpha=.5) prob = binom.cdf(x, n, p) np.allclose(x, binom.ppf(prob, n, p)) print('median:', median(x)) print('discrete probability:', max(binom.pmf(x, n, p)))median: 178.5 discrete probability: 0.029971256440868704Yahoo! Regardless of whether the contact tracing program is good or not, we can assert from this result that the testing program works as we would expect. The median here is 178.5 (and we had 178 in the real world) and the probability is pretty much 3%, which is the highest of the distribution. Simulating the binomial distributionJust to close this section, instead of a mathematical proof, let's simulate for the Far West region. We expect a median of 178 with a probability of... you guessed it, 27.0865%.We would simulate for Nepean Blue Mountains but we don't have capable computer hardware to do it in a reasonable amount of time.And since we have a much smaller set of tests we can simulate many, many more times.result = [] for i in range(750000): test = np.random.choice([True, False], 1388, p=[.001441, 1-.001441]) result.append(np.sum(test)) test = pd.DataFrame(result, columns=['positives']) grouped = test.groupby('positives')['positives'].value_counts() groupedLooks good so far! Now let's get the means...grouped / grouped.sum() * 100It's pretty dang close! 
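The Monte Carlo loop above can also be vectorised, which would make the larger regions (like the Nepean Blue Mountains run we skipped for speed) feasible. A minimal sketch, not the notebook's original code, reusing the Far West parameters:

```python
# Sketch: draw all simulated samples at once with numpy's binomial sampler
# instead of looping over np.random.choice. Far West parameters reused.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
positives = rng.binomial(n=1388, p=0.001441, size=750_000)  # 750k samples of 1388 tests each

# Empirical distribution of the number of positive tests per simulated sample (%)
print(pd.Series(positives).value_counts(normalize=True).sort_index() * 100)
```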
Interestingly enough, the probability that you'll get 13 positive tests in Far West NSW is practically 0!Let's plot this like we did before, by computing the binomial distribution, just to check it out. Inference via Naive Bayesian theoryBayes' theorem states that the probability of A being true, given B, is the probability of B given A multiplied by the probability of A, divided by the probability of B. Recall Bayes' theorem:\begin{equation*}P(A|B) = \frac{P(B|A) \cdot P(A)}{P(B)}\end{equation*}We wish to find the probability that a person is from regional NSW (A) given that they have tested positive for the coronavirus (B). We will compute the probability that a positive test comes from metro NSW and subtract this from 1 to find the probability that the person who tested positive is from regional NSW.Naive Bayes stipulates that all possible explanatory variables (Bs) are independent of each other. We will work solely on the assumption that our B is independent of all other explanatory variables. Essentially what we want to predict is whether a given positive test case comes from Regional or Metro NSW. Substituting into Bayes' theorem:\begin{equation*}P(metro|pos) = \frac{P(pos|metro) \cdot P(metro)}{P(pos|metro) \cdot P(metro) + P(pos|regional) \cdot P(regional)}\end{equation*}and\begin{equation*}P(regional|pos) = 1 - P(metro|pos)\end{equation*}First, we need to know the global positive test rate for the general population of NSW.df.groupby('result')['result'].value_counts() df.groupby('region')['result'].value_counts() pos_rate = 3117/535933 print('Positive test rate:', pos_rate) neg_rate = 1-pos_rate print('Negative test rate:', neg_rate) p_non_metro = 2777654/(5312163+2777654) print('Probability lives in regional NSW:', p_non_metro) p_metro = 1-p_non_metro print('Probability lives in metro NSW:', p_metro) p_pos_non_metro = 647/155971 print('Positive test rate regional NSW', p_pos_non_metro) p_pos_metro = 2326/356298 print('Positive test rate metro NSW', p_pos_metro) p_neg_metro = 1-p_pos_metro print('Negative test rate metro NSW', p_neg_metro) numerator = p_pos_metro * p_metro denominator = p_pos_metro * p_metro + p_pos_non_metro * p_non_metro probability = numerator / denominator probability > pos_rate print(numerator, denominator, probability) print(1-probability)0.24939265647148867Quiz 3For Penn State students, access the quiz [here](https://psu.instructure.com/courses/2177217)import ipywidgets as widgetsQuestion 1Is $f(x)=e^x$ a convex function? ```{dropdown} Show answerAnswer: Yes``` Question 2Consider the uniform distribution $\mathcal X$ on $[-a,a]$ for some number $a>0$. What are the expectation and variance of $\mathcal X$? ```{dropdown} Show answerAnswer:$0, \frac{a^2}{3}.$``` Question 3Suppose you flip a fair coin 3 times. Let $\chi$ be the number of heads. Calculate the expectation of $\chi ^2 $ ```{dropdown} Show answerAnswer: 3``` Question 4Consider the function $f(x,y,z)=yz+e^{xyz}$. At the point $ \begin{pmatrix} x\\ y\\ z \end{pmatrix} = \begin{pmatrix} 0\\ 1\\ 2 \end{pmatrix}$find the direction along which the function decreases most rapidly.
```{dropdown} Show answerAnswer: $\begin{pmatrix} -2\\-2\\-1\end{pmatrix}$``` Question 5Consider $f(x,y)=2x^2+2y^2.$ Given initial guess $ \begin{pmatrix} x^0\\ y^0 \end{pmatrix} = \begin{pmatrix} 2\\ 3 \end{pmatrix}$$\eta =1/8$ compute two steps of the gradient  descent method for $f(x,y)$ ```{dropdown} Show answerAnswer: $ \begin{pmatrix} x^2\\ y^2 \end{pmatrix} = \begin{pmatrix} \frac {1}{2}\\ \frac {3}{4} \end{pmatrix}$``` Question 6What is output of the following code?class test:         def _ _init_ _(self, a):                self.a=a         def display(self):                print(self.a) obj = test() obj.display()```{dropdown} Show answerAnswer: Error as one argument is required while creating the object``` Question 7If we use "import Course'' in Python, what is "Course"? ```{dropdown} Show answer Answer: A module``` Question 8What is the output of the following code:print('{}\n/{}'.format(1,2))```{dropdown} Show answerAnswer: 1/2``` Question 9How to define stochastic gradient descent method with learing rate=1 after:import torch.optim import torch.nn as nn my_model=nn.Linear(784,10)Titanic Data Analytics Project - Kaggle Beichuan( 2017.01.10 In this Kaggle challenge, I need to complete the analysis of what sorts of people were likely to survive. Data Dictionary (Variable Definition Key)* survival Survival 0 = No, 1 = Yes* pclass Ticket class 1 = 1st, 2 = 2nd, 3 = 3rd* sex Sex * Age Age in years * sibsp of siblings / spouses aboard the Titanic * parch of parents / children aboard the Titanic * ticket Ticket number * fare Passenger fare * cabin Cabin number * embarked Port of Embarkation C = Cherbourg, Q = Queenstown, S = Southampton Variable Notes pclass: A proxy for socio-economic status (SES)* 1st = Upper* 2nd = Middle* 3rd = Lowerage: Age is fractional if less than 1. If the age is estimated, is it in the form of xx.5sibsp: The dataset defines family relations in this way...* Sibling = brother, sister, stepbrother, stepsister* Spouse = husband, wife (mistresses and fiancés were ignored)parch: The dataset defines family relations in this way...* Parent = mother, father* Child = daughter, son, stepdaughter, stepson* Some children travelled only with a nanny, therefore parch=0 for them. Load and Browse Dataimport pandas as pd import numpy as np import seaborn as sns td = pd.read_csv('data/train.csv') td.head(5) td.info() RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): PassengerId 891 non-null int64 Survived 891 non-null int64 Pclass 891 non-null int64 Name 891 non-null object Sex 891 non-null object Age 714 non-null float64 SibSp 891 non-null int64 Parch 891 non-null int64 Ticket 891 non-null object Fare 891 non-null float64 Cabin 204 non-null object Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.6+ KBEDA - Numeric Variablesnuvar = ['Age','SibSp','Parch','Fare']AgeFrom the univariate distribution plot, most of the people are between 20ish to 40ishtd['Age'].value_counts() age = td['Age'].dropna() # do it since seaborn can't create plot with NaN in the variable sns.distplot(age)Fill in Missing Values in Ageage_1 = td['Age'].interpolate(method='linear') age_2 = td['Age'].interpolate(method='pchip') age_3 = td['Age'].interpolate(method='cubic') #sns.distplot(age_1) sns.distplot(age_2) #sns.distplot(age_2)The three methods get similar results. Source: https://pandas.pydata.org/pandas-docs/stable/missing_data.htmltd['Age_all'] = td['Age']. 
interpolate(method='pchip')SibSpAbout 3/4 of the people have no sibling or spouse aboard, and about 1/4 have one sibling or spouse.td['SibSp'].value_counts() sns.distplot(td['SibSp'])ParchAbout 2/3 of the people have no parents/children aboard; this includes some children travelling with only a nanny. About 1/3 of the people have one or two parents or children.td['Parch'].value_counts() sns.distplot(td['Parch'])FareMost of the people have a fare of around 10 (assuming the unit is dollars here).td['Fare'].value_counts() sns.distplot(td['Fare'])Pairwise relationshipnuvar nuvar[0] = 'Age_all' # replace Age with Age_all because seaborn.pairplot() doesn't work well with NaN nuvar nuvar_df = td[nuvar] sns.pairplot(nuvar_df)Correlation MatrixThere are no strongly correlated numerical variables except for Age and Age_all, which are the same feature with and without missing values.corr = td.corr() sns.heatmap(corr)EDA - Categorical Variableslist(td) catvar = ['Survived','Pclass','Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']Survivedtd['Survived'].value_counts()Pclasstd['Pclass'].value_counts() td['Pclass'] = td['Pclass'].astype('category') # Convert Pclass from int type to categoryWhat is the difference between the object and category dtypes?td['Pclass'].dtype sns.barplot(x="Pclass", y="Survived", data=td);Sextd['Sex'].value_counts() sns.barplot(x='Sex', y='Survived', data=td)Tickettd['Ticket'].value_counts()Cabintd['Cabin'].value_counts() cabinLetter = ['A','B','C','D','E','F','G'] num = [] for i in cabinLetter: num.append(len(td[td['Cabin'].str.contains(i, na = False)])) numI guess the letter in the cabin number indicates where the cabin is located on the ship, so I wanted to see how many people fall under each cabin letter. But the Cabin variable has too many missing values to be useful. Embarkedtd['Embarked'].value_counts() td['Embarked'] = td['Embarked'].fillna('S') # Use the mode to fill the missing values sns.barplot(x='Embarked', y='Survived', data=td)How should we select categorical variables, and which statistical test should we use? One common option is sketched below. 
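The notebook leaves this question open. One common screening approach (my addition here, not something the original author ran) is a chi-squared test of independence between each categorical feature and Survived; a minimal sketch, assuming the same data/train.csv loaded above:

```python
# Hedged sketch: chi-squared independence test for categorical feature screening.
# Assumes the same Titanic training file used earlier in this notebook.
import pandas as pd
from scipy.stats import chi2_contingency

td = pd.read_csv('data/train.csv')
for col in ['Pclass', 'Sex', 'Embarked']:               # candidate categorical features
    contingency = pd.crosstab(td[col], td['Survived'])  # observed counts per category/outcome
    chi2, p_value, dof, _ = chi2_contingency(contingency)
    print(f"{col}: chi2={chi2:.1f}, dof={dof}, p={p_value:.2e}")
# A very small p-value suggests the feature and Survived are not independent,
# which makes the feature a reasonable candidate to keep in the model.
```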
Define Features and TargetY = td['Survived'].values list(td) feature_list = [ # 'PassengerId', # 'Survived', 'Pclass', # 'Name', 'Sex', # 'Age', 'SibSp', 'Parch', # 'Ticket', 'Fare', # 'Cabin', 'Embarked', 'Age_all'] td[feature_list].info() # no missing values RangeIndex: 891 entries, 0 to 890 Data columns (total 7 columns): Pclass 891 non-null category Sex 891 non-null object SibSp 891 non-null int64 Parch 891 non-null int64 Fare 891 non-null float64 Embarked 891 non-null object Age_all 891 non-null float64 dtypes: category(1), float64(2), int64(2), object(2) memory usage: 42.8+ KBOne-hot EncodingX = pd.get_dummies(td[feature_list]) X feature_list_dummy = list(X) feature_list_dummyStandardize Features for Logistics Regressionfrom sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_scaled = scaler.fit_transform(X) X_scaled.shapeTrain Test Splitfrom sklearn.cross_validation import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.2)Modeling - Logistics Regression Training modelfrom sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(X_train, Y_train)Predict on training datatrain_predict = lr.predict(X_train)Predict on testing datatest_predict = lr.predict(X_test)Model performance & Interpretation - Logistics RegressionThe performance is not bad.reference: http://scikit-learn.org/stable/modules/classes.htmlmodule-sklearn.metricsfrom sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score, roc_auc_score def print_results(y_true, y_pred): print("Accuracy of the Logistic Regression is: {}".format(accuracy_score(y_true, y_pred))) print("Precision of the Logistic Regression is: {}".format(precision_score(y_true, y_pred))) print("Recall of the Logistic Regression is: {}".format(recall_score(y_true, y_pred))) print("f1-score of the Logistic Regression is: {}".format(f1_score(y_true, y_pred))) print("Area Under Curve (AUC) of the Logistic Regression is: {}".format(roc_auc_score(y_true, y_pred))) print("Training set scores:") print_results(Y_train, train_predict) print("Testing set scores:") print_results(Y_test, test_predict) from sklearn.metrics import roc_curve #reference: http://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html#sklearn.metrics.roc_curve Y_test_pred_proba = lr.predict_proba(X_test)[:,1] FalsePositiveRate, TruePositiveRate, thresholds = roc_curve(Y_test, Y_test_pred_proba) import matplotlib.pyplot as plt % matplotlib inline plt.style.use("ggplot") # plot TPR against FPR plt.plot(FalsePositiveRate, TruePositiveRate, color='red') # plot 45 degree line xx = np.linspace(0, 1.0, 20) plt.plot(xx, xx, color='blue') plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC") plt.show()Understand Estimated CoefficientsSince I standardized the variables, so estimated coefficients can reveal which variable has bigger predicting power.From the result, female are more likely to survive, and people in class 1 are more likely to survive. Also, the higher people paid as fare, the more likely they are to survive. 
is my interpretation fair???#lr.coef_.flatten() #reference: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.flatten.html #list(zip(feature_list_dummy, lr.coef_)) df_coeffs = pd.DataFrame(list(zip(feature_list_dummy, lr.coef_.flatten()))).sort_values(by=[1], ascending=False) df_coeffs.columns = ['feature', 'coeff'] df_coeffs ModelResources- Errors & Exceptions in Python: https://docs.python.org/3/tutorial/errors.html- Built-In Excpetions in Python: https://docs.python.org/2/library/exceptions.html- Numpy Testing: http://docs.scipy.org/doc/numpy/reference/routines.testing.html Assert statements Example: Assert that a specific version of Python is being useddef check_python_version(): print 'Python version:\n', sys.version assert sys.version_info < (3,0) check_python_version() def improved_check_python_version(): print 'Python version:\n', sys.version try: assert sys.version_info < (3,0) except: raise AssertionError('Incompatible version of Python: use Python version < 3.0') improved_check_python_version()Raising errors- Anticipate and catch errors- Raise a more informative error than the default error Example: Division in Python versions < 3.0def test_type_float(var): if not isinstance(var, float): raise TypeError('Expected input type == float') f = 1. # Since the input type is a float, no error is raised. test_type_float(f) i = 1 # Since the input type is a list, an error is raised. test_type_float(i) def incorrect_divide_by_two(var): return var / 2 print incorrect_divide_by_two(f) print incorrect_divide_by_two(i) def correct_divide_by_two(var): ''' Divides input by two. INPUT var : float ''' test_type_float(var) return var / 2 correct_divide_by_two? print correct_divide_by_two(f) print correct_divide_by_two(i)Raising warnings Example: Division in Python versions < 3.0def divide_by_two(var): if isinstance(var, int): warnings.warn('Performing floor division. Input type == int', Warning) return var / 2 divide_by_two(1) divide_by_two(np.array([1])) def divide_by_two(var): if isinstance(var, int): warnings.warn('Performing floor division. Input type == int', Warning) if isinstance(var, np.ndarray): if var.dtype == int: warnings.warn('Performing floor division. 
numpy.dtype == int', Warning) return var / 2 divide_by_two(np.array([1]))Model test 1input_layer = Input(shape=(7, 1, 4)) conv_1 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(input_layer) bn_1 = BatchNormalization()(conv_1) conv_2 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(bn_1) drop_1 = Dropout(0.25)(conv_2) flatten = Flatten()(drop_1) predictions = Dense(4, activation='softmax')(flatten) model = Model(input_layer, predictions) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) batch_size = 10000 epochs = 200 model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_validate, y_validate))_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_4 (InputLayer) (None, 7, 1, 4) 0 _________________________________________________________________ conv2d_6 (Conv2D) (None, 7, 1, 10) 370 _________________________________________________________________ batch_normalization_1 (Batch (None, 7, 1, 10) 40 _________________________________________________________________ conv2d_7 (Conv2D) (None, 7, 1, 10) 910 _________________________________________________________________ dropout_2 (Dropout) (None, 7, 1, 10) 0 _________________________________________________________________ flatten_4 (Flatten) (None, 70) 0 _________________________________________________________________ dense_4 (D[...]Bitwisex=0x0a y=0x0f z = x & y print(f"(hex) x is {x:02x} , y is {y:02x} and z is {z:02x}") print(f"(deciman) x is {x:08b} , y is {y:08b} and z is {z:08b}")(hex) x is 0a , y is 0f and z is 0a (deciman) x is 00001010 , y is 00001111 and z is 00001010Booleanand ornotin not in is not is Strings are immutable. They have the same ID, optimization. They are indeed, the same object.for i in range()Integrating New FERC Form 1 and EIA Data ReleasesThis notebook generates lists of new plants and utilities that need to be assigned PUDL IDs. It helps with the process of integrating new data each fall when the agencies make their new annual release for the previous year. Prerequisites:* All available EIA 860/923 years must be loaded into your PUDL DB.* This includes the **new** year of data to be integrated.* This means the spreadsheet tab maps need to be updated.* Some minor EIA data wrangling may also be required.* All years of FERC Form 1 data must be loaded into your FERC 1 DB.* This includes the **new** year of data to be integrated. Outputs:* `unmapped_utilities_ferc1.csv`: Respondent IDs and respondent names of utilities which appear in the FERC Form 1 DB, but which do **not** appear in the PUDL ID mapping spreadsheet.* `unmapped_plants_ferc1.csv`: Plant names, respondent names, and respondent IDs associated with plants that appear in the FERC Form 1 DB, but which do **not** appear in the PUDL ID Mapping spreadsheet.* `unmapped_utilities_eia.csv`: EIA Utility IDs and names of utilities which appear in the PUDL DB, but which do **not** appear in the PUDL ID mapping spreadsheet.* `unmapped_plants_eia.csv`: EIA Plant IDs and Plant Names of plants which appear in the PUDL DB, but which do **not** appear in the PUDL ID mapping spreadsheet. 
The Utility ID and Name for the primary plant operator, as well as the aggregate plant capacity and the state the plant is located in are also proved to aid in PUDL ID mapping.* `lost_utilities_eia.csv`: The Utility IDs and Names of utilities which appear in the PUDL ID mapping spreadsheet but which do **not** appear in the PUDL DB. Likely because EIA revised previous years of data, and removed those utilities, after we had mapped them.* `lost_plants_eia.csv`: The Plant IDs and Names of plants which appear in the PUDL ID mapping spreadsheet but which do **not** appear in the PUDL DB. Likely because EIA revised previous years of data, and removed those plants, after we had mapped them.%load_ext autoreload %autoreload 2 import sqlalchemy as sa import pandas as pd import pudl import re from pathlib import Path pudl_settings = pudl.workspace.setup.get_defaults() pudl_settingsSetup:* Create FERC1/PUDL database connections* Set the scope of the FERC Form 1 search (which years to check)ferc1_engine = sa.create_engine(pudl_settings["ferc1_db"]) pudl_engine = sa.create_engine(pudl_settings["pudl_db"]) ferc1_years = pudl.constants.data_years["ferc1"] print("Searching for new FERC 1 plants, utilities and strings in the following years:") print(ferc1_years) with pudl_engine.connect() as conn: eia_years = pd.read_sql("select distinct(report_date) from plants_eia860", conn) print(f"EIA Years in db: {(eia_years)}")EIA Years in db: report_date 0 2019-01-01 1 2018-01-01 2 2017-01-01 3 2016-01-01 4 2015-01-01 5 2014-01-01 6 2013-01-01 7 2012-01-01 8 2011-01-01 9 2010-01-01 10 2009-01-01 11 2008-01-01 12 2007-01-01 13 2006-01-01 14 2005-01-01 15 2004-01-01 16 2003-01-01 17 2002-01-01 18 2001-01-01Unmapped FERC Form 1 Plantsunmapped_plants_ferc1 = pudl.glue.ferc1_eia.get_unmapped_plants_ferc1(pudl_settings, years=ferc1_years) n_ferc1_unmapped_plants = len(unmapped_plants_ferc1) print(f"{n_ferc1_unmapped_plants} unmapped FERC 1 plants found in {min(ferc1_years)}-{max(ferc1_years)}.") outfile = Path("unmapped_plants_ferc1.csv") print(f"Writing {n_ferc1_unmapped_plants} out to {outfile}") unmapped_plants_ferc1.to_csv(outfile, index=False) unmapped_plants_ferc10 unmapped FERC 1 plants found in 1994-2019. Writing 0 out to unmapped_plants_ferc1.csvUnmapped FERC Form 1 Utilities / Respondents* **Note:** Frequently there are zero of these.unmapped_utils_ferc1 = pudl.glue.ferc1_eia.get_unmapped_utils_ferc1(ferc1_engine) n_ferc1_unmapped_utils = len(unmapped_utils_ferc1) print(f"{n_ferc1_unmapped_utils} unmapped FERC 1 utilities found in {min(ferc1_years)}-{max(ferc1_years)}.") outfile = Path("unmapped_utilities_ferc1.csv") print(f"Writing {n_ferc1_unmapped_utils} out to {outfile}") unmapped_utils_ferc1.to_csv(outfile, index=False) unmapped_utils_ferc10 unmapped FERC 1 utilities found in 1994-2019. Writing 0 out to unmapped_utilities_ferc1.csvUnmapped EIA Plants* **Note:** Some unmapped EIA plants do not have Utilities associated with them.* Many of these plants are too small to warrant mapping, and so capacity is included as a potential filter.* Also note that the first and last few plants in the output dataframe have a bunch of NA values... which can be confusing.unmapped_plants_eia = pudl.glue.ferc1_eia.get_unmapped_plants_eia(pudl_engine) print(f"Found {len(unmapped_plants_eia)} unmapped EIA plants.") outfile = Path("unmapped_plants_eia.csv") unmapped_plants_eia.to_csv(outfile) unmapped_plants_eiaFound 253 unmapped EIA plants.Lost EIA Plants* There shouldn't be very many of these... 
if it's more than a few hundred (out of the ~10,000 EIA plants) then something may be wrong.lost_plants_eia = pudl.glue.ferc1_eia.get_lost_plants_eia(pudl_engine) print(f"Found {len(lost_plants_eia)} lost EIA plants.") outfile = Path("lost_plants_eia.csv") outfile.unlink(missing_ok=True) outfile.touch() lost_plants_eia.to_csv(outfile) lost_plants_eia.sample(min([10, len(lost_plants_eia)]))Found 3 lost EIA plants.Unmapped EIA Utilities* Especially with the advent of many small distributed generators, there are often just as many new utilities as there are new plants.unmapped_utils_eia = pudl.glue.ferc1_eia.get_unmapped_utils_eia(pudl_engine) print(f"Found {len(unmapped_utils_eia)} unmapped EIA utilities.") outfile = Path("all_unmapped_utilities_eia.csv") unmapped_utils_eia.to_csv(outfile) miss_utils = pudl.glue.ferc1_eia.get_unmapped_utils_with_plants_eia(pudl_engine) print(f"Found {len(miss_utils)} unmapped utilities with plants/ownership.") outfile = Path("planted_unmapped_utilities_eia.csv") miss_utils.to_csv(outfile) unmapped_utils_eia.head(10) miss_utils.head(10)Another Kind of Unmapped EIA Utilities* This cell looks *only* for the EIA utilities that show up somewhere in the EIA 923 data, but still don't have a `utility_id_pudl` value assigned to them.pudl_raw = pudl.output.pudltabl.PudlTabl(pudl_engine, freq=None) frc_eia923 = pudl_raw.frc_eia923() gf_eia923 = pudl_raw.gf_eia923() gen_eia923 = pudl_raw.gen_eia923() bf_eia923 = pudl_raw.bf_eia923() missing_frc = frc_eia923[frc_eia923.utility_id_pudl.isna()][["utility_id_eia", "utility_name_eia"]] missing_gf = gf_eia923[gf_eia923.utility_id_pudl.isna()][["utility_id_eia", "utility_name_eia"]] missing_bf = bf_eia923[bf_eia923.utility_id_pudl.isna()][["utility_id_eia", "utility_name_eia"]] missing_gens = gen_eia923[gen_eia923.utility_id_pudl.isna()][["utility_id_eia", "utility_name_eia"]] missing_utils = ( pd.concat([missing_frc, missing_bf, missing_gf, missing_gens]) .drop_duplicates(subset="utility_id_eia") .set_index("utility_id_eia") ) print(f"Found {len(missing_utils)} utilities with EIA 923 data but no PUDL Utility ID.") outfile = Path("dataful_unmapped_utilities_eia.csv") missing_utils.to_csv(outfile) missing_utils.sample(min(len(missing_utils), 10)) missing_utils.iloc[50:100]Lost EIA Utilities* Again, there shouldn't be **too** many of these. If it's thousands, not hundreds, dig deeper.lost_utils_eia = pudl.glue.ferc1_eia.get_lost_utils_eia(pudl_engine) print(f"Found {len(lost_utils_eia)} lost EIA utilities.") outfile = Path("lost_utilities_eia.csv") lost_utils_eia.to_csv(outfile)Cleaning other FERC Form 1 Plant Tables* There are several additional FERC Form 1 tables which contain plant data.* These include small plants, hydro, and pumped storage.* Thus far we have not done much concerted work cleaning up / categorizing these plants, though they do get PUDL IDs.* The following cell pulls the small plants (`f1_gnrt_plant`) table with some fields that would be useful for categorization.* This is just a prototype/outline/suggestion...small_plants_ferc1 = ( pd.read_sql( f"""SELECT f1_gnrt_plant.report_year,\ f1_gnrt_plant.respondent_id,\ f1_gnrt_plant.row_number,\ f1_gnrt_plant.spplmnt_num,\ f1_gnrt_plant.plant_name,\ f1_gnrt_plant.capacity_rating,\ f1_gnrt_plant.kind_of_fuel, \ f1_respondent_id.respondent_name\ FROM f1_gnrt_plant, f1_respondent_id \ WHERE report_year>={min(ferc1_years)} AND report_year<={max(ferc1_years)} AND f1_respondent_id.respondent_id=f1_gnrt_plant.respondent_id;""", ferc1_engine). 
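# The next step folds FERC Form 1's per-page row_number and supplement number into a
# single sequential record_number (the factor of 46 presumably being the number of
# rows per supplement page) and then drops the two original columns.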
assign(record_number=lambda x: x["row_number"] + 46*x["spplmnt_num"]). drop(["row_number", "spplmnt_num"], axis="columns"). pipe(pudl.helpers.simplify_strings, columns=["plant_name", "kind_of_fuel", "respondent_name"]). rename(columns={"capacity_rating": "capacity_mw"}). loc[:,["report_year", "respondent_id", "respondent_name", "record_number", "plant_name", "capacity_mw", "kind_of_fuel"]]. sort_values(["report_year", "respondent_id", "record_number"]) ) n_small_plants_ferc1 = len(small_plants_ferc1) outfile = Path("f1_gnrt_plant.csv") print(f"Writing {n_small_plants_ferc1} small plant records out to {outfile}") small_plants_ferc1.to_csv(outfile, index=False) small_plants_ferc1make modelmodel = tf.keras.models.Sequential() model.add(tf.keras.layers.Embedding(input_dim = 10000, output_dim = 24, input_length = 500)) # input layer model.add(tf.keras.layers.LSTM(24, return_sequences=True, activation='tanh')) model.add(tf.keras.layers.LSTM(12, activation='tanh')) # model.add(tf.keras.layers.Flatten()) # hidden layer model.add(tf.keras.layers.Dense(46, activation='softmax')) # output layer model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['acc']) # gadget # hist = model.fit(pad_x_train, y_train, epochs=5, validation_split=0.3, batch_size=128) hist = model.fit(pad_x_train, y_train, epochs=100, validation_split=0.3, batch_size=256)Epoch 1/100 25/25 [==============================] - 23s 765ms/step - loss: 3.7162 - acc: 0.2941 - val_loss: 3.4252 - val_acc: 0.3532 Epoch 2/100 25/25 [==============================] - 18s 735ms/step - loss: 3.2217 - acc: 0.3510 - val_loss: 2.9977 - val_acc: 0.3532 Epoch 3/100 25/25 [==============================] - 19s 750ms/step - loss: 2.8443 - acc: 0.3510 - val_loss: 2.6756 - val_acc: 0.3532 Epoch 4/100 25/25 [==============================] - 19s 750ms/step - loss: 2.6061 - acc: 0.3510 - val_loss: 2.5135 - val_acc: 0.3532 Epoch 5/100 25/25 [==============================] - 19s 747ms/step - loss: 2.5009 - acc: 0.3510 - val_loss: 2.4469 - val_acc: 0.3532 Epoch 6/100 25/25 [==============================] - 19s 747ms/step - loss: 2.4585 - acc: 0.3510 - val_loss: 2.4190 - val_acc: 0.3532 Epoch 7/100 25/25 [==============================] - 18s 740ms/step - loss: 2.4396 - acc: 0.3510 - val_loss: 2.4052 - val_acc: 0.3532 Epoch 8/100 25/25 [==============================] - 18s 734ms[...]Evaluation# 학습 시켰던 데이터 model.evaluate(pad_x_train, y_train) # epoches: 5, batch_zise: 128 --> loss: 2.4052 (loss율이 너무 높음, 0으로 근접해야 함) - acc: 0.3517 # # x_test 데이터 전처리 # # pad_x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=500) # pad_x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=500) # 데이터 전처리 function 만들기 def pad_make(x_data): pad_x = tf.keras.preprocessing.sequence.pad_sequences(x_data, maxlen=500) return pad_x pad_make_x = pad_make(x_test) model.evaluate(pad_make_x, y_test) # 학습 시키지 않은 데이터 # model.evaluate(pad_x_test, y_test) # loss: 2.4171 - acc: 0.3620 (학습시킨 데이터에 비해서 loss율이 높음 -> 모델 학습이 잘됨) import matplotlib.pyplot as plt plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss'], 'r-') plt.show() plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc'], 'r-') plt.show() from sklearn.metrics import classification_report y_train_pred = model.predict(pad_x_train) y_train_pred[0] import numpy as np y_pred = np.argmax(y_train_pred, axis=1) y_pred.shape len(y_train) print(classification_report(y_train, y_pred)) y_test_pred = model.predict(pad_x_test) y_pred = np.argmax(y_test_pred, axis=1) 
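# np.argmax along axis=1 turns each row of predicted class probabilities into the
# index of its most likely class, so y_pred holds integer labels that can be
# compared with y_test in the classification report below.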
print(classification_report(y_test, y_pred))precision recall f1-score support 0 0.00 0.00 0.00 12 1 0.20 0.04 0.06 105 2 0.00 0.00 0.00 20 3 0.92 0.88 0.90 813 4 0.86 0.71 0.78 474 5 0.00 0.00 0.00 5 6 0.00 0.00 0.00 14 7 0.00 0.00 0.00 3 8 0.07 0.32 0.11 38 9 0.00 0.00 0.00 25 10 0.00 0.00 0.00 30 11 0.22 0.43 0.29 83 12 0.00 0.00 0.00 13 13 0.06 0.19 0.09 37 14 0.00 0.00 0.00 2 15 0.00 0.00 0.00 9 16 0.15 0.17 0.16 99 17 0.00 [...]ServiceTOKYO, Aug 6 (Reuters) - Two Belarus coaches who cut short sprinter Krystsina Tsimanouskaya's Tokyo Games have had their accreditation revoked and were removed from the athletes village, the IOC said on Friday.# 문장 입력 # ---> 숫자(사전을 기준) ---> [ , , , ...] ---> pad_sequence model.predictI am importing the csv. I could also use Urllib to pull form url but I like csv's. I limited the rows as I kept getting an error in row 131,000.nRowsRead=100000 badstuff= pd.read_csv('Crimes.csv',nrows=nRowsRead, index_col='INC NUMBER')I wanted all the columns to be lower case and spaces to be replaced by _. I used this replace method for it.badstuff.columns = badstuff.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')I am going to see the info and describe methods to get a better idea of the data.badstuff.info() Index: 100000 entries, 201600000052855 to 201700000927237 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 occurred_on 99739 non-null object 1 occurred_to 71048 non-null object 2 ucr_crime_category 100000 non-null object 3 100_block_addr 100000 non-null object 4 zip 99997 non-null float64 5 premise_type 99372 non-null object dtypes: float64(1), object(5) memory usage: 5.3+ MBAfter performing the describe, I realize its the only column with floats. Hence why its the only one that appears.badstuff.describe()I want to see how many different zip codes and different places their are for this data.badstuff.zip.value_counts() badstuff.premise_type.value_counts()Now I want to sort the data. I am going to sort based on place.badstuff.sort_values(by=['premise_type','ucr_crime_category'])I am going to look at the different premises and which types of crimes are most common in each.places =badstuff.groupby('premise_type') places.describe()Import matplotlib to make graphs eventuallyimport matplotlib as plotWant to graph the 10 highest areas of crime in the Phoenix area.places.zip.count().sort_values(ascending=False).iloc[1:10].plot.bar()Try and make a pie chart for crime in the top 10 areas of crime.tally= places.zip.count().sort_values(ascending=False).iloc[1:10] tally tally.plot.pie() places.get_group('APARTMENT') places.get_group('APARTMENT').nunique()Isolate the counts of crime category and graph it.places.get_group('APARTMENT').groupby('ucr_crime_category').zip.count() places.get_group('APARTMENT').groupby('ucr_crime_category').zip.count().plot.bar(title='Apartment Crimes')0. ETF SelectionWe select the SPDR Gold Shares (GLD) ETF as the gold ETF. It is traded on Nasdaq, the currency is USD.Similarly, we choose the Amundi CAC 40 UCITS ETF-C (C40.PA) as the equity ETF. It will track the CAC 40 index of France. It is traded on Paris Euronext, the currency is EUR.The currency for Bitcoin is USD.Data source: https://finance.yahoo.com/ 1. 
Data Importingimport arch import holidays import pmdarima import pandas as pd import numpy as np from pandas import Series, DataFrame import matplotlib.pyplot as plt import seaborn as sns from scipy import stats from datetime import datetime from statsmodels.tsa.arima.model import ARIMA from statsmodels.tsa.statespace.varmax import VARMAX from statsmodels.tsa.stattools import adfuller, coint from statsmodels.tsa.vector_ar.vecm import VECM from sklearn import preprocessing %matplotlib inline gold_df = pd.read_csv("data/SPDR Gold Shares (GLD) Jan - Dec 2020.csv") equity_df = pd.read_csv("data/Amundi CAC 40 UCITS ETF-C (C40.PA) Jan 2020 - Dec 2020.csv") bitcoin_df = pd.read_csv('data/Bitcoin USD (BTC-USD) Jan 2020 - Dec 2020.csv')Convert the data into the datetime format and make it the index to query the dataframe easier.def convert_df(df): df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d") df.set_index("Date", inplace=True) return df gold_df = convert_df(gold_df) equity_df = convert_df(equity_df) bitcoin_df = convert_df(bitcoin_df)We use the common subset of days for all 3 time series to make them comparable and to run the test for cointegration in question 7.missing = set(equity_df.index) - set(gold_df.index) def remove_missing_days(df, indices, missing): return df.loc[[index for index in indices if index not in missing]] gold_df = remove_missing_days(gold_df, equity_df.index, missing) equity_df = remove_missing_days(equity_df, equity_df.index, missing) bitcoin_df = remove_missing_days(bitcoin_df, equity_df.index, missing) assert equity_df.shape == gold_df.shape assert equity_df.shape == bitcoin_df.shapeWe then fill some missing values in the equity series with its old values to avoid data leakage.equity_df = equity_df.ffill()Verify that the time range is correct.gold_df.head() gold_df.tail() equity_df.head() equity_df.tail() bitcoin_df.head() bitcoin_df.tail()2. Data Processing We use adjusted close prices to calculate the daily returns. Adjusted close prices are the prices that already take into account stock split and dividends, which reflex more accurate the change of the prices.equity_df["Daily Return"] = equity_df["Adj Close"].pct_change(1) equity_df.head() gold_df["Daily Return"] = gold_df["Adj Close"].pct_change(1) gold_df.head() bitcoin_df["Daily Return"] = bitcoin_df["Adj Close"].pct_change(1) bitcoin_df.head()5. 
Category 1 Models: Just use 1 variable.# Helper df_names = {0: "gold ETF", 1: "equity ETF", 2: "Bitcoin"} dfs = [gold_df, equity_df, bitcoin_df] def get_data(df, month_start, month_end, column=None): data = df[(df.index >= f"2020-{month_start:02d}-01") & (df.index < f"2020-{month_end:02d}-01")] if column: data = data[column] return data def summarize_data(df): data = get_data(df, 3, 12, "Adj Close") moving_avg = data.rolling(20, min_periods=1).mean() return get_data(moving_avg, 4, 12) def get_data(df, month_start, month_end, column): return df[(df.index >= f"2020-{month_start:02d}-01") & (df.index < f"2020-{month_end:02d}-01")][column] def fit_arima(data, exog= None): model = ARIMA(data, exog=exog, order=(2,0,2)) model_fit = model.fit() return model_fit def fit_garch(data, garch_type="GARCH"): if garch_type == "TARCH": garch = arch.arch_model(data, vol='TGARCH', p=1, o=1, q=1, power=1) else: garch = arch.arch_model(data, vol=garch_type, p=1, o=0, q=1) garch_fitted = garch.fit() print(garch_fitted.summary()) if garch_type != "FIGARCH": omega = garch_fitted.params["omega"] alpha = garch_fitted.params["alpha[1]"] beta = garch_fitted.params["beta[1]"] print(f"Unconditional variance: {omega/(1 - alpha - beta)}") return garch_fitted bitcoin_q2 = get_data(bitcoin_df, 4, 6, "Daily Return") bitcoin_q3 = get_data(bitcoin_df, 7, 9, "Daily Return") bitcoin_q4 = get_data(bitcoin_df, 10, 12, "Daily Return") equity_q2 = get_data(equity_df, 4, 6, "Daily Return") equity_q3 = get_data(equity_df, 7, 9, "Daily Return") equity_q4 = get_data(equity_df, 10, 12, "Daily Return") gold_q2 = get_data(gold_df, 4, 6, "Daily Return") gold_q3 = get_data(gold_df, 7, 9, "Daily Return") gold_q4 = get_data(gold_df, 10, 12, "Daily Return") bitcoin_q2.shape bitcoin_q3.shape bitcoin_q4.shape gold_q2.shape gold_q3.shapeOur first model with ARIMA is a simple one, we'll allocate our capital totally to bitcoin if we predict that the next day price will rise, otherwise we will short 100%.The total return for Q3 is 3.3%, not very impressive, but for Q4, it's 82%, which is much better. Return for 2 quarters is 87%model = fit_arima(bitcoin_q2) return_pred = model.predict(start=bitcoin_q2.shape[0], end=bitcoin_q2.shape[0] + bitcoin_q3.shape[0] - 1).values signal = np.where(return_pred > 0, 1, -1) return_q3 = np.product(bitcoin_q3 * signal + 1) - 1 return_q3 model = fit_arima(bitcoin_q3) return_pred = model.predict(start=bitcoin_q3.shape[0], end=bitcoin_q3.shape[0] + bitcoin_q4.shape[0] - 1).values signal = np.where(return_pred >0, 1, -1) return_q4 = np.product(bitcoin_q4 * signal + 1) - 1 return_q4 (1 + return_q3) * (1 + return_q4) - 1We prefer the ARMA model for the ease of model and a decent return 6. Category 2 Models: Just use 2 variables With VARMA model, we use the equity returns as to enhance the prediction for the bitcoin returns. for Q3, the return is 28% while for Q4, it's 63%. 
Return for 2 quarters is 108%model = fit_arima(bitcoin_q2, equity_q2) return_pred = model.predict(exog=equity_q3, start=bitcoin_q2.shape[0], end=bitcoin_q2.shape[0] + bitcoin_q3.shape[0] - 1).values signal = np.where(return_pred > 0, 1, -1) return_q3 = np.product(bitcoin_q3 * signal + 1) - 1 return_q3 model = fit_arima(bitcoin_q3, equity_q3) return_pred = model.predict(exog=equity_q4, start=bitcoin_q3.shape[0], end=bitcoin_q3.shape[0] + bitcoin_q4.shape[0] - 1).values signal = np.where(return_pred > 0, 1, -1) return_q4 = np.product(bitcoin_q4 * signal + 1) - 1 return_q4 (1 + return_q3) * (1 + return_q4) - 1We prefer the VARMA model thanks to its execellent return 7 . Category 3 Models: Use all 3 variables VARMA model With VARMA model, we use both the equity and gold returns as to enhance the prediction for the bitcoin returns. for Q3, the return is 50% while for Q4, it's 40%. Return for 2 quarters is 109%model = fit_arima(bitcoin_q2, pd.concat([equity_q2, gold_q2], axis=1)) return_pred = model.predict(exog=pd.concat([equity_q3, gold_q3], axis=1), start=bitcoin_q2.shape[0], end=bitcoin_q2.shape[0] + bitcoin_q3.shape[0] - 1).values signal = np.where(return_pred > 0, 1, -1) return_q3 = np.product(bitcoin_q3 * signal + 1) - 1 return_q3 model = fit_arima(bitcoin_q3, pd.concat([equity_q3, gold_q3], axis=1)) return_pred = model.predict(exog=pd.concat([equity_q4, gold_q4], axis=1), start=bitcoin_q3.shape[0], end=bitcoin_q3.shape[0] + bitcoin_q4.shape[0] - 1).values signal = np.where(return_pred > 0, 1, -1) return_q4 = np.product(bitcoin_q4 * signal + 1) - 1 return_q4 (1 + return_q3) * (1 + return_q4) - 1 # index = 2 # df = dfs[index] # print(f"{model} model for {df_names[index]} from April to December") # data = get_data(df, 4, 6, "Daily Return") # data = data.dropna() # garch_fitted = fit_garch(data, "GARCH") # forecasts = garch_fitted.forecast(horizon=43)Compare models using cumulative return and volatilitydata1 = get_data(gold_df, 4, 6, "Daily Return").values data2 = get_data(equity_df, 4, 6, "Daily Return").values data3 = get_data(bitcoin_df, 4, 6, "Daily Return").values coint(data1, np.array([data2, data3]).T) coint(data2, np.array([data1, data3]).T) coint(data3, np.array([data1, data2]).T)p-value < 0.01 for all tests, we conclude that there are cointegrating vectors. Vector Error Correction Model We allocate our capital to 3 assets based on the predicted return, with a negative return means that we'll short the asset, and the sum of the 3 assets is always 1. Q3's return is 23%, Q4's return is 47%, total return is 81%data_q2 = np.array([data1, data2, data3]).T model = VECM(data_q2, coint_rank=1) vecm_res = model.fit() res = vecm_res.predict(steps=43) row_sums = res.sum(axis=1) weights = res / row_sums[:, np.newaxis] weights data1 = get_data(gold_df, 7, 9, "Daily Return").values data2 = get_data(equity_df, 7, 9, "Daily Return").values data3 = get_data(bitcoin_df, 7, 9, "Daily Return").values data_q3 = np.array([data1, data2, data3]).T return_q3 = weights * data_q3 total_return_q3 = return_q3.sum(axis=1) portfolio_return_q3 = np.product(total_return_q3 + 1) - 1 portfolio_return_q3 coint(data1, np.array([data2, data3]).T) coint(data2, np.array([data1, data3]).T) coint(data3, np.array([data1, data2]).T)Interestingly, we can't reject the null hypothesis for the test for the equity ETF and the combintion of gold and bitcoin ETF. 
We can reject the null hypothesis for the other two tests.model = VECM(data_q3, coint_rank=1) vecm_res = model.fit() res = vecm_res.predict(steps=42) row_sums = res.sum(axis=1) weights = res / row_sums[:, np.newaxis] weights data1 = get_data(gold_df, 10, 12, "Daily Return").values data2 = get_data(equity_df, 10, 12, "Daily Return").values data3 = get_data(bitcoin_df, 10, 12, "Daily Return").values data_q4 = np.array([data1, data2, data3]).T return_q4 = weights * data_q4 total_return_q4 = return_q4.sum(axis=1) portfolio_return_q4 = np.product(total_return_q4 + 1) - 1 portfolio_return_q4 (1 + portfolio_return_q3) * (1 + portfolio_return_q4) - 1Personalized cancer diagnosis Description Source: https://www.kaggle.com/c/msk-redefining-cancer-treatment/ Data: Memorial Sloan Kettering Cancer Center (MSKCC) Download training_variants.zip and training_text.zip from Kaggle. Context: Source: https://www.kaggle.com/c/msk-redefining-cancer-treatment/discussion/35336198462 Problem statement : Classify the given genetic variations/mutations based on evidence from text-based clinical literature. Real-world/Business objectives and constraints. * No low-latency requirement.* Interpretability is important.* Errors can be very costly.* Probability of a data-point belonging to each class is needed.from google.colab import drive drive.mount('/content/drive') path = "/content/drive/My Drive/Colab Notebooks/AppliedAI/Personalized_Cancer_Diagnosis/"2.1. Data Data Overview - Source: https://www.kaggle.com/c/msk-redefining-cancer-treatment/data- We have two data files: one conatins the information about the genetic mutations and the other contains the clinical evidence (text) that human experts/pathologists use to classify the genetic mutations. - Both these data files are have a common column called ID- Data file's information: training_variants (ID , Gene, Variations, Class) training_text (ID, Text) Example Data Point training_variantsID,Gene,Variation,Class0,FAM58A,Truncating Mutations,1 1,CBL,W802*,2 2,CBL,Q249E,2 ... training_textID,Text 0||Cyclin-dependent kinases (CDKs) regulate a variety of fundamental cellular processes. CDK10 stands out as one of the last orphan CDKs for which no activating cyclin has been identified and no kinase activity revealed. Previous work has shown that CDK10 silencing increases ETS2 (v-ets erythroblastosis virus E26 oncogene homolog 2)-driven activation of the MAPK pathway, which confers tamoxifen resistance to breast cancer cells. The precise mechanisms by which CDK10 modulates ETS2 activity, and more generally the functions of CDK10, remain elusive. Here we demonstrate that CDK10 is a cyclin-dependent kinase by identifying cyclin M as an activating cyclin. Cyclin M, an orphan cyclin, is the product of FAM58A, whose mutations cause STAR syndrome, a human developmental anomaly whose features include toe syndactyly, telecanthus, and anogenital and renal malformations. We show that STAR syndrome-associated cyclin M mutants are unable to interact with CDK10. Cyclin M silencing phenocopies CDK10 silencing in increasing c-Raf and in conferring tamoxifen resistance to breast cancer cells. CDK10/cyclin M phosphorylates ETS2 in vitro, and in cells it positively controls ETS2 degradation by the proteasome. ETS2 protein levels are increased in cells derived from a STAR patient, and this increase is attributable to decreased cyclin M levels. Altogether, our results reveal an additional regulatory mechanism for ETS2, which plays key roles in cancer and development. 
They also shed light on the molecular mechanisms underlying STAR syndrome.Cyclin-dependent kinases (CDKs) play a pivotal role in the control of a number of fundamental cellular processes (1). The human genome contains 21 genes encoding proteins that can be considered as members of the CDK family owing to their sequence similarity with bona fide CDKs, those known to be activated by cyclins (2). Although discovered almost 20 y ago (3, 4), CDK10 remains one of the two CDKs without an identified cyclin partner. This knowledge gap has largely impeded the exploration of its biological functions. CDK10 can act as a positive cell cycle regulator in some cells (5, 6) or as a tumor suppressor in others (7, 8). CDK10 interacts with the ETS2 (v-ets erythroblastosis virus E26 oncogene homolog 2) transcription factor and inhibits its transcriptional activity through an unknown mechanism (9). CDK10 knockdown derepresses ETS2, which increases the expression of the c-Raf protein kinase, activates the MAPK pathway, and induces resistance of MCF7 cells to tamoxifen (6). ... Type of Machine Learning Problem There are nine different classes a genetic mutation can be classified into => Multi class classification problem Performance Metric Source: https://www.kaggle.com/c/msk-redefining-cancer-treatmentevaluationMetric(s): * Multi class log-loss * Confusion matrix Machine Learing Objectives and Constraints Objective: Predict the probability of each data-point belonging to each of the nine classes. Constraints:* Interpretability* Class probabilities are needed.* Penalize the errors in class probabilites => Metric is Log-loss.* No Latency constraints. Train, CV and Test Datasets Split the dataset randomly into three parts train, cross validation and test with 64%,16%, 20% of data respectivelyimport pandas as pd import matplotlib.pyplot as plt import re import time import warnings import numpy as np from nltk.corpus import stopwords from nltk.tokenize import sent_tokenize from sklearn.decomposition import TruncatedSVD from sklearn.preprocessing import normalize from sklearn.feature_extraction.text import CountVectorizer from sklearn.manifold import TSNE import seaborn as sns from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics.classification import accuracy_score, log_loss from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import SGDClassifier from imblearn.over_sampling import SMOTE from collections import Counter from scipy.sparse import hstack from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC # from sklearn.cross_validation import StratifiedKFold from collections import Counter, defaultdict from sklearn.calibration import CalibratedClassifierCV from sklearn.naive_bayes import MultinomialNB from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV,StratifiedKFold import math from sklearn.metrics import normalized_mutual_info_score from sklearn.ensemble import RandomForestClassifier warnings.filterwarnings("ignore") from mlxtend.classifier import StackingClassifier from sklearn import model_selection from sklearn.linear_model import LogisticRegression from prettytable import PrettyTable table = PrettyTable(field_names=['Vectorizer','Model','Test Log Loss', 'Improvement']) print(table)+------------+-------+---------------+-------------+ | Vectorizer | Model | Test Log Loss | Improvement | 
+------------+-------+---------------+-------------+ +------------+-------+---------------+-------------+Reading Gene and Variation Datadata = pd.read_csv(path+'training_variants') print('Number of data points : ', data.shape[0]) print('Number of features : ', data.shape[1]) print('Features : ', data.columns.values) data.head()Number of data points : 3321 Number of features : 4 Features : ['ID' 'Gene' 'Variation' 'Class']training/training_variants is a comma separated file containing the description of the genetic mutations used for training. Fields are ID : the id of the row used to link the mutation to the clinical evidence Gene : the gene where this genetic mutation is located Variation : the aminoacid change for this mutations Class : 1-9 the class this genetic mutation has been classified on Reading Text Data# note the seprator in this file data_text =pd.read_csv(path+"training_text",sep="\|\|",engine="python", names=["ID","TEXT"],skiprows=1) print('Number of data points : ', data_text.shape[0]) print('Number of features : ', data_text.shape[1]) print('Features : ', data_text.columns.values) data_text.head()Number of data points : 3321 Number of features : 2 Features : ['ID' 'TEXT']Preprocessing of textimport nltk nltk.download('stopwords') nltk.download('punkt') # loading stop words from nltk library stop_words = set(stopwords.words('english')) def nlp_preprocessing(total_text, index, column): if type(total_text) is not int: string = "" # replace every special char with space total_text = re.sub('[^a-zA-Z0-9\n]', ' ', total_text) # replace multiple spaces with single space total_text = re.sub('\s+',' ', total_text) # converting all the chars into lower-case. total_text = total_text.lower() for word in total_text.split(): # if the word is a not a stop word then retain that word from the data if not word in stop_words: string += word + " " data_text[column][index] = string #text processing stage. 
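# Note: time.clock() (used on the next line) was deprecated in Python 3.3 and removed
# in Python 3.8; time.perf_counter() is the usual replacement for timing a block like this.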
start_time = time.clock() for index, row in data_text.iterrows(): if type(row['TEXT']) is str: nlp_preprocessing(row['TEXT'], index, 'TEXT') else: print("there is no text description for id:",index) print('Time took for preprocessing the text :',time.clock() - start_time, "seconds") #merging both gene_variations and text data based on ID result = pd.merge(data, data_text,on='ID', how='left') result.head() result[result.isnull().any(axis=1)] result.loc[result['TEXT'].isnull(),'TEXT'] = result['Gene'] +' '+result['Variation'] result[result['ID']==1109]Splitting data into train, test and cross validation (64:20:16)y_true = result['Class'].values result.Gene = result.Gene.str.replace('\s+', '_') result.Variation = result.Variation.str.replace('\s+', '_') # split the data into test and train by maintaining same distribution of # output varaible 'y_true' [stratify=y_true] X_train, test_df, y_train, y_test = train_test_split(result, y_true, stratify=y_true, test_size=0.2) # split the train data into train and cross validation by maintaining # same distribution of output varaible 'y_train' [stratify=y_train] train_df, cv_df, y_train, y_cv = train_test_split(X_train, y_train, stratify=y_train, test_size=0.2) print(result.shape) print(train_df.shape) print(test_df.shape) print(cv_df.shape)(3321, 5) (2124, 5) (665, 5) (532, 5)We split the data into train, test and cross validation data sets, preserving the ratio of class distribution in the original data setprint('Number of data points in train data:', train_df.shape[0]) print('Number of data points in test data:', test_df.shape[0]) print('Number of data points in cross validation data:', cv_df.shape[0])Number of data points in train data: 2124 Number of data points in test data: 665 Number of data points in cross validation data: 532Distribution of y_i's in Train, Test and Cross Validation datasetstrain_df['Class'].head(8) # it returns a dict, keys as class labels and values as the number of data points in that class train_class_distribution = train_df['Class'].value_counts().sort_index() test_class_distribution = test_df['Class'].value_counts().sort_index() cv_class_distribution = cv_df['Class'].value_counts().sort_index() my_colors = 'rgbkymc' train_class_distribution.plot(kind='bar',color=list(my_colors)) plt.xlabel('Class') plt.ylabel('Data points per Class') plt.title('Distribution of yi in train data') plt.grid() plt.show() # ref: argsort https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html # -(train_class_distribution.values): the minus sign will give us in decreasing order sorted_yi = np.argsort(-train_class_distribution.values) for i in sorted_yi: print('Number of data points in class', i+1, ':', train_class_distribution.values[i], '(', np.round((train_class_distribution.values[i]/train_df.shape[0]*100), 3), '%)') print('-'*80) my_colors = 'rgbkymc' test_class_distribution.plot(kind='bar',color=list(my_colors)) plt.xlabel('Class') plt.ylabel('Data points per Class') plt.title('Distribution of yi in test data') plt.grid() plt.show() # ref: argsort https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html # -(train_class_distribution.values): the minus sign will give us in decreasing order sorted_yi = np.argsort(-test_class_distribution.values) for i in sorted_yi: print('Number of data points in class', i+1, ':', test_class_distribution.values[i], '(', np.round((test_class_distribution.values[i]/test_df.shape[0]*100), 3), '%)') print('-'*80) my_colors = 'rgbkymc' 
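# list(my_colors) expands the string 'rgbkymc' into one single-letter matplotlib
# color code per bar: red, green, blue, black, yellow, magenta, cyan.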
cv_class_distribution.plot(kind='bar',color=list(my_colors)) plt.xlabel('Class') plt.ylabel('Data points per Class') plt.title('Distribution of yi in cross validation data') plt.grid() plt.show() # ref: argsort https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html # -(train_class_distribution.values): the minus sign will give us in decreasing order sorted_yi = np.argsort(-train_class_distribution.values) for i in sorted_yi: print('Number of data points in class', i+1, ':', cv_class_distribution.values[i], '(', np.round((cv_class_distribution.values[i]/cv_df.shape[0]*100), 3), '%)')Prediction using a 'Random' Model In a 'Random' Model, we generate the NINE class probabilites randomly such that they sum to 1.# This function plots the confusion matrices given y_i, y_i_hat. def plot_confusion_matrix(test_y, predict_y): C = confusion_matrix(test_y, predict_y) # C = 9,9 matrix, each cell (i,j) represents number of points of class i # are predicted class j A =(((C.T)/(C.sum(axis=1))).T) # divide each element of the confusion matrix with the sum of elements in # that column # C = [[1, 2], # [3, 4]] # C.T = [[1, 3], # [2, 4]] # C.sum(axis = 1) axis=0 corresonds to columns and axis=1 corresponds to rows in two diamensional array # C.sum(axix =1) = [[3, 7]] # ((C.T)/(C.sum(axis=1))) = [[1/3, 3/7] # [2/3, 4/7]] # ((C.T)/(C.sum(axis=1))).T = [[1/3, 2/3] # [3/7, 4/7]] # sum of row elements = 1 B =(C/C.sum(axis=0)) #divid each element of the confusion matrix with the sum of elements in that row # C = [[1, 2], # [3, 4]] # C.sum(axis = 0) axis=0 corresonds to columns and axis=1 corresponds to rows in two diamensional array # C.sum(axix =0) = [[4, 6]] # (C/C.sum(axis=0)) = [[1/4, 2/6], # [3/4, 4/6]] labels = [1,2,3,4,5,6,7,8,9] # representing A in heatmap format print("-"*20, "Confusion matrix", "-"*20) plt.figure(figsize=(20,7)) sns.heatmap(C, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels) plt.xlabel('Predicted Class') plt.ylabel('Original Class') plt.show() print("-"*20, "Precision matrix (Columm Sum=1)", "-"*20) plt.figure(figsize=(20,7)) sns.heatmap(B, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels) plt.xlabel('Predicted Class') plt.ylabel('Original Class') plt.show() # representing B in heatmap format print("-"*20, "Recall matrix (Row sum=1)", "-"*20) plt.figure(figsize=(20,7)) sns.heatmap(A, annot=True, cmap="YlGnBu", fmt=".3f", xticklabels=labels, yticklabels=labels) plt.xlabel('Predicted Class') plt.ylabel('Original Class') plt.show() # we need to generate 9 numbers and the sum of numbers should be 1 # one solution is to genarate 9 numbers and divide each of the numbers by their sum # ref: https://stackoverflow.com/a/18662466/4084039 test_data_len = test_df.shape[0] cv_data_len = cv_df.shape[0] # we create a output array that has exactly same size as the CV data cv_predicted_y = np.zeros((cv_data_len,9)) for i in range(cv_data_len): rand_probs = np.random.rand(1,9) cv_predicted_y[i] = ((rand_probs/sum(sum(rand_probs)))[0]) print("Log loss on Cross Validation Data using Random Model",log_loss(y_cv,cv_predicted_y, eps=1e-15)) # Test-Set error. 
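# For reference, always predicting the uniform vector [1/9, ..., 1/9] would give a
# multi-class log loss of ln(9) ≈ 2.197 on any dataset; the randomly drawn (then
# normalized) probabilities used here do slightly worse.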
#we create a output array that has exactly same as the test data test_predicted_y = np.zeros((test_data_len,9)) for i in range(test_data_len): rand_probs = np.random.rand(1,9) test_predicted_y[i] = ((rand_probs/sum(sum(rand_probs)))[0]) print("Log loss on Test Data using Random Model",log_loss(y_test,test_predicted_y, eps=1e-15)) predicted_y =np.argmax(test_predicted_y, axis=1) plot_confusion_matrix(y_test, predicted_y+1) table.add_row(['NAN','Random',2.48,0]) print(table)+------------+--------+---------------+-------------+ | Vectorizer | Model | Test Log Loss | Improvement | +------------+--------+---------------+-------------+ | NAN | Random | 2.48 | 0 | +------------+--------+---------------+-------------+Univariate Analysis# code for response coding with Laplace smoothing. # alpha : used for laplace smoothing # feature: ['gene', 'variation'] # df: ['train_df', 'test_df', 'cv_df'] # algorithm # ---------- # Consider all unique values and the number of occurances of given feature in train data dataframe # build a vector (1*9) , the first element = (number of times it occured in class1 + 10*alpha / number of time it occurred in total data+90*alpha) # gv_dict is like a look up table, for every gene it store a (1*9) representation of it # for a value of feature in df: # if it is in train data: # we add the vector that was stored in 'gv_dict' look up table to 'gv_fea' # if it is not there is train: # we add [1/9, 1/9, 1/9, 1/9,1/9, 1/9, 1/9, 1/9, 1/9] to 'gv_fea' # return 'gv_fea' # ---------------------- # get_gv_fea_dict: Get Gene varaition Feature Dict def get_gv_fea_dict(alpha, feature, df): # value_count: it contains a dict like # print(train_df['Gene'].value_counts()) # output: # {BRCA1 174 # TP53 106 # EGFR 86 # BRCA2 75 # PTEN 69 # KIT 61 # BRAF 60 # ERBB2 47 # PDGFRA 46 # ...} # print(train_df['Variation'].value_counts()) # output: # { # Truncating_Mutations 63 # Deletion 43 # Amplification 43 # Fusions 22 # Overexpression 3 # E17K 3 # Q61L 3 # S222D 2 # P130S 2 # ... 
# } value_count = train_df[feature].value_counts() # gv_dict : Gene Variation Dict, which contains the probability array for each gene/variation gv_dict = dict() # denominator will contain the number of time that particular feature occured in whole data for i, denominator in value_count.items(): # vec will contain (p(yi==1/Gi) probability of gene/variation belongs to perticular class # vec is 9 diamensional vector vec = [] for k in range(1,10): # print(train_df.loc[(train_df['Class']==1) & (train_df['Gene']=='BRCA1')]) # ID Gene Variation Class # 2470 2470 BRCA1 S1715C 1 # 2486 2486 BRCA1 S1841R 1 # 2614 2614 BRCA1 M1R 1 # 2432 2432 BRCA1 L1657P 1 # 2567 2567 BRCA1 T1685A 1 # 2583 2583 BRCA1 E1660G 1 # 2634 2634 BRCA1 W1718L 1 # cls_cnt.shape[0] will return the number of rows cls_cnt = train_df.loc[(train_df['Class']==k) & (train_df[feature]==i)] # cls_cnt.shape[0](numerator) will contain the number of time that particular feature occured in whole data vec.append((cls_cnt.shape[0] + alpha*10)/ (denominator + 90*alpha)) # we are adding the gene/variation to the dict as key and vec as value gv_dict[i]=vec return gv_dict # Get Gene variation feature def get_gv_feature(alpha, feature, df): # print(gv_dict) # {'BRCA1': [0.20075757575757575, 0.03787878787878788, 0.068181818181818177, 0.13636363636363635, 0.25, 0.19318181818181818, 0.03787878787878788, 0.03787878787878788, 0.03787878787878788], # 'TP53': [0.32142857142857145, 0.061224489795918366, 0.061224489795918366, 0.27040816326530615, 0.061224489795918366, 0.066326530612244902, 0.051020408163265307, 0.051020408163265307, 0.056122448979591837], # 'EGFR': [0.056818181818181816, 0.21590909090909091, 0.0625, 0.068181818181818177, 0.068181818181818177, 0.0625, 0.34659090909090912, 0.0625, 0.056818181818181816], # 'BRCA2': [0.13333333333333333, 0.060606060606060608, 0.060606060606060608, 0.078787878787878782, 0.1393939393939394, 0.34545454545454546, 0.060606060606060608, 0.060606060606060608, 0.060606060606060608], # 'PTEN': [0.069182389937106917, 0.062893081761006289, 0.069182389937106917, 0.46540880503144655, 0.075471698113207544, 0.062893081761006289, 0.069182389937106917, 0.062893081761006289, 0.062893081761006289], # 'KIT': [0.066225165562913912, 0.25165562913907286, 0.072847682119205295, 0.072847682119205295, 0.066225165562913912, 0.066225165562913912, 0.27152317880794702, 0.066225165562913912, 0.066225165562913912], # 'BRAF': [0.066666666666666666, 0.17999999999999999, 0.073333333333333334, 0.073333333333333334, 0.093333333333333338, 0.080000000000000002, 0.29999999999999999, 0.066666666666666666, 0.066666666666666666], # ... 
# } gv_dict = get_gv_fea_dict(alpha, feature, df) # value_count is similar in get_gv_fea_dict value_count = train_df[feature].value_counts() # gv_fea: Gene_variation feature, it will contain the feature for each feature value in the data gv_fea = [] # for every feature values in the given data frame we will check if it is there in the train data then we will add the feature to gv_fea # if not we will add [1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9] to gv_fea for index, row in df.iterrows(): if row[feature] in dict(value_count).keys(): gv_fea.append(gv_dict[row[feature]]) else: gv_fea.append([1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9]) # gv_fea.append([-1,-1,-1,-1,-1,-1,-1,-1,-1]) return gv_feawhen we caculate the probability of a feature belongs to any particular class, we apply laplace smoothing(numerator + 10\*alpha) / (denominator + 90\*alpha) Univariate Analysis on Gene Featureunique_genes = train_df['Gene'].value_counts() print('Number of Unique Genes :', unique_genes.shape[0]) # the top 10 genes that occured most print(unique_genes.head(10)) print("Ans: There are", unique_genes.shape[0] ,"different categories of genes in the train data, and they are distibuted as follows",) s = sum(unique_genes.values); h = unique_genes.values/s; plt.plot(h, label="Histrogram of Genes") plt.xlabel('Index of a Gene') plt.ylabel('Number of Occurances') plt.legend() plt.grid() plt.show() c = np.cumsum(h) plt.plot(c,label='Cumulative distribution of Genes') plt.grid() plt.legend() plt.show() #response-coding of the Gene feature # alpha is used for laplace smoothing alpha = 1 # train gene feature train_gene_feature_responseCoding = np.array(get_gv_feature(alpha, "Gene", train_df)) # test gene feature test_gene_feature_responseCoding = np.array(get_gv_feature(alpha, "Gene", test_df)) # cross validation gene feature cv_gene_feature_responseCoding = np.array(get_gv_feature(alpha, "Gene", cv_df)) print("train_gene_feature_responseCoding is converted feature using respone coding method. The shape of gene feature:", train_gene_feature_responseCoding.shape) # one-hot encoding of Gene feature. # gene_vectorizer = CountVectorizer() gene_vectorizer = TfidfVectorizer(max_features=1000) train_gene_feature_onehotCoding = gene_vectorizer.fit_transform(train_df['Gene']) test_gene_feature_onehotCoding = gene_vectorizer.transform(test_df['Gene']) cv_gene_feature_onehotCoding = gene_vectorizer.transform(cv_df['Gene']) train_df['Gene'].head() gene_vectorizer.get_feature_names() print("train_gene_feature_onehotCoding is converted feature using one-hot encoding method. The shape of gene feature:", train_gene_feature_onehotCoding.shape) alpha = [10 ** x for x in range(-5, 1)] # hyperparam for SGD classifier. # read more about SGDClassifier() at http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html # ------------------------------ # default parameters # SGDClassifier(loss=’hinge’, penalty=’l2’, alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, # shuffle=True, verbose=0, epsilon=0.1, n_jobs=1, random_state=None, learning_rate=’optimal’, eta0=0.0, power_t=0.5, # class_weight=None, warm_start=False, average=False, n_iter=None) # some of methods # fit(X, y[, coef_init, intercept_init, …]) Fit linear model with Stochastic Gradient Descent. # predict(X) Predict class labels for samples in X. 
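# CalibratedClassifierCV(..., method="sigmoid") below wraps the SGD classifier with
# Platt scaling so that predict_proba returns better-calibrated probabilities,
# which is what the multi-class log-loss metric evaluates.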
#------------------------------- # video link: #------------------------------ cv_log_error_array=[] for i in alpha: clf = SGDClassifier(alpha=i, penalty='l2', loss='log', random_state=42) clf.fit(train_gene_feature_onehotCoding, y_train) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_gene_feature_onehotCoding, y_train) predict_y = sig_clf.predict_proba(cv_gene_feature_onehotCoding) cv_log_error_array.append(log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) print('For values of alpha = ', i, "The log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) fig, ax = plt.subplots() ax.plot(alpha, cv_log_error_array,c='g') for i, txt in enumerate(np.round(cv_log_error_array,3)): ax.annotate((alpha[i],np.round(txt,3)), (alpha[i],cv_log_error_array[i])) plt.grid() plt.title("Cross Validation Error for each alpha") plt.xlabel("Alpha i's") plt.ylabel("Error measure") plt.show() best_alpha = np.argmin(cv_log_error_array) clf = SGDClassifier(alpha=alpha[best_alpha], penalty='l2', loss='log', random_state=42) clf.fit(train_gene_feature_onehotCoding, y_train) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_gene_feature_onehotCoding, y_train) predict_y = sig_clf.predict_proba(train_gene_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(cv_gene_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(test_gene_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15)) # Improvement in test log = 0.01 # Improvement of 0.01 on test log loss table.add_row(['Gene Feature One-Hot','Linear SVM', 1.19, 0.01]) print(table) unique_variations = train_df['Variation'].value_counts() print('Number of Unique Variations :', unique_variations.shape[0]) # the top 10 variations that occured most print(unique_variations.head(10)) print("Ans: There are", unique_variations.shape[0] ,"different categories of variations in the train data, and they are distibuted as follows",) s = sum(unique_variations.values); h = unique_variations.values/s; plt.plot(h, label="Histrogram of Variations") plt.xlabel('Index of a Variation') plt.ylabel('Number of Occurances') plt.legend() plt.grid() plt.show() c = np.cumsum(h) print(c) plt.plot(c,label='Cumulative distribution of Variations') plt.grid() plt.legend() plt.show() # alpha is used for laplace smoothing alpha = 1 # train gene feature train_variation_feature_responseCoding = np.array(get_gv_feature(alpha, "Variation", train_df)) # test gene feature test_variation_feature_responseCoding = np.array(get_gv_feature(alpha, "Variation", test_df)) # cross validation gene feature cv_variation_feature_responseCoding = np.array(get_gv_feature(alpha, "Variation", cv_df)) print("train_variation_feature_responseCoding is a converted feature using the response coding method. The shape of Variation feature:", train_variation_feature_responseCoding.shape) # one-hot encoding of variation feature. 
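# As with the Gene feature, a TfidfVectorizer capped at max_features=1000 is used
# below in place of the commented-out CountVectorizer, keeping the encoded
# Variation feature to at most 1000 columns.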
# variation_vectorizer = CountVectorizer() variation_vectorizer = TfidfVectorizer(max_features=1000) train_variation_feature_onehotCoding = variation_vectorizer.fit_transform(train_df['Variation']) test_variation_feature_onehotCoding = variation_vectorizer.transform(test_df['Variation']) cv_variation_feature_onehotCoding = variation_vectorizer.transform(cv_df['Variation']) print("train_variation_feature_onehotEncoded is converted feature using the onne-hot encoding method. The shape of Variation feature:", train_variation_feature_onehotCoding.shape) alpha = [10 ** x for x in range(-5, 1)] # read more about SGDClassifier() at http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html # ------------------------------ # default parameters # SGDClassifier(loss=’hinge’, penalty=’l2’, alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, # shuffle=True, verbose=0, epsilon=0.1, n_jobs=1, random_state=None, learning_rate=’optimal’, eta0=0.0, power_t=0.5, # class_weight=None, warm_start=False, average=False, n_iter=None) # some of methods # fit(X, y[, coef_init, intercept_init, …]) Fit linear model with Stochastic Gradient Descent. # predict(X) Predict class labels for samples in X. #------------------------------- # video link: #------------------------------ cv_log_error_array=[] for i in alpha: clf = SGDClassifier(alpha=i, penalty='l2', loss='log', random_state=42) clf.fit(train_variation_feature_onehotCoding, y_train) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_variation_feature_onehotCoding, y_train) predict_y = sig_clf.predict_proba(cv_variation_feature_onehotCoding) cv_log_error_array.append(log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) print('For values of alpha = ', i, "The log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) fig, ax = plt.subplots() ax.plot(alpha, cv_log_error_array,c='g') for i, txt in enumerate(np.round(cv_log_error_array,3)): ax.annotate((alpha[i],np.round(txt,3)), (alpha[i],cv_log_error_array[i])) plt.grid() plt.title("Cross Validation Error for each alpha") plt.xlabel("Alpha i's") plt.ylabel("Error measure") plt.show() best_alpha = np.argmin(cv_log_error_array) clf = SGDClassifier(alpha=alpha[best_alpha], penalty='l2', loss='log', random_state=42) clf.fit(train_variation_feature_onehotCoding, y_train) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_variation_feature_onehotCoding, y_train) predict_y = sig_clf.predict_proba(train_variation_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(cv_variation_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(test_variation_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15)) # Almost no improvement table.add_row(['Var Feature One-Hot','Linear SVM', 1.73, 0.00]) print(table)+----------------------+------------+---------------+-------------+ | Vectorizer | Model | Test Log Loss | Improvement | +----------------------+------------+---------------+-------------+ | NAN | Random | 2.48 | 0 | | Gene Feature One-Hot | Linear SVM | 1.19 | 0.01 | | Var Feature One-Hot | Linear SVM | 1.73 | 
0.0 | +----------------------+------------+---------------+-------------+Univariate Analysis on Text Feature# cls_text is a data frame # for every row in data fram consider the 'TEXT' # split the words by space # make a dict with those words # increment its count whenever we see that word def extract_dictionary_paddle(cls_text): dictionary = defaultdict(int) for index, row in cls_text.iterrows(): for word in row['TEXT'].split(): dictionary[word] +=1 return dictionary import math #https://stackoverflow.com/a/1602964 def get_text_responsecoding(df): text_feature_responseCoding = np.zeros((df.shape[0],9)) for i in range(0,9): row_index = 0 for index, row in df.iterrows(): sum_prob = 0 for word in row['TEXT'].split(): sum_prob += math.log(((dict_list[i].get(word,0)+10 )/(total_dict.get(word,0)+90))) text_feature_responseCoding[row_index][i] = math.exp(sum_prob/len(row['TEXT'].split())) row_index += 1 return text_feature_responseCoding # building a CountVectorizer with all the words that occured minimum 3 times in train data # text_vectorizer = CountVectorizer(min_df=3) text_vectorizer = TfidfVectorizer(min_df=3, max_features=1000) train_text_feature_onehotCoding = text_vectorizer.fit_transform(train_df['TEXT']) # getting all the feature names (words) train_text_features= text_vectorizer.get_feature_names() # train_text_feature_onehotCoding.sum(axis=0).A1 will sum every row and returns (1*number of features) vector train_text_fea_counts = train_text_feature_onehotCoding.sum(axis=0).A1 # zip(list(text_features),text_fea_counts) will zip a word with its number of times it occured text_fea_dict = dict(zip(list(train_text_features),train_text_fea_counts)) print("Total number of unique words in train data :", len(train_text_features)) dict_list = [] # dict_list =[] contains 9 dictoinaries each corresponds to a class for i in range(1,10): cls_text = train_df[train_df['Class']==i] # build a word dict based on the words in that class dict_list.append(extract_dictionary_paddle(cls_text)) # append it to dict_list # dict_list[i] is build on i'th class text data # total_dict is buid on whole training text data total_dict = extract_dictionary_paddle(train_df) confuse_array = [] for i in train_text_features: ratios = [] max_val = -1 for j in range(0,9): ratios.append((dict_list[j][i]+10 )/(total_dict[i]+90)) confuse_array.append(ratios) confuse_array = np.array(confuse_array) #response coding of text features train_text_feature_responseCoding = get_text_responsecoding(train_df) test_text_feature_responseCoding = get_text_responsecoding(test_df) cv_text_feature_responseCoding = get_text_responsecoding(cv_df) # https://stackoverflow.com/a/16202486 # we convert each row values such that they sum to 1 train_text_feature_responseCoding = (train_text_feature_responseCoding.T/train_text_feature_responseCoding.sum(axis=1)).T test_text_feature_responseCoding = (test_text_feature_responseCoding.T/test_text_feature_responseCoding.sum(axis=1)).T cv_text_feature_responseCoding = (cv_text_feature_responseCoding.T/cv_text_feature_responseCoding.sum(axis=1)).T # don't forget to normalize every feature train_text_feature_onehotCoding = normalize(train_text_feature_onehotCoding, axis=0) # we use the same vectorizer that was trained on train data test_text_feature_onehotCoding = text_vectorizer.transform(test_df['TEXT']) # don't forget to normalize every feature test_text_feature_onehotCoding = normalize(test_text_feature_onehotCoding, axis=0) # we use the same vectorizer that was trained on train data 
cv_text_feature_onehotCoding = text_vectorizer.transform(cv_df['TEXT']) # don't forget to normalize every feature cv_text_feature_onehotCoding = normalize(cv_text_feature_onehotCoding, axis=0) #https://stackoverflow.com/a/2258273/4084039 sorted_text_fea_dict = dict(sorted(text_fea_dict.items(), key=lambda x: x[1] , reverse=True)) sorted_text_occur = np.array(list(sorted_text_fea_dict.values())) # Number of words for a given frequency. print(Counter(sorted_text_occur)) # For Tf-Idf vectorizer this becomes the tf-Idf scores # Train a Logistic regression+Calibration model using text features whicha re on-hot encoded alpha = [10 ** x for x in range(-5, 1)] # read more about SGDClassifier() at http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html # ------------------------------ # default parameters # SGDClassifier(loss=’hinge’, penalty=’l2’, alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, # shuffle=True, verbose=0, epsilon=0.1, n_jobs=1, random_state=None, learning_rate=’optimal’, eta0=0.0, power_t=0.5, # class_weight=None, warm_start=False, average=False, n_iter=None) # some of methods # fit(X, y[, coef_init, intercept_init, …]) Fit linear model with Stochastic Gradient Descent. # predict(X) Predict class labels for samples in X. #------------------------------- # video link: #------------------------------ cv_log_error_array=[] for i in alpha: clf = SGDClassifier(alpha=i, penalty='l2', loss='log', random_state=42) clf.fit(train_text_feature_onehotCoding, y_train) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_text_feature_onehotCoding, y_train) predict_y = sig_clf.predict_proba(cv_text_feature_onehotCoding) cv_log_error_array.append(log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) print('For values of alpha = ', i, "The log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) fig, ax = plt.subplots() ax.plot(alpha, cv_log_error_array,c='g') for i, txt in enumerate(np.round(cv_log_error_array,3)): ax.annotate((alpha[i],np.round(txt,3)), (alpha[i],cv_log_error_array[i])) plt.grid() plt.title("Cross Validation Error for each alpha") plt.xlabel("Alpha i's") plt.ylabel("Error measure") plt.show() best_alpha = np.argmin(cv_log_error_array) clf = SGDClassifier(alpha=alpha[best_alpha], penalty='l2', loss='log', random_state=42) clf.fit(train_text_feature_onehotCoding, y_train) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_text_feature_onehotCoding, y_train) predict_y = sig_clf.predict_proba(train_text_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(cv_text_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(test_text_feature_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15)) # Test log loss improvement - 0.09 table.add_row(['Text Feature One-Hot','Linear SVM', 1.10, 0.09]) print(table) def get_intersec_text(df): # df_text_vec = CountVectorizer(min_df=3) df_text_vec = TfidfVectorizer(min_df=3, max_features=1000) df_text_fea = df_text_vec.fit_transform(df['TEXT']) df_text_features = df_text_vec.get_feature_names() df_text_fea_counts = 
df_text_fea.sum(axis=0).A1 df_text_fea_dict = dict(zip(list(df_text_features),df_text_fea_counts)) len1 = len(set(df_text_features)) len2 = len(set(train_text_features) & set(df_text_features)) return len1,len2 len1,len2 = get_intersec_text(test_df) print(np.round((len2/len1)*100, 3), "% of word of test data appeared in train data") len1,len2 = get_intersec_text(cv_df) print(np.round((len2/len1)*100, 3), "% of word of Cross Validation appeared in train data")95.4 % of word of test data appeared in train data 93.4 % of word of Cross Validation appeared in train dataMachine Learning Models#Data preparation for ML models. #Misc. functionns for ML models def predict_and_plot_confusion_matrix(train_x, train_y,test_x, test_y, clf): clf.fit(train_x, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x, train_y) pred_y = sig_clf.predict(test_x) # for calculating log_loss we willl provide the array of probabilities belongs to each class print("Log loss :",log_loss(test_y, sig_clf.predict_proba(test_x))) # calculating the number of data points that are misclassified print("Number of mis-classified points :", np.count_nonzero((pred_y- test_y))/test_y.shape[0]) plot_confusion_matrix(test_y, pred_y) def report_log_loss(train_x, train_y, test_x, test_y, clf): clf.fit(train_x, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x, train_y) sig_clf_probs = sig_clf.predict_proba(test_x) return log_loss(test_y, sig_clf_probs, eps=1e-15) # this function will be used just for naive bayes # for the given indices, we will print the name of the features # and we will check whether the feature present in the test point text or not def get_impfeature_names(indices, text, gene, var, no_features): # gene_count_vec = CountVectorizer() # var_count_vec = CountVectorizer() # text_count_vec = CountVectorizer(min_df=3) gene_count_vec = TfidfVectorizer(max_features=1000) var_count_vec = TfidfVectorizer(max_features=1000) text_count_vec = TfidfVectorizer(min_df=3, max_features=1000) gene_vec = gene_count_vec.fit(train_df['Gene']) var_vec = var_count_vec.fit(train_df['Variation']) text_vec = text_count_vec.fit(train_df['TEXT']) fea1_len = len(gene_vec.get_feature_names()) fea2_len = len(var_count_vec.get_feature_names()) word_present = 0 for i,v in enumerate(indices): if (v < fea1_len): word = gene_vec.get_feature_names()[v] yes_no = True if word == gene else False if yes_no: word_present += 1 print(i, "Gene feature [{}] present in test data point [{}]".format(word,yes_no)) elif (v < fea1_len+fea2_len): word = var_vec.get_feature_names()[v-(fea1_len)] yes_no = True if word == var else False if yes_no: word_present += 1 print(i, "variation feature [{}] present in test data point [{}]".format(word,yes_no)) else: word = text_vec.get_feature_names()[v-(fea1_len+fea2_len)] yes_no = True if word in text.split() else False if yes_no: word_present += 1 print(i, "Text feature [{}] present in test data point [{}]".format(word,yes_no)) print("Out of the top ",no_features," features ", word_present, "are present in query point")Stacking the three types of features# merging gene, variance and text features # building train, test and cross validation data sets # a = [[1, 2], # [3, 4]] # b = [[4, 5], # [6, 7]] # hstack(a, b) = [[1, 2, 4, 5], # [ 3, 4, 6, 7]] train_gene_var_onehotCoding = hstack((train_gene_feature_onehotCoding, train_variation_feature_onehotCoding)) test_gene_var_onehotCoding = hstack((test_gene_feature_onehotCoding, test_variation_feature_onehotCoding)) 
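# A quick standalone check (not part of the original notebook) of the hstack behaviour
# illustrated in the comment above, using hypothetical toy dense arrays instead of the
# notebook's sparse matrices:
import numpy as np
toy_a = np.array([[1, 2], [3, 4]])
toy_b = np.array([[4, 5], [6, 7]])
print(np.hstack((toy_a, toy_b)))   # [[1 2 4 5] [3 4 6 7]]
# scipy.sparse.hstack (presumably what `hstack` refers to here) concatenates the sparse
# one-hot matrices column-wise in the same way; it returns a COO matrix, which is why
# .tocsr() is called on the stacked result before it is fed to the models.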
cv_gene_var_onehotCoding = hstack((cv_gene_feature_onehotCoding, cv_variation_feature_onehotCoding)) train_x_onehotCoding = hstack((train_gene_var_onehotCoding, train_text_feature_onehotCoding)).tocsr() train_y = np.array(list(train_df['Class'])) test_x_onehotCoding = hstack((test_gene_var_onehotCoding, test_text_feature_onehotCoding)).tocsr() test_y = np.array(list(test_df['Class'])) cv_x_onehotCoding = hstack((cv_gene_var_onehotCoding, cv_text_feature_onehotCoding)).tocsr() cv_y = np.array(list(cv_df['Class'])) train_gene_var_responseCoding = np.hstack((train_gene_feature_responseCoding, train_variation_feature_responseCoding)) test_gene_var_responseCoding = np.hstack((test_gene_feature_responseCoding, test_variation_feature_responseCoding)) cv_gene_var_responseCoding = np.hstack((cv_gene_feature_responseCoding, cv_variation_feature_responseCoding)) train_x_responseCoding = np.hstack((train_gene_var_responseCoding, train_text_feature_responseCoding)) test_x_responseCoding = np.hstack((test_gene_var_responseCoding, test_text_feature_responseCoding)) cv_x_responseCoding = np.hstack((cv_gene_var_responseCoding, cv_text_feature_responseCoding)) print("One hot encoding features :") print("(number of data points * number of features) in train data = ", train_x_onehotCoding.shape) print("(number of data points * number of features) in test data = ", test_x_onehotCoding.shape) print("(number of data points * number of features) in cross validation data =", cv_x_onehotCoding.shape) print(" Response encoding features :") print("(number of data points * number of features) in train data = ", train_x_responseCoding.shape) print("(number of data points * number of features) in test data = ", test_x_responseCoding.shape) print("(number of data points * number of features) in cross validation data =", cv_x_responseCoding.shape)Response encoding features : (number of data points * number of features) in train data = (2124, 27) (number of data points * number of features) in test data = (665, 27) (number of data points * number of features) in cross validation data = (532, 27)Naive Bayesalpha = [0.00001, 0.0001, 0.001, 0.1, 1, 10, 100,1000] cv_log_error_array = [] for i in alpha: print("for alpha =", i) clf = MultinomialNB(alpha=i) clf.fit(train_x_onehotCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_onehotCoding, train_y) sig_clf_probs = sig_clf.predict_proba(cv_x_onehotCoding) cv_log_error_array.append(log_loss(cv_y, sig_clf_probs, labels=clf.classes_, eps=1e-15)) # to avoid rounding error while multiplying probabilites we use log-probability estimates print("Log Loss :",log_loss(cv_y, sig_clf_probs)) fig, ax = plt.subplots() ax.plot(np.log10(alpha), cv_log_error_array,c='g') for i, txt in enumerate(np.round(cv_log_error_array,3)): ax.annotate((alpha[i],str(txt)), (np.log10(alpha[i]),cv_log_error_array[i])) plt.grid() plt.xticks(np.log10(alpha)) plt.title("Cross Validation Error for each alpha") plt.xlabel("Alpha i's") plt.ylabel("Error measure") plt.show() best_alpha = np.argmin(cv_log_error_array) clf = MultinomialNB(alpha=alpha[best_alpha]) clf.fit(train_x_onehotCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_onehotCoding, train_y) predict_y = sig_clf.predict_proba(train_x_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(cv_x_onehotCoding) print('For values of best alpha 
= ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(test_x_onehotCoding) print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15)) # Test log improvement - 0.1 table.add_row(['TfIdf All Features','Naive Bayes', 1.17, 0.1]) print(table) # # ---------------------------- clf = MultinomialNB(alpha=alpha[best_alpha]) clf.fit(train_x_onehotCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_onehotCoding, train_y) sig_clf_probs = sig_clf.predict_proba(cv_x_onehotCoding) # to avoid rounding error while multiplying probabilites we use log-probability estimates print("Log Loss :",log_loss(cv_y, sig_clf_probs)) print("Number of missclassified point :", np.count_nonzero((sig_clf.predict(cv_x_onehotCoding)- cv_y))/cv_y.shape[0]) plot_confusion_matrix(cv_y, sig_clf.predict(cv_x_onehotCoding.toarray())) test_point_index = 1 no_feature = 100 predicted_cls = sig_clf.predict(test_x_onehotCoding[test_point_index]) print("Predicted Class :", predicted_cls[0]) print("Predicted Class Probabilities:", np.round(sig_clf.predict_proba(test_x_onehotCoding[test_point_index]),4)) print("Actual Class :", test_y[test_point_index]) indices = np.argsort(-clf.coef_)[predicted_cls-1][:,:no_feature] print("-"*50) get_impfeature_names(indices[0], test_df['TEXT'].iloc[test_point_index],test_df['Gene'].iloc[test_point_index],test_df['Variation'].iloc[test_point_index], no_feature) test_point_index = 100 no_feature = 100 predicted_cls = sig_clf.predict(test_x_onehotCoding[test_point_index]) print("Predicted Class :", predicted_cls[0]) print("Predicted Class Probabilities:", np.round(sig_clf.predict_proba(test_x_onehotCoding[test_point_index]),4)) print("Actual Class :", test_y[test_point_index]) indices = np.argsort(-clf.coef_)[predicted_cls-1][:,:no_feature] print("-"*50) get_impfeature_names(indices[0], test_df['TEXT'].iloc[test_point_index],test_df['Gene'].iloc[test_point_index],test_df['Variation'].iloc[test_point_index], no_feature)Predicted Class : 7 Predicted Class Probabilities: [[0.0593 0.055 0.0172 0.0595 0.0347 0.0326 0.7365 0.0029 0.0022]] Actual Class : 7 -------------------------------------------------- 15 Text feature [activation] present in test data point [True] 16 Text feature [kinase] present in test data point [True] 17 Text feature [activated] present in test data point [True] 18 Text feature [downstream] present in test data point [True] 20 Text feature [inhibitor] present in test data point [True] 21 Text feature [cells] present in test data point [True] 22 Text feature [expressing] present in test data point [True] 23 Text feature [presence] present in test data point [True] 24 Text feature [signaling] present in test data point [True] 25 Text feature [inhibitors] present in test data point [True] 26 Text feature [however] present in test data point [True] 27 Text feature [also] present in test data point [True] 28 Text feature [independent] present in test data point [True] 29 Text feature [[...]K Nearest Neighbour Classificationalpha = [5, 11, 15, 21, 31, 41, 51, 99] cv_log_error_array = [] for i in alpha: print("for alpha =", i) clf = KNeighborsClassifier(n_neighbors=i) clf.fit(train_x_responseCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_responseCoding, train_y) sig_clf_probs = sig_clf.predict_proba(cv_x_responseCoding) 
cv_log_error_array.append(log_loss(cv_y, sig_clf_probs, labels=clf.classes_, eps=1e-15)) # to avoid rounding error while multiplying probabilites we use # log-probability estimates print("Log Loss :",log_loss(cv_y, sig_clf_probs)) fig, ax = plt.subplots() ax.plot(alpha, cv_log_error_array,c='g') for i, txt in enumerate(np.round(cv_log_error_array,3)): ax.annotate((alpha[i],str(txt)), (alpha[i],cv_log_error_array[i])) plt.grid() plt.title("Cross Validation Error for each alpha") plt.xlabel("Alpha i's") plt.ylabel("Error measure") plt.show() best_alpha = np.argmin(cv_log_error_array) clf = KNeighborsClassifier(n_neighbors=alpha[best_alpha]) clf.fit(train_x_responseCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_responseCoding, train_y) predict_y = sig_clf.predict_proba(train_x_responseCoding) print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(cv_x_responseCoding) print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)) predict_y = sig_clf.predict_proba(test_x_responseCoding) print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))for alpha = 5 Log Loss : 0.9897381065356475 for alpha = 11 Log Loss : 0.9925846381333129 for alpha = 15 Log Loss : 1.0145548226098013 for alpha = 21 Log Loss : 1.0211274311373892 for alpha = 31 Log Loss : 1.029602973240631 for alpha = 41 Log Loss : 1.0450068422912024 for alpha = 51 Log Loss : 1.0564016214688174 for alpha = 99 Log Loss : 1.0834159187843944Testing the model with best hyper paramters# find more about KNeighborsClassifier() here http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html # ------------------------- # default parameter # KNeighborsClassifier(n_neighbors=5, weights=’uniform’, algorithm=’auto’, leaf_size=30, p=2, # metric=’minkowski’, metric_params=None, n_jobs=1, **kwargs) # methods of # fit(X, y) : Fit the model using X as training data and y as target values # predict(X):Predict the class labels for the provided data # predict_proba(X):Return probability estimates for the test data X. 
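# kneighbors(X, n_neighbors): Return the distances to and the indices of the k nearest training points. 
# A minimal illustrative sketch (not from the original notebook) of how it is used in the 
# "Sample Query point" cells further below, assuming a fitted clf: 
# distances, indices = clf.kneighbors(test_x_responseCoding[0].reshape(1, -1), alpha[best_alpha]) 
# print(train_y[indices[0]])  # classes of the nearest training points, later summarized with Counter() 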
#------------------------------------- # video link: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/k-nearest-neighbors-geometric-intuition-with-a-toy-example-1/ #------------------------------------- clf = KNeighborsClassifier(n_neighbors=alpha[best_alpha]) predict_and_plot_confusion_matrix(train_x_responseCoding, train_y, cv_x_responseCoding, cv_y, clf) # This improved slightly - 0.12 table.add_row(['All feature One-Hot','kNN', 0.98, 0.12]) print(table)+----------------------+-------------+---------------+-------------+ | Vectorizer | Model | Test Log Loss | Improvement | +----------------------+-------------+---------------+-------------+ | NAN | Random | 2.48 | 0 | | Gene Feature One-Hot | Linear SVM | 1.19 | 0.01 | | Var Feature One-Hot | Linear SVM | 1.73 | 0.0 | | Text Feature One-Hot | Linear SVM | 1.1 | 0.09 | | TfIdf All Features | Naive Bayes | 1.17 | 0.1 | | All feature One-Hot | kNN | 0.98 | 0.12 | +----------------------+-------------+---------------+-------------+Sample Query pointclf = KNeighborsClassifier(n_neighbors=alpha[best_alpha]) clf.fit(train_x_responseCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_responseCoding, train_y) test_point_index = 1 predicted_cls = sig_clf.predict(test_x_responseCoding[0].reshape(1,-1)) print("Predicted Class :", predicted_cls[0]) print("Actual Class :", test_y[test_point_index]) neighbors = clf.kneighbors(test_x_responseCoding[test_point_index].reshape(1, -1), alpha[best_alpha]) print("The ",alpha[best_alpha]," nearest neighbours of the test points belongs to classes",train_y[neighbors[1][0]]) print("Fequency of nearest points :",Counter(train_y[neighbors[1][0]]))Predicted Class : 5 Actual Class : 4 The 5 nearest neighbours of the test points belongs to classes [4 4 6 4 4] Fequency of nearest points : Counter({4: 4, 6: 1})Sample Query Pointclf = KNeighborsClassifier(n_neighbors=alpha[best_alpha]) clf.fit(train_x_responseCoding, train_y) sig_clf = CalibratedClassifierCV(clf, method="sigmoid") sig_clf.fit(train_x_responseCoding, train_y) test_point_index = 100 predicted_cls = sig_clf.predict(test_x_responseCoding[test_point_index].reshape(1,-1)) print("Predicted Class :", predicted_cls[0]) print("Actual Class :", test_y[test_point_index]) neighbors = clf.kneighbors(test_x_responseCoding[test_point_index].reshape(1, -1), alpha[best_alpha]) print("the k value for knn is",alpha[best_alpha],"and the nearest neighbours of the test points belongs to classes",train_y[neighbors[1][0]]) print("Fequency of nearest points :",Counter(train_y[neighbors[1][0]]))Predicted Class : 7 Actual Class : 7 the k value for knn is 5 and the nearest neighbours of the test points belongs to classes [7 7 7 7 7] Fequency of nearest points : Counter({7: 5})Project Project Description This project is used to create virtual bank objects that contain various type of accounts (checking, savings, credit), and simulate behavior across these objects (transactions, transfer, autopay) Users of this program proceed by creating Banks objects, then customer Accounts in banks, and then associated DetailAccount with each customer Account. After creating all these objects, users should feel free to use methods implemented in each of these objects since each of them are fairly equivalent and capable of achieving same purpose. Methods that are implemented for the Bank objects include: 1. Print bank info 2. Summarize bank account balances 3. Open new customer account 4. Adding multiple detail accounts 5. 
Adding checking, savings, and credit accounts 6. Adding transactions to a specific account 7. Intra Bank Transfer 8. Inter Bank Transfer Methods that are implemented for Account objects include: 1. Print customer account info 2. Summarize associated bank account balances 3. Add a transaction to an associated detail account 4. Retrieve a detail account Methods that are implemented for any DetailAccount objects include: 1. Adding a transaction to its ledger 2. Transfer to account (same owner, different owner, different bank) 3. Pay off credit account 4. Summarize balance 5. Printing out the list of transactions This project is available on GitHub under user dflin96. The link to the repository is https://github.com/dflin96/COGS18-Python-Projectfrom my_module.classes import *Project Code The following is a sample rundown of how the code can be used. Note that none of the entities created reflect real information or real-life scenarios; any resemblance is purely coincidental.wells_fargo_bank = Bank("", "3810 Nobel Dr", "08:00", "18:00") wells_fargo_bank.print_bank_info()In the case that users enter faulty input:tom_account = wells_fargo_bank.open_accounts("Tom", ["Whatever account"], [-1000]) wells_fargo_bank.accountsThe parameter entered is not recognized, so no action is performedwells_fargo_bank.all_detail_accounts wells_fargo_bank.add_checking_account(tom_account, -10) tom_checking_account = wells_fargo_bank.get_detail_account_of_type("CHECKING", tom_account)DetailAccounts are still created with default parametersprint(tom_checking_account) print(tom_checking_account.balance) tom_account.detail_accounts wells_fargo_bank.all_detail_accountsIn the case that the program is provided with valid parameterswells_fargo_bank.add_savings_account(tom_account, 300) wells_fargo_bank.add_credit_account(tom_account, 1000) wells_fargo_bank.add_credit_account(tom_account, 500)Everything looks safe and soundtom_account.summarize_account_balance() tom_savings_account = tom_account.get_detail_account_of_type("Savings") tom_savings_account.print_statement() Transaction.used_transaction_idTime to make more banks and accounts!bank_of_america = Bank("America", "Chicago", "09:00", "20:00") # Attempting to make multiple checking accounts since he's rich trump_account = bank_of_america.open_accounts("Trump", ["checking", "Checking", "SaVings", "CREDIT"], [10000, 518100, 999999])Unfortunately, one customer can only have one CheckingAccount and one SavingsAccount, although a CreditAccount can still be created with at least some credit limittrump_account.summarize_account_balance() trump_savings_account = trump_account.get_detail_account_of_type("savings") trump_checking_account = trump_account.get_detail_account_of_type("checking") trump_checking_account.print_statement() trump_savings_account.print_statement()Banks know everythingbank_of_america.processed_transaction_idAnd more accounts created with valid parametersdaril_account = wells_fargo_bank.open_accounts("Daril", ["checking", "SAVINGS", "Credit"], [500, 1000, 200]) daril_checking_account = daril_account.get_detail_account_of_type("checking") daril_savings_account = daril_account.get_detail_account_of_type("savings") daril_checking_account.print_statement() daril_savings_account.print_statement() wells_fargo_bank.processed_transaction_idThe main functions of banks are to create transactions, transfer money, and pay peopledaril_checking_account.add_transaction(Transaction(date(2018, 12, 28), "Christmas gift", -300)) daril_account.summarize_account_balance() 
daril_checking_account.add_transaction(Transaction(date(2018, 12, 30), "More gifts", -1000))But too bad you need to have enough money to afford more stuffdaril_checking_account.print_statement()Time to transfer in more moneydaril_savings_account.transfer_to_account(daril_checking_account, Transaction(date(2018, 12, 29), "Enough to buy gifts", -900)) daril_account.summarize_account_balance() daril_checking_account.print_statement() daril_savings_account.print_statement()Here we go again buying but with enough moneydaril_checking_account.add_transaction(Transaction(date(2018, 12, 30), "More gifts", -1000)) daril_account.summarize_account_balance() daril_checking_account.print_statement() daril_credit_account = daril_account.get_detail_account_of_type("credit")Time to put the credit card to use to not pay right awaydaril_credit_account.add_transaction(Transaction(date(2018, 1, 1), "Pay off rent", 200)) daril_account.summarize_account_balance()But still need to pay those credits off at some pointwells_fargo_bank.transfer_to_account(daril_checking_account, daril_credit_account, date(2018, 1, 3), "Paying off credits", 100) daril_account.summarize_account_balance() wells_fargo_bank.transfer_to_account(daril_savings_account, daril_credit_account, date(2018, 1, 3), "Paying off credits", 100) daril_account.summarize_account_balance() trump_account.summarize_account_balance()Maybe a good samaritan will help you out from somewhere else too.trump_checking_account.transfer_to_account(daril_checking_account, Transaction(date(2018, 1, 15), "Free Money", -2000)) trump_account.summarize_account_balance() daril_account.summarize_account_balance()Finally, it is always good to make sure that transactions are valid, and accounts are realfake_transaction = Transaction(date(1991, 10, 1), "This is fake", 1000) fake_transaction in wells_fargo_bank.processed_transaction_id fake_transaction in bank_of_america.processed_transaction_id basic_fake_customer_account = Account("Fake person", 12345678) is_real_account_in_bank(basic_fake_customer_account) mediocre_fake_customer_account = Account("Avg Fake", 15910, wells_fargo_bank) is_real_account_in_bank(mediocre_fake_customer_account) professional_fake_customer_account = Account("Pro Fake", 61942, Bank("Fake name", "Fake address", "08:00", "16:00")) is_real_account_in_bank(professional_fake_customer_account)Similar goes to DetailAccounts, which should be realTransaction.used_transaction_id basic_fake_checking_account = CheckingAccount(basic_fake_customer_account, 100) Transaction.used_transaction_id is_real_account_in_bank(basic_fake_checking_account) mediocre_fake_savings_account = SavingsAccount(mediocre_fake_customer_account, 10000, mediocre_fake_customer_account._bank) Transaction.used_transaction_id is_real_account_in_bank(mediocre_fake_savings_account)Hawaii: How COVID-19 Affects TravelThis repository contains code that was used to perform analysis on Hawaii travel data and how COVID-19 affected the tourism in Hawaii. 
The writeup report can be found [here](https://docs.google.com/document/d/1lFPU9p4GzJJm3OYw7-xgqkegWq8RNLv5AjrgNqfpWRs/edit?usp=sharing) Loading dataHere we will load the input datasetsimport pandas as pd import re import matplotlib.pyplot as plt import numpy as np from datetime import datetime from sklearn import datasets, linear_model from sklearn.linear_model import LinearRegression import statsmodels.api as sm from scipy import stats travel_data = pd.read_csv("data/hawaii-data.csv") confirmed_cases = pd.read_csv("data/RAW_us_confirmed_cases.csv")Compute monthly COVID casesIn this section we will compute the monthly COVID cases in Honolulu Hawaii by looking at the accumulative cases for the beginning of each month, and converting that into a series of tuples.columns = confirmed_cases[confirmed_cases['Admin2'] == "Honolulu"].columns.tolist() values = confirmed_cases[confirmed_cases['Admin2'] == "Honolulu"].values.tolist() tuples = [[columns[i], values[0][i]] for i in range(len(columns))] tuples = [x for x in tuples if len(re.findall("\/1\/", x[0])) > 0 ] tuples[0].append(0) for i in range(1, len(tuples)): tuples[i].append(tuples[i][1] - tuples[i-1][1]) # tuples will be a series of (date, total cases, monthly cases) tuples[0].append(0)Monthly visitor count trends vs COVIDIn this section we will explore the monthly visitor in Hawaii. The first plot we will present the monthly count trends from 2018 to 2021x = [datetime.strptime(a, "%b-%y") for a in travel_data['Time'].tolist()] hotel_rate = travel_data['Hotel rate'].tolist() total_flights = travel_data['Total flights'].tolist() corrected_hotel_demand = [982.6944+ 0.0721 * total_flights[i] for i in range(len(hotel_rate))] corrected_visitor_count = [-6.995e+04 + 691.9825 * t for t in corrected_hotel_demand] y = travel_data['Visitor Arrival'].tolist() fig, ax = plt.subplots(figsize=(20, 8)) ax.plot(x[:13],y[:13], color = "red", label = "2018") ax.plot(x[12:25],y[12:25], color = "blue", label = "2019") ax.plot(x[24:],y[24:], color = "orange", label = "2020+") plt.title('Hawaii Monthly Visitor Count', size = 20) ax.set_xlabel('Date', size = 15) ax.set_xticks(x[::2]) ax.set_xticklabels([d.strftime("%b-%y") for d in x[::2]], rotation=45, horizontalalignment='right') ax.legend(prop={'size': 15}, bbox_to_anchor=(0,-0.3,1,0.2), loc="lower left", ncol = 2) plt.ticklabel_format(axis="y", style="plain") plt.savefig(fname = 'a7_output_figures/Hawaii_Monthly_Visitor_Count.png') plt.show()In the next plot we will combine the visitor count with the COVID confirm casesx = [datetime.strptime(a, "%b-%y") for a in travel_data['Time'].tolist()] hotel_rate = travel_data['Hotel rate'].tolist() total_flights = travel_data['Total flights'].tolist() corrected_hotel_demand = [982.6944+ 0.0721 * total_flights[i] for i in range(len(hotel_rate))] corrected_visitor_count = [-6.995e+04 + 691.9825 * t for t in corrected_hotel_demand] y = travel_data['Visitor Arrival'].tolist() fig, ax = plt.subplots(figsize=(20, 8)) ax.plot(x,corrected_visitor_count, color = "orange", label = "Arrival") ax.plot(x,y, color = "grey", label = "Hotel demand") plt.title('Hawaii Monthly Visitor Count vs COVID Confirmed cases', size = 20) ax.set_xlabel('Date', size = 15) ax.set_xticks(x[::2]) ax.set_xticklabels([d.strftime("%b-%y") for d in x[::2]], rotation=45, horizontalalignment='right') ax.legend(prop={'size': 15}, bbox_to_anchor=(0,-0.3,1,0.2), loc="lower left", ncol = 2) plt.ticklabel_format(axis="y", style="plain") plt.savefig(fname = 
'a7_output_figures/Hawaii_Monthly_Visitor_Count_vs_covid_cases.png') plt.show()Tourism statistics - flights and hotelsx = [datetime.strptime(a, "%b-%y") for a in travel_data['Time'].tolist()] hotel_rate = travel_data['Hotel rate'].tolist() total_flights = travel_data['Total flights'].tolist() hotel_occupancy = travel_data['Hotel Occupancy'].tolist() y = travel_data['Visitor Arrival'].tolist() y = hotel_occupancy fig, ax = plt.subplots(figsize=(20, 8)) ax2 = ax.twinx() ax.plot(x,y, color = "red", label = "Hotel occupancy") ax2.plot(x,total_flights, color = "blue", label = "Total flights") plt.title('Hawaii Hotel Occupancy & Total Flights', size = 20) ax.set_xlabel('Date', size = 15) ax.set_xticks(x[::2]) ax.set_xticklabels([d.strftime("%b-%y") for d in x[::2]], rotation=45, horizontalalignment='right') ax.legend(prop={'size': 15}, bbox_to_anchor=(0,-0.3,1,0.2), loc="lower left", ncol = 2) ax2.legend(prop={'size': 15}, bbox_to_anchor=(0,-0.2,1,0.2), loc="lower left", ncol = 2) plt.ticklabel_format(axis="y", style="plain") plt.savefig(fname = 'a7_output_figures/hotel_flights.png') plt.show()Predicting visitor count through linear regressionI performed linear regression on hotel demand and total visitors, and found that there is a very high correlation between hotel demand and the total visitor count both before and after the pandemic. With this, I will then use total flights to predict the hotel demand with pre-pandemic periods, since I also found high correlation in them. In this way I have a method to use pre-pandemic data to train a model using total flights information, and use this model to predict the visitor count for post-pandemic periods. Fitting hotel demand using hotel rate and total flightshotel_demand = travel_data['Hotel Demand'].tolist()[0:26] hotel_rate = travel_data['Hotel rate'].tolist()[0:26] total_flights = travel_data['Total flights'].tolist()[0:26] X = np.array([hotel_rate, total_flights]).T y = hotel_demand X2 = sm.add_constant(X) est = sm.OLS(y, X2) est2 = est.fit() print(est2.summary())OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.276 Model: OLS Adj. R-squared: 0.213 Method: Least Squares F-statistic: 4.374 Date: Tue, 14 Dec 2021 Prob (F-statistic): 0.0246 Time: 20:04:25 Log-Likelihood: -137.08 No. Observations: 26 AIC: 280.2 Df Residuals: 23 BIC: 283.9 Df Model: 2 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]Fitting hotel demand with flightshotel_demand = travel_data['Hotel Demand'].tolist()[0:26] hotel_rate = travel_data['Hotel rate'].tolist()[0:26] total_flights = travel_data['Total flights'].tolist()[0:26] X = total_flights y = hotel_demand X2 = sm.add_constant(X) est = sm.OLS(y, X2) est2 = est.fit() print(est2.summary())OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.273 Model: OLS Adj. R-squared: 0.243 Method: Least Squares F-statistic: 9.019 Date: Tue, 14 Dec 2021 Prob (F-statistic): 0.00616 Time: 20:04:25 Log-Likelihood: -137.13 No. 
Observations: 26 AIC: 278.3 Df Residuals: 24 BIC: 280.8 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]Fitting visitor arrival with hotel demandarrival = travel_data['Visitor Arrival'].tolist() hotel_demand = travel_data['Hotel Demand'].tolist() X = hotel_demand y = arrival X2 = sm.add_constant(X) est = sm.OLS(y, X2) est2 = est.fit() print(est2.summary())OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.980 Model: OLS Adj. R-squared: 0.979 Method: Least Squares F-statistic: 2118. Date: Tue, 14 Dec 2021 Prob (F-statistic): 7.45e-39 Time: 20:04:25 Log-Likelihood: -559.24 No. Observations: 46 AIC: 1122. Df Residuals: 44 BIC: 1126. Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P>|[...]Visualizing linear regressionIn the following plot we will visualize the linear regression using the flight data.x = travel_data['Total flights'].tolist() y = travel_data['Hotel Demand'].tolist() regression = [982.6944+ 0.0721 * x[i] for i in range(len(x))] plt.scatter(x[0:26],y[0:26], color = "blue", label = "Non-COVID data (Train)") plt.scatter(x[26:],y[26:], color = "orange", label = "COVID Data") plt.plot(x,regression, color = "red", label = "LR") plt.title('Linear regression on Total Flights', size = 20) plt.ticklabel_format(axis="y", style="plain") plt.xlabel('Total Flights') plt.ylabel('Hotel Demand') plt.legend(loc="lower right") plt.savefig(fname = 'a7_output_figures/linear_regression_visualization.png') plt.show()Flight corrected total visitor countUsing the linear regression models above, we generate the corrected visitor counts in the following sectionx = [datetime.strptime(a, "%b-%y") for a in travel_data['Time'].tolist()] hotel_rate = travel_data['Hotel rate'].tolist() total_flights = travel_data['Total flights'].tolist() corrected_hotel_demand = [982.6944+ 0.0721 * total_flights[i] for i in range(len(hotel_rate))] corrected_visitor_count = [-6.995e+04 + 691.9825 * t for t in corrected_hotel_demand] y = travel_data['Visitor Arrival'].tolist() fig, ax = plt.subplots(figsize=(20, 8)) # plt.plot(x,daily_cases, color = "black", label = "Daily confirmed cases") ax.plot(x,corrected_visitor_count, color = "orange", label = "FCTVC") ax.plot(x,y, color = "grey", label = "Visitor Arrival") ax.set_xlabel('Date', size = 15) ax.set_ylabel("Visitor Count") ax.set_xticks(x[::2]) ax.set_xticklabels([d.strftime("%b-%y") for d in x[::2]], rotation=45, horizontalalignment='right') plt.title('Flight Corrected Total Visitor Count', size = 20) plt.legend(loc="lower right") plt.ticklabel_format(axis="y", style="plain") plt.savefig(fname = 'a7_output_figures/flight_corrected_total_visitor_count.png') plt.show()!pip install vaderSentiment==3.2.1 requests numpy matplotlib pandas flair !nvidia-smi !wget - O politifact_fake.csv https://raw.githubusercontent.com/KaiDMML/FakeNewsNet/master/dataset/politifact_fake.csv !wget - O politifact_real.csv https://raw.githubusercontent.com/KaiDMML/FakeNewsNet/master/dataset/politifact_real.csv !ls -l import os import json import numpy as np import matplotlib.pyplot as plt from collections import Counter from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer import pandas as pd from flair.embeddings import WordEmbeddings, FlairEmbeddings, DocumentPoolEmbeddings, Sentence, DocumentRNNEmbeddings from flair.models import 
TextClassifier from flair.trainers import ModelTrainer from flair.datasets import CSVClassificationCorpus import os from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score from sklearn.model_selection import train_test_split DATASET_NAME = 'politifact' DATASET_PATH = './{}'.format(DATASET_NAME) REAL_DATA_PATH = '{}_real.csv'.format(DATASET_PATH) FAKE_DATA_PATH = '{}_fake.csv'.format(DATASET_PATH) fake_arts = pd.read_csv(FAKE_DATA_PATH, na_values=['nan'], keep_default_na=False) real_arts = pd.read_csv(REAL_DATA_PATH, na_values=['nan'], keep_default_na=False) def parse_art_data_frame(df): return [{'id': id, 'url': url, 'title': title} for id, url, title, tweets in df.values] fake_arts_with_content = parse_art_data_frame(fake_arts) real_arts_with_content = parse_art_data_frame(real_arts) fake_data = [(art, 'fake') for art in fake_arts_with_content] real_data = [(art, 'real') for art in real_arts_with_content] all_data = fake_data + real_data X = [x for x,y in all_data] y = [y for x,y in all_data] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) train_data = list(zip(X_train, y_train)) test_data = list(zip(X_test, y_test)) print(len(train_data)) print(len(test_data)) print(len(train_data) + len(test_data)) print(len(fake_data) + len(real_data)) print(len([y for y in y_train if y == 'fake'])) print(len([y for y in y_test if y == 'fake'])) print(len([y for y in y_train if y == 'real'])) print(len([y for y in y_test if y == 'real'])) def clear_text(text): return ' '.join(text.split()) class Classifier(): def __init__(self, classifier): self.classifier = classifier def predict(self, text): text = clear_text(text) sentence = Sentence(text) self.classifier.predict(sentence) return sentence.labels[0] def transform_data(data): return [{'label': label, 'text': clear_text(x)} for x, label in data] def save_data(data, data_folder = '.'): if not os.path.exists(data_folder): os.makedirs(data_folder) data = transform_data(data) frame_data = pd.DataFrame(data) train_path = '{}/train.csv'.format(data_folder) test_path = '{}/test.csv'.format(data_folder) dev_path = '{}/dev.csv'.format(data_folder) frame_data.iloc[0:int(len(data)*0.8)].to_csv(train_path, sep='\t', index = False, header = False) frame_data.iloc[int(len(data)*0.8):int(len(data)*0.9)].to_csv(test_path, sep='\t', index = False, header = False) frame_data.iloc[int(len(data)*0.9):].to_csv(dev_path, sep='\t', index = False, header = False) def load_corpus(data_folder = '.'): column_name_map = {1: "text", 0: "label"} return CSVClassificationCorpus(data_folder, column_name_map, delimiter='\t', test_file='test.csv', dev_file='dev.csv', train_file='train.csv') def train_classifier(corpus, model_folder = '.', max_epochs = 1): label_dict = corpus.make_label_dictionary() word_embeddings = [ WordEmbeddings('glove'), FlairEmbeddings('news-forward-fast'), FlairEmbeddings('news-backward-fast') ] document_embeddings = DocumentRNNEmbeddings(word_embeddings, hidden_size=512, reproject_words=True, reproject_words_dimension=256) classifier = TextClassifier(document_embeddings, label_dictionary=label_dict) trainer = ModelTrainer(classifier, corpus) trainer.train(model_folder, max_epochs=max_epochs) return TextClassifier.load('{}/best-model.pt'.format(model_folder)) def train_model(train_data, data_folder = '.', model_folder = '.', max_epochs=1 ): save_data(train_data, data_folder) corpus = load_corpus(data_folder) classifier = train_classifier(corpus, model_folder, max_epochs) return 
Classifier(classifier) def calculate_metrics(y_true, y_pred, pos_label = 'fake'): acc = accuracy_score(y_true, y_pred) precision = precision_score(y_true, y_pred, pos_label=pos_label) recall = recall_score(y_true, y_pred, pos_label=pos_label) f1 = f1_score(y_true, y_pred, pos_label=pos_label) return acc, precision, recall, f1 def validate_model(test_data, classifier): y_true = [label for x, label in test_data] y_pred = [classifier.predict(x).value for x, label in test_data] acc, precision, recall, f1 = calculate_metrics(y_true, y_pred) print("acc: ", acc) print("precision: ", precision) print("recall: ", recall) print("f1: ", f1) return acc, precision, recall, f1 def make_test(train_data, test_data, data_folder, model_folder, max_epochs): classifier = train_model(train_data, data_folder, model_folder, max_epochs) validate_model(test_data, classifier) train_content = [(x, label) for x, label in train_data] test_content = [(x, label) for x, label in test_data] train_title = [(x['title'], label) for x, label in train_content] test_title = [(x['title'], label) for x, label in test_content] print(len([x for x, label in train_title if x == ''])) print(len([x for x, label in test_title if x == ''])) train_title = [(x, label) for x, label in train_title if x != ''] test_title = [(x, label) for x, label in test_title if x != ''] print(len(train_title)) print(len(test_title)) path = './test_csv/title1' make_test(train_title, test_title, path, path, 1) path = './test_csv/title10' make_test(train_title, test_title, path, path, 10) train_url = [(x['url'], label) for x, label in train_content] test_url = [(x['url'], label) for x, label in test_content] print(len([x for x, label in train_url if x == ''])) print(len([x for x, label in test_url if x == ''])) train_url = [(x, label) for x, label in train_url if x != ''] test_url = [(x, label) for x, label in test_url if x != ''] print(len(train_url)) print(len(test_url)) path = './test_csv/url1' make_test(train_url, test_url, path, path, 1) path = './test_csv/url10' make_test(train_url, test_url, path, path, 10) train_mix = [(x['url'] + ', ' + x['title'], label) for x, label in train_content] test_mix = [(x['url'] + ', ' + x['title'], label) for x, label in test_content] print(len([x for x, label in train_mix if x == ''])) print(len([x for x, label in test_mix if x == ''])) train_mix = [(x, label) for x, label in train_mix if x != ''] test_mix = [(x, label) for x, label in test_mix if x != ''] print(len(train_mix)) print(len(test_mix)) path = './test_csv/mix1' make_test(train_mix, test_mix, path, path, 1) path = './test_csv/mix10' make_test(train_mix, test_mix, path, path, 10) path = './test_csv/title50' make_test(train_title, test_title, path, path, 50) path = './test_csv/url50' make_test(train_url, test_url, path, path, 50) path = './test_csv/mix50' make_test(train_mix, test_mix, path, path, 50)Lemmatization Labimport nltk # nltk.download('wordnet') from nltk.stem import WordNetLemmatizer wordnet_lemmatizer = WordNetLemmatizer() sentence = "He was running and eating at the same time. He has had habit of swimming after playing long hours in the Sun." 
punctuations = "?:.,;" sentence_words = nltk.word_tokenize(sentence) for word in sentence_words: if word in punctuations: sentence_words.remove(word) sentence_words print("{0:20}{1:20}".format("Word", "Lemma")) for word in sentence_words: print("{0:20}{1:20}".format(word, wordnet_lemmatizer.lemmatize(word))) print(sentence_words) for word in sentence_words: print("{0:20}{1:20}".format(word, wordnet_lemmatizer.lemmatize(word, pos="v"))) # you need to provide the context in which you want # to lemmatize that is the parts-of-speech(POS) # noun n # verb v![image.png](attachment:image.png)from pyomo.environ import * from math import piConcrete Pyomo Model with Explicit Variablesm = ConcreteModel() m.x_1 = Var(within=NonNegativeReals) m.x_2 = Var(within=NonNegativeReals) m.obj = Objective(expr= m.x_1 + 2*m.x_2, sense=minimize ) m.c1 = Constraint(expr= 3*m.x_1 + 4*m.x_2 >= 1) m.c2 = Constraint(expr= 2*m.x_1 + 5*m.x_2 >= 2) solver = SolverFactory('ipopt') status = solver.solve(m) print("Status = %s" % status.solver.termination_condition) print("%s = %f" % (m.x_1, value(m.x_1))) print("%s = %f" % (m.x_2, value(m.x_2))) print("Objective = %f" % value(m.obj))Status = optimal x_1 = 0.000000 x_2 = 0.400000 Objective = 0.800000Concrete Pyomo Model with Indexed Variablesmodel = ConcreteModel() model.x = Var([1,2], within=NonNegativeReals) model.obj = Objective(expr=model.x[1] + 2*model.x[2]) model.con1 = Constraint(expr=3*model.x[1] + 4*model.x[2]>=1) model.con2 = Constraint(expr=2*model.x[1] + 5*model.x[2]>=2) solver = SolverFactory('ipopt') status = solver.solve(model) print("Status = %s" % status.solver.termination_condition) print("%s = %f" % (model.x[1], value(model.x[1]))) print("%s = %f" % (model.x[2], value(model.x[2]))) print("Objective = %f" % value(model.obj)) # Modificando solo el nombre de las variables model = ConcreteModel() model.x = Var([1,3], within=NonNegativeReals) model.obj = Objective(expr=model.x[1] + 2*model.x[3]) model.con1 = Constraint(expr=3*model.x[1] + 4*model.x[3]>=1) model.con2 = Constraint(expr=2*model.x[1] + 5*model.x[3]>=2) solver = SolverFactory('ipopt') status = solver.solve(model) print("Status = %s" % status.solver.termination_condition) print("%s = %f" % (model.x[1], value(model.x[1]))) print("%s = %f" % (model.x[3], value(model.x[3]))) print("Objective = %f" % value(model.obj))Concrete Pyomo Model with External Data ![image.png](attachment:image.png)N = [1,2] # Nombre de las variables c = {1:1, 2:2} # Costos para la funcion de costo # Ej. 
si fuera 5x_1 + 6x_2 tendriamos c = {1:5, 2:6} a = {(1,1):3, (2,1):4, (1,2):2, (2,2):5} # Parametros de suget to s.t # Es una matriz b = {1:1, 2:2} # bounds o cotas model = ConcreteModel() model.x = Var(N, within=NonNegativeReals) model.obj = Objective(expr=sum(c[i]*model.x[i] for i in N)) model.con1 = Constraint(expr=sum(a[i,1]*model.x[i] for i in N) >= b[1]) model.con2 = Constraint(expr=sum(a[i,2]*model.x[i] for i in N) >= b[2]) solver = SolverFactory('ipopt') status = solver.solve(model) print("Status = %s" % status.solver.termination_condition) for i in N: print("%s = %f" % (model.x[i], value(model.x[i]))) print("Objective = %f" % value(model.obj))Status = optimal x[1] = 0.000000 x[2] = 0.400000 Objective = 0.800000Concrete Pyomo Model with Constraint RulesN = [1,2] M = [1,2] c = {1:1, 2:2} a = {(1,1):3, (2,1):4, (1,2):2, (2,2):5} b = {1:1, 2:2} model = ConcreteModel() model.x = Var(N, within=NonNegativeReals) model.obj = Objective(expr=sum(c[i]*model.x[i] for i in N)) def con_rule(model, m): return sum(a[i,m]*model.x[i] for i in N) >= b[m] model.con = Constraint(M, rule=con_rule) solver = SolverFactory('ipopt') status = solver.solve(model) print("Status = %s" % status.solver.termination_condition) for i in N: print("%s = %f" % (model.x[i], value(model.x[i]))) print("Objective = %f" % value(model.obj))Status = optimal x[1] = 0.000000 x[2] = 0.400000 Objective = 0.800000Concrete Pyomo Model with Abstract Component Declarationsmodel = ConcreteModel() def N_rule(model): return [1,2] model.N = Set(rule=N_rule) model.M = Set(initialize=[1,2]) model.c = Param(model.N, initialize={1:1, 2:2}) model.a = Param(model.N, model.M,initialize={(1,1):3, (2,1):4, (1,2):2, (2,2):5}) model.b = Param(model.M, initialize={1:1, 2:2}) model.x = Var(model.N, within=NonNegativeReals) # Objective def obj_rule(model): return sum(model.c[i]*model.x[i] for i in model.N) model.obj = Objective(rule=obj_rule) # Constraint def con_rule(model, m): return sum(model.a[i,m]*model.x[i] for i in model.N) >= model.b[m] model.con = Constraint(model.M, rule=con_rule) solver = SolverFactory('ipopt') status = solver.solve(model) print("Status = %s" % status.solver.termination_condition) for i in N: print("%s = %f" % (model.x[i], value(model.x[i]))) print("Objective = %f" % value(model.obj))Status = optimal x[1] = 0.000000 x[2] = 0.400000 Objective = 0.800000Using a Function to Construct a Concrete Pyomo Modeldef create_model(N=[], M=[], c={}, a={}, b={}): model = ConcreteModel() model.x = Var(N, within=NonNegativeReals) model.obj = Objective(expr=sum(c[i]*model.x[i] for i in N)) def con_rule(model, m): return sum(a[i,m]*model.x[i] for i in N) >= b[m] model.con = Constraint(M, rule=con_rule) return model model = create_model(N = [1,2], M = [1,2], c = {1:1, 2:2}, a = {(1,1):3, (2,1):4, (1,2):2, (2,2):5}, b = {1:1, 2:2}) solver = SolverFactory('ipopt') status = solver.solve(model) print("Status = %s" % status.solver.termination_condition) for i in N: print("%s = %f" % (model.x[i], value(model.x[i]))) print("Objective = %f" % value(model.obj))Status = optimal x[1] = 0.000000 x[2] = 0.400000 Objective = 0.800000Abstract Pyomo Modelmodel = AbstractModel() model.N = Set() model.M = Set() model.c = Param(model.N) model.a = Param(model.N, model.M) model.b = Param(model.M) model.x = Var(model.N, within=NonNegativeReals) def obj_rule(model): return sum(model.c[i]*model.x[i] for i in model.N) model.obj = Objective(rule=obj_rule) def con_rule(model, m): return sum(model.a[i,m]*model.x[i] for i in model.N) >= model.b[m] model.con = 
Constraint(model.M, rule=con_rule) %%writefile info.dat set N := 1 2 ; set M := 1 2 ; param c := 1 1 2 2; param a := 1 1 3 2 1 4 1 2 2 2 2 5; param b := 1 1 2 2 ; # %load info.dat set N := 1 2 ; set M := 1 2 ; param c := 1 1 2 2; param a := 1 1 3 2 1 4 1 2 2 2 2 5; param b := 1 1 2 2 ; instance = model.create_instance('info.dat') #instance.pprint() opt = SolverFactory("ipopt") results = opt.solve(instance) results.write() solver = SolverFactory('glpk') instance = model.create_instance('info.dat') #results = solver.solve(instance,tee=True) results = solver.solve(instance) results.write() instance.solutions.load_from(results) for v in instance.component_objects(Var, active=True): print ("Variable",v) varobject = getattr(instance, str(v)) for index in varobject: print (" ",index, varobject[index].value) instance.solutions.load_from(results) for v in instance.component_objects(Var, active=True): print ("Variable",v) varobject = getattr(instance, str(v)) for index in varobject: print (" ",index, varobject[index].value)Variable x 1 0.0 2 0.4![image.png](attachment:image.png) http://cvxopt.org/examples/tutorial/lp.html ![image.png](attachment:image.png)from cvxopt import matrix, solvers A = matrix([ [-1.0, -1.0, 0.0, 1.0], [1.0, -1.0, -1.0, -2.0] ]) b = matrix([ 1.0, -2.0, 0.0, 4.0 ]) c = matrix([ 2.0, 1.0 ]) sol=solvers.lp(c,A,b) print(sol['x'])[ 5.00e-01] [ 1.50e+00]![image.png](attachment:image.png)A = matrix([ [-3.0, -2.0, -1.0, 0.0], [-4.0, -5.0, 0.0, -1.0] ]) b = matrix([ -1.0, -2.0, 0.0, 0 ]) c = matrix([ 1.0, 2.0 ]) sol=solvers.lp(c,A,b) print(sol['x']) # Abstract Knapsack Problem # from pyomo.environ import * model = AbstractModel() model.ITEMS = Set() model.v = Param(model.ITEMS, within=PositiveReals) model.w = Param(model.ITEMS, within=PositiveReals) model.limit = Param(within=PositiveReals) model.x = Var(model.ITEMS, within=Binary) def value_rule(model): return sum(model.v[i]*model.x[i] for i in model.ITEMS) model.value = Objective(sense=maximize, rule=value_rule) def weight_rule(model): return sum(model.w[i]*model.x[i] for i in model.ITEMS) <= model.limit model.weight = Constraint(rule=weight_rule) data = { 'ITEMS': {None:('hammer','wrench','screwdriver','towel')}, 'v': {'hammer': 8, 'wrench': 3, 'screwdriver': 6, 'towel': 11, }, 'w': { 'hammer': 5, 'wrench': 7, 'screwdriver': 4, 'towel': 3, }, 'limit': {None:14}, } inst = model.create_instance(data={None:data}) inst.pprint() solver = SolverFactory('glpk') results = solver.solve(inst) results.write() instance.solutions.load_from(results) for v in instance.component_objects(Var, active=True): print ("Variable",v) varobject = getattr(instance, str(v)) for index in varobject: print (" ",index, varobject[index].value) %matplotlib inline # Example 1 from http://www.mathworks.com/help/matlab/ref/pdepe.html from pyomo.environ import * from pyomo.dae import * from pyomo.opt import SolverFactory from pyomo.dae.plugins.finitedifference import Finite_Difference_Transformation from pyomo.dae.plugins.colloc import Collocation_Discretization_Transformation import math m = ConcreteModel() m.t = ContinuousSet(bounds=(0,2)) m.x = ContinuousSet(bounds=(0,1)) m.u = StateVar(m.x,m.t) m.dudx = DerivativeVar(m.u,wrt=m.x) m.dudx2 = DerivativeVar(m.u,wrt=(m.x,m.x)) m.dudt = DerivativeVar(m.u,wrt=m.t) def _pde(m,i,j): if i == 0 or i == 1 or j == 0 : return Constraint.Skip return math.pi**2*m.dudt[i,j] == m.dudx2[i,j] m.pde = Constraint(m.x,m.t,rule=_pde) def _initcon(m,i): if i == 0 or i == 1: return Constraint.Skip return m.u[i,0] == sin(math.pi*i) 
m.initcon = Constraint(m.x,rule=_initcon) def _lowerbound(m,j): return m.u[0,j] == 0 m.lowerbound = Constraint(m.t,rule=_lowerbound) def _upperbound(m,j): return math.pi*exp(-j)+m.dudx[1,j] == 0 m.upperbound = Constraint(m.t,rule=_upperbound) m.obj = Objective(expr=1) # Discretize using Finite Difference Method discretize = Finite_Difference_Transformation() disc = discretize.apply(m,nfe=25,wrt=m.x,scheme='BACKWARD') disc = discretize.apply(disc,nfe=20,wrt=m.t,scheme='BACKWARD',clonemodel=False) # Discretize using Orthogonal Collocation #discretize2 = Collocation_Discretization_Transformation() #disc = discretize2.apply(disc,nfe=10,ncp=3,wrt=m.x,clonemodel=False) #disc = discretize2.apply(disc,nfe=20,ncp=3,wrt=m.t,clonemodel=False) solver='ipopt' opt=SolverFactory(solver) results = opt.solve(disc,tee=True) disc.load(results) #disc.u.pprint() x = [] t = [] u = [] for i in sorted(disc.x): temp=[] tempx = [] for j in sorted(disc.t): tempx.append(i) temp.append(value(disc.u[i,j])) x.append(tempx) t.append(sorted(disc.t)) u.append(temp) import numpy import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D fig = plt.figure() ax = fig.add_subplot(1,1,1,projection='3d') ax.set_xlabel('Distance x') ax.set_ylabel('Time t') ax.set_title('Numerical Solution Using Backward Difference Method') p = ax.plot_wireframe(x,t,u,rstride=1,cstride=1) fig.show() from pyomo.environ import * # Creates a list of the Ingredients Ingredients = ['CHICKEN', 'BEEF', 'MUTTON', 'RICE', 'WHEAT', 'GEL'] # A dictionary of the costs of each of the Ingredients is created costs = {'CHICKEN': 0.013, 'BEEF': 0.008, 'MUTTON': 0.010, 'RICE': 0.002, 'WHEAT': 0.005, 'GEL': 0.001} # A dictionary of the protein percent in each of the Ingredients is created proteinPercent = {'CHICKEN': 0.100, 'BEEF': 0.200, 'MUTTON': 0.150, 'RICE': 0.000, 'WHEAT': 0.040, 'GEL': 0.000} # A dictionary of the fat percent in each of the Ingredients is created fatPercent = {'CHICKEN': 0.080, 'BEEF': 0.100, 'MUTTON': 0.110, 'RICE': 0.010, 'WHEAT': 0.010, 'GEL': 0.000} # A dictionary of the fibre percent in each of the Ingredients is created fibrePercent = {'CHICKEN': 0.001, 'BEEF': 0.005, 'MUTTON': 0.003, 'RICE': 0.100, 'WHEAT': 0.150, 'GEL': 0.000} # A dictionary of the salt percent in each of the Ingredients is created saltPercent = {'CHICKEN': 0.002, 'BEEF': 0.005, 'MUTTON': 0.007, 'RICE': 0.002, 'WHEAT': 0.008, 'GEL': 0.000} model = ConcreteModel(name="The Whiskas Problem") model.ingredient_vars = Var(Ingredients, bounds=(0,None), doc="The amount of each ingredient that is used") model.obj = Objective(expr=sum(costs[i]*model.ingredient_vars[i] for i in Ingredients), doc="Total Cost of Ingredients per can") model.c0 = Constraint(expr=sum(model.ingredient_vars[i] for i in Ingredients) == 100, doc="PercentagesSum") model.c1 = Constraint(expr=sum(proteinPercent[i] * model.ingredient_vars[i] for i in Ingredients) >= 8.0, doc="ProteinRequirement") model.c2 = Constraint(expr=sum(fatPercent[i] * model.ingredient_vars[i] for i in Ingredients) >= 6.0, doc="FatRequirement") model.c3 = Constraint(expr=sum(fibrePercent[i] * model.ingredient_vars[i] for i in Ingredients) <= 2.0, doc="FibreRequirement") model.c4 = Constraint(expr=sum(saltPercent[i] * model.ingredient_vars[i] for i in Ingredients) <= 0.4, doc="SaltRequirement") #inst = model.create_instance(data={None:data}) #inst.pprint() solver = SolverFactory('glpk') results = solver.solve(model) results.write() instance.solutions.load_from(results) for v in instance.component_objects(Var, 
active=True): print ("Variable",v) varobject = getattr(instance, str(v)) for index in varobject: print (" ",index, varobject[index].value) %matplotlib notebook # sodacan.py from pyomo.environ import * from math import pi M = ConcreteModel() M.r = Var(bounds=(0,None)) M.h = Var(bounds=(0,None)) M.o = Objective(expr=\ 2*pi*M.r*(M.r + M.h)) M.c = Constraint(expr=\ pi*M.h*M.r**2 == 355) solver = SolverFactory('ipopt') status = solver.solve(M) status.write() print("Status = %s" % status.solver.termination_condition) print("%s = %f" % (M.r, value(M.r))) print("%s = %f" % (M.h, value(M.h))) print("Objective = %f" % value(M.o)) from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter import matplotlib.pyplot as plt import numpy as np from math import pi, sqrt fig = plt.figure() ax = fig.gca(projection='3d') R_ = np.arange(0.25, 10, 0.25) H_ = np.arange(0.25, 10, 0.25) R, H = np.meshgrid(R_, H_) Z = 2*pi*R*(R+H) surf = ax.plot_surface(R, H, Z, rstride=1, cstride=1, cmap=cm.hot, linewidth=0, antialiased=False) ax.set_xlabel("r") ax.set_ylabel("h") ax.set_zlim(0, 1200) ax.zaxis.set_major_locator(LinearLocator(10)) #ax.zaxis.set_major_formatter(FormatStrFormatter(' %.02f')) #fig.colorbar(surf, shrink=0.5, aspect=5) H_ = 355/(pi*R_*R_) valid = np.where(H_ < 10.1) Z_ = R_+H_ Z_ = 2*pi*R_*Z_ ax.plot(R_[valid], H_[valid], Z_[valid], label='parametric curve') ax.scatter3D(3.837215,7.674430,0,s=20,c="green") ax.scatter3D? plt.show() model = ConcreteModel() model.x = Var( initialize=-1.2, bounds=(-2, 2) ) model.y = Var( initialize= 1.0, bounds=(-2, 2) ) model.obj = Objective( expr= (1-model.x)**2 + 100*(model.y-model.x**2)**2, sense= minimize ) solver = SolverFactory('ipopt') status = solver.solve(model) status.write() print("Status = %s" % status.solver.termination_condition) print("%s = %f" % (model.x, value(model.x))) print("%s = %f" % (model.y, value(model.y))) print("Objective = %f" % value(model.obj)) print("\nDisplaying Solution\n" + '-'*60) pyomo_postprocess(None, model, status) ## Display of the output ## # Display x.l, x.m ; def pyomo_postprocess(options=None, instance=None, results=None): model.x.display() model.x.display() model.y.display()y : Size=1, Index=None, Domain=Reals Key : Lower : Value : Upper : Fixed : Stale None : -2 : 0.9999999949529991 : 2 : False : False$\newcommand{\xv}{\mathbf{x}} \newcommand{\wv}{\mathbf{w}} \newcommand{\yv}{\mathbf{y}} \newcommand{\zv}{\mathbf{z}} \newcommand{\Chi}{\mathcal{X}} \newcommand{\R}{\rm I\!R} \newcommand{\sign}{\text{sign}} \newcommand{\Tm}{\mathbf{T}} \newcommand{\Xm}{\mathbf{X}} \newcommand{\Xlm}{\mathbf{X1}} \newcommand{\Wm}{\mathbf{W}} \newcommand{\Vm}{\mathbf{V}} \newcommand{\Ym}{\mathbf{Y}} \newcommand{\Zm}{\mathbf{Z}} \newcommand{\Zlm}{\mathbf{Z1}} \newcommand{\I}{\mathbf{I}} \newcommand{\muv}{\boldsymbol\mu} \newcommand{\Sigmav}{\boldsymbol\Sigma} \newcommand{\Phiv}{\boldsymbol\Phi}$ Neural NetworksNeural networks, or artificial neural networks, are the computational models inspired by the brain. Mimicing the neurons' synaptic connecions (Figure 1), we build or stack multiple neuron-like hidden units to map data into nonlinear space for rich representation. Figure 1. Anatomy of a neuron (wikipedia) Now, let us review the perceptron model. In perceptron, passing the output of linear model to the step function, we get discrete outputs. Now, you can think a perceptron as a neuron. 
With a threshold of zero, when the linear model output is above it, it passes the signal to the next neuron. By connecting perceptrons, we can actually build synaptic connections. We call this model a *multi-layer perceptron* (MLP). **Q:** For inputs $x \in \{-1, +1 \}$, think about what the following picture represents and answer for it. 1) 2) 3) Answer: 1) +1 2) -1 3) -1 Feed Forward Neural NetworksFitting the data with an MLP is a combinatorial optimization problem because of the non-smooth step function. So, we can consider a smooth step function, an s-shaped sigmoid function. We call this smooth function the **activation function**.import numpy as np import matplotlib.pyplot as plt %matplotlib inline fig, ax = plt.subplots() # x - y axis ax.axhline(y=0, color='k', linewidth=1) ax.axvline(x=0, color='k', linewidth=1) # step function in blue plt.plot([0, 6], [1, 1], 'b-', linewidth=3) plt.plot([-6, 0], [-1, -1], 'b-', linewidth=3) plt.plot([0, 0], [-1, 1], 'b-', linewidth=3) # tanh in red x = np.linspace(-6, 6, 100) plt.plot(x, np.tanh(x), 'r-', linewidth=3)Non-linear Extension of Linear ModelAs we discussed, feed forward neural networks have a rich representation. Thus, they can represent the linear model with a single layer. Considering multiple outputs, we formulated this in matrix form: $$\begin{align}E &= \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{k=1}^{K} (t_{nk} - y_{nk})^2 \\\\\Ym &= \Xlm \cdot \Wm\end{align}$$Here, we assume the first column of $\Xlm$ is the bias column with 1's. Thus, the weight matrix $\Wm$ is $(D+1) \times K$ with the bias row in the first row. From this model, we can convert the raw data $\Xm$ to $\Phiv$, which is a nonlinear mapping.$$\phi: \Xm \rightarrow \Phiv$$Then, we can rewrite the linear model as follows:$$\begin{align}E &= \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{k=1}^{K} (t_{nk} - y_{nk})^2 \\\Ym &= \Phiv \Wm \\ \\\Ym_{nk} &= \Phiv_n^\top \Wm_k \end{align}$$Now, let $\phi(\xv) = h(\xv)$ where $h$ is the *activation function*. $$\begin{align}\Zm &= h(\Xlm \cdot \Vm) \\\\\Ym & = \Zlm \cdot \Wm \end{align}$$The figure below depicts this model. The size of each matrix is listed: - $\Xm: N \times D$- $\Xlm: N \times (D+1)$- $\Vm: (D+1) \times G$- $\Zm: N \times G$- $\Zlm: N \times (G+1)$- $\Wm: (G+1) \times K$- $\Ym: N \times K$For this two-layer network, we call the blue circle layer with the activation functions the **hidden layer** and the orange layer with summation the **output layer**. Why Sigmoid? The resemblance to the step function can be a good reason. But is there any other reason for choosing a sigmoid function as activation? Let us take a look at a polynomial function and the sigmoid.$$y = x^4 + 3 x^2 + 7 x + 3 \quad\quad\text{vs.}\quad\quad y = tanh(x)$$# polynomial function def h_poly(x): return x**4 + 3 * x**2 + 7 * x + 3 # sigmoid function def h_sigmoid(x): return np.tanh(x) ##### Gradient functions # polynomial function def dh_poly(x): return 4 * x**3 + 6 * x + 7 # sigmoid function def dh_sigmoid(x): h = h_sigmoid(x) return 1 - h ** 2 x = np.linspace(-6, 6, 100) plt.figure(figsize=(16,8)) plt.subplot(121) plt.plot(x, h_poly(x), label="$y = x^4 + 3 x^2 + 7 x + 3$") plt.plot(x, dh_poly(x), label="$dy$") plt.legend() plt.subplot(122) plt.plot(x, h_sigmoid(x), label="$y = tanh(x)$") plt.plot(x, dh_sigmoid(x), label="$dy$") plt.legend()Here, we can see that the polynomial gradients become very large as $x$ moves away from 0.
A gradient descent procedure takes this huge step for the large positive or negative $x$ values, which can make learning divergent and unstable.In the right figure, we can see the gradient is nearly turned off for large $x$ values. Only on the nonlinear region of sigmoid function, small gradient is applied for stable learning. Gradient DescentFrom the error function $E$, $$E = \frac{1}{N} \frac{1}{K}\sum_{n=1}^{N} \sum_{k=1}^{K} (t_{nk} - y_{nk})^2,$$we can derive the gradient to update the weights for each layer. Since we can change the output and eventually the error by changing the weights $\Vm$ and $\Wm$, $$\begin{align}v_{dg} &\leftarrow v_{dg} - \alpha_h \frac{\partial{E}} {\partial{v_{dg}}} \\ \\ w_{gk} &\leftarrow w_{gk} - \alpha_o \frac{\partial{E}} {\partial{w_{gk}}},\end{align}$$where $\alpha_h$ and $\alpha_o$ are the learning rate for hidden and output layer respectively. $$\begin{align}\frac{\partial{E}}{\partial{w_{gk}}} &= \frac{\partial{\Big( \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl})^2} \Big)}{\partial{w_{gk}}} \\ &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N}(t_{nk} - y_{nk}) \frac{\partial{y_{nl}}}{\partial{w_{gk}}} \end{align}$$where $$y_{nl} = z1_{n}^\top w_{*l} = \sum_{g=0}^{G} z1_{ng} w_{gl} . $$The gradient for the output layer can be computed as follows:$$\begin{align}\frac{\partial{E}}{\partial{w_{gk}}} &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} (t_{nk} - y_{nk}) z1_{nk} \\ &= -2 \frac{1}{N} \frac{1}{K} \Zlm^\top (\Tm - \Ym).\end{align} $$For the hidden layer, $$\begin{align}\frac{\partial{E}}{\partial{v_{dg}}} &= \frac{\partial{\Big( \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl})^2} \Big)}{\partial{v_{dg}}} \\ &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl}) \frac{\partial{y_{nl}}}{\partial{v_{dg}}} \end{align}$$where $$y_{nl} = \sum_{g=0}^{G} z1_{ng} w_{gl} = \sum_{g=0}^G w_{gl} h (\sum_{d=0}^D v_{dg} x1_{nd}) . $$Let $a_{ng} = \sum_{d=0}^D x1_{nd} v_{dg}$. Then, we can use a chain rule for the derivation. $$\begin{align}\frac{\partial{E}}{\partial{v_{dg}}} &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl}) \frac{\partial{\Big( \sum_{q=0}^G w_{ql} h (\sum_{p=0}^D v_{pq} x1_{np}) \Big)}}{\partial{v_{dg}}} \\ &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl}) \sum_{q=0}^G w_{ql} \frac{\partial{\Big( h (\sum_{p=0}^D v_{pq} x1_{np}) \Big)}}{\partial{v_{dg}}} \\ &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl}) \sum_{q=0}^G w_{ql} \frac{\partial{h(a_{ng})}}{\partial{a_{ng}}} \frac{\partial{a_{ng}}}{\partial{v_{dg}}} \\ &= -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nl} - y_{nl}) \sum_{q=0}^G w_{ql} \frac{\partial{h(a_{ng})}}{\partial{a_{ng}}} x1_{nd}.\end{align}$$When $h = tanh$, $$\frac{\partial{h(a_{ng})}}{\partial{a_{ng}}} = \frac{z_{ng}}{\partial{a_{ng}}} = (1 - z_{ng}^2). 
$$Thus, $$\frac{\partial{E}}{\partial{v_{dg}}} = -2 \frac{1}{N} \frac{1}{K} \sum_{n=1}^{N} \sum_{l=1}^{K} (t_{nk} - y_{nl}) \sum_{g=0}^G w_{gl} (1 - z_{ng}^2) x1_{nd}.$$Rewriting this in matrix form, $$\frac{\partial{E}}{\partial{v_{dg}}} = -2 \frac{1}{N} \frac{1}{K} \Xlm^\top \Big( (\Tm - \Ym) \Wm^\top \odot (1 - \Zm^2) \Big).$$Here, $\odot$ denotes the element-wise multiplication.To summarize, the backpropagation performs the this weight updates iteratively: $$\begin{align}\Vm &\leftarrow \Vm + \rho_h \frac{1}{N} \frac{1}{K} \Xlm^\top \Big( (\Tm - \Ym) \Wm^\top \odot (1 - \Zm^2) \Big), \\\Wm &\leftarrow \Wm + \rho_o \frac{1}{N} \frac{1}{K} \Zlm^\top \Big( \Tm - \Ym \Big)\end{align}$$where $\rho_h$ and $\rho_o$ are the learning rate for hidden and output layer weights. Implemented iteration follows.import numpy as np import matplotlib.pyplot as plt %matplotlib inline import IPython.display as ipd # for display and clear_output import time # for sleep # Make some training data n = 20 X = np.linspace(0.,20.0,n).reshape((n,1)) - 10 T = 0.2 + 0.05 * (X+10) + 0.4 * np.sin(X+10) + 0.2 * np.random.normal(size=(n,1)) # Make some testing data Xtest = X + 0.1*np.random.normal(size=(n,1)) Ttest = 0.2 + 0.05 * (Xtest+10) + 0.4 * np.sin(Xtest+10) + 0.2 * np.random.normal(size=(n,1)) nSamples = X.shape[0] nOutputs = T.shape[1] # Set parameters of neural network nHiddens = 10 rhoh = 0.5 rhoo = 0.1 rh = rhoh / (nSamples*nOutputs) ro = rhoo / (nSamples*nOutputs) # Initialize weights to uniformly distributed values between small normally-distributed between -0.1 and 0.1 V = 0.1*2*(np.random.uniform(size=(1+1,nHiddens))-0.5) W = 0.1*2*(np.random.uniform(size=(1+nHiddens,nOutputs))-0.5) # Add constant column of 1's def addOnes(A): return np.insert(A, 0, 1, axis=1) X1 = addOnes(X) Xtest1 = addOnes(Xtest) # Take nReps steepest descent steps in gradient descent search in mean-squared-error function nReps = 30000 # collect training and testing errors for plotting errorTrace = np.zeros((nReps,2)) N_ = X1.shape[0] K_ = W.shape[1] fig = plt.figure(figsize=(10,8)) for reps in range(nReps): # Forward pass on training data Z = np.tanh(X1 @ V) Z1 = addOnes(Z) Y = Z1 @ W # Error in output error = T - Y print("V:", V.shape) print("X1:", X1.T.shape) print("error:", error.shape) print("W.T:", W.T.shape) print("Z:", Z.shape) print(np.square(Z).shape) # TODO: Backward pass - the backpropagation and weight update steps V = V + ((rh / (N_ * K_)) * X1.T * ((error * W.T) @ (1 - np.square(Z)))) W = W + (ro / (N_ * K_)) * Z1.T * error # error traces for plotting errorTrace[reps,0] = np.sqrt(np.mean((error**2))) Ytest = addOnes(np.tanh(Xtest1 @ V)) @ W #!! 
Forward pass in one line errorTrace[reps,1] = np.sqrt(np.mean((Ytest-Ttest)**2)) if reps % 1000 == 0 or reps == nReps-1: plt.clf() plt.subplot(3,1,1) plt.plot(errorTrace[:reps,:]) plt.ylim(0,0.7) plt.xlabel('Epochs') plt.ylabel('RMSE') plt.legend(('Train','Test'),loc='upper left') plt.subplot(3,1,2) plt.plot(X,T,'o-',Xtest,Ttest,'o-',Xtest,Ytest,'o-') plt.xlim(-10,10) plt.legend(('Training','Testing','Model'),loc='upper left') plt.xlabel('$x$') plt.ylabel('Actual and Predicted $f(x)$') plt.subplot(3,1,3) plt.plot(X,Z) plt.ylim(-1.1,1.1) plt.xlabel('$x$') plt.ylabel('Hidden Unit Outputs ($z$)'); ipd.clear_output(wait=True) ipd.display(fig) ipd.clear_output(wait=True)V: (2, 10) X1: (2, 20) error: (20, 1) W.T: (1, 11) Z: (20, 10) (20, 10)$\newcommand{\xv}{\mathbf{x}} \newcommand{\wv}{\mathbf{w}} \newcommand{\yv}{\mathbf{y}} \newcommand{\zv}{\mathbf{z}} \newcommand{\av}{\mathbf{a}} \newcommand{\Chi}{\mathcal{X}} \newcommand{\R}{\rm I\!R} \newcommand{\sign}{\text{sign}} \newcommand{\Tm}{\mathbf{T}} \newcommand{\Xm}{\mathbf{X}} \newcommand{\Xlm}{\mathbf{X1}} \newcommand{\Wm}{\mathbf{W}} \newcommand{\Vm}{\mathbf{V}} \newcommand{\Ym}{\mathbf{Y}} \newcommand{\Zm}{\mathbf{Z}} \newcommand{\Zlm}{\mathbf{Z1}} \newcommand{\I}{\mathbf{I}} \newcommand{\muv}{\boldsymbol\mu} \newcommand{\Sigmav}{\boldsymbol\Sigma} \newcommand{\Phiv}{\boldsymbol\Phi}$ OptimizationSo far, we have been using gradient descent to find minimum or maximum values in our error function. In general, we call this maximization or minimization problem as an **optimization problem**. In optimization problems, we look for the largest or the smallest value that a function can take. By systematically choosing input vales within the constraint set, optimization problem seeks for the best available values of an objective function.So, for a given function $f(x)$ that maps $f: \Xm \rightarrow \Ym $ where $\Ym \subset \R$, we are looking for a $x^* \in \Xm$ that satisfies $$\begin{cases} f(x^*) \le f(x) &\forall x & \quad \text{if } \text{ minimization}\\ f(x^*) \ge f(x) &\forall x & \quad \text{if } \text{ maximization}. \end{cases}$$ The optimization problems are often expressed in following notation.$$\begin{equation*}\begin{aligned}& \underset{x}{\text{minimize}}& & f(x) \\& \text{subject to}& & x \le b_i, \; i = 1, \ldots, m,\\ &&& x \ge 0.\end{aligned}\end{equation*}$$ Least Squares$$\begin{equation*}\begin{aligned}& \underset{\wv}{\text{minimize}}& & \Vert \Xm \wv - t\Vert^2\end{aligned}\end{equation*}$$![](https://upload.wikimedia.org/wikipedia/commons/3/3a/Linear_regression.svg)As we discussed, least-squares problems can be solved analytically, $\wv = (\Xm^\top \Xm)^{-1} \Xm^\top t$.We easily formulate least-sqaures and solve very efficiently. Linear Programming$$\begin{equation*}\begin{aligned}& \underset{\xv}{\text{minimize}}& & \wv^\top \xv \\& \text{subject to}& & \av_i^\top \xv \le b_i, \; i = 1, \ldots, m.\end{aligned}\end{equation*}$$![](https://upload.wikimedia.org/wikipedia/commons/0/0c/Linear_Programming_Feasible_Region.svg)Linear programming or linear optimization finds a maximum or minimum from a mathematical model that is represented by linear relationships. There is no analytical formular for a solution, but there are reliable algorithms that solve LP efficiently. 
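As a quick illustration, the small LP solved with cvxopt earlier in this notebook can be handed to any generic LP routine. The following is a minimal sketch using scipy.optimize.linprog; SciPy's linprog is an assumption here, since the notebook itself uses cvxopt and Pyomo for LPs.

from scipy.optimize import linprog

# same data as the cvxopt example above: minimize 2*x1 + x2 subject to a_i^T x <= b_i
c = [2.0, 1.0]
A_ub = [[-1.0,  1.0],
        [-1.0, -1.0],
        [ 0.0, -1.0],
        [ 1.0, -2.0]]
b_ub = [1.0, -2.0, 0.0, 4.0]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(None, None), (None, None)])
print(res.x)  # should be close to [0.5, 1.5], the solution reported by cvxopt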
Convex Optimization$$\begin{equation*}\begin{aligned}& \underset{x}{\text{minimize}}& & f_0(x) \\& \text{subject to}& & f_i(x) \leq b_i, \; i = 1, \ldots, m.\end{aligned}\end{equation*}$$![](http://www.convexoptimization.com/images/stories/max.jpg)Convex condition: $$f_i(\alpha x_1 + (1-\alpha) x_2) \le \alpha f_i(x_1) + (1-\alpha) f_i(x_2)$$Convex optimization generalizes linear programming. A convex optimization problem has an objective and constraint functions that are convex. As a generalization of LP, convex optimization problems do not have an analytical solution, but they also have reliable and efficient algorithms, so they can be solved very quickly and reliably, up to very large problems.However, it can be difficult to recognize whether a given problem is convex or not. Nonlinear OptimizationFor non-convex problems, we can apply local optimization methods, which is called nonlinear programming. Starting from an initial guess, it searches for a minimum in a neighborhood of that guess. It can be fast and applicable to large problems. However, there is no guarantee of finding the global optimum. Newton's methodNewton's method repeatedly approximates the curve with a quadratic function to find a stationary point of $f$. We assume that at each iterate $x^{(k)}$ we can compute $f(x^{(k)})$, $f^{\prime}(x^{(k)})$, and $f^{\prime\prime}(x^{(k)})$.Using a second-order Taylor expansion, we can approximate $f(x)$ around $x^{(k)}$ by the quadratic $q(x)$:$$q(x) = f(x^{(k)}) + f^{\prime}(x^{(k)}) \Delta x + \frac{1}{2} f^{\prime\prime}(x^{(k)}) \Delta x^2$$where $\Delta x = (x - x^{(k)})$. Minimizing this quadratic function, $$0 = q^\prime(x) = f^{\prime}(x^{(k)}) + f^{\prime\prime}(x^{(k)}) \Delta x. $$Setting $x = x^{(k+1)}$, we get$$ x^{(k+1)} = x^{(k)} - \frac{f^{\prime}(x^{(k)})}{f^{\prime\prime}(x^{(k)})}.$$import numpy as np import matplotlib.pyplot as plt %matplotlib inline import scipy.optimize as opt from scipy.optimize import rosen, minimize # examples are from http://people.duke.edu/~ccc14/sta-663-2017/14C_Optimization_In_Python.html x = np.linspace(-5, 5, 1000) y = np.linspace(-5, 5, 1000) xs, ys = np.meshgrid(x, y) zs = rosen(np.vstack([xs.ravel(), ys.ravel()])).reshape(xs.shape) plt.figure(figsize=(8,8)) plt.contour(xs, ys, zs, np.arange(10)**5, cmap='jet') plt.text(1, 1, 'x', va='center', ha='center', color='k', fontsize=30); from scipy.optimize import rosen_der, rosen_hess def reporter(p): """record the points visited""" global ps ps.append(p) # starting position x0 = np.array([4,-4.1]) ps = [x0] minimize(rosen, x0, method="Newton-CG", jac=rosen_der, hess=rosen_hess, callback=reporter) ps = np.array(ps) plt.figure(figsize=(16, 8)) plt.subplot(121) plt.contour(xs, ys, zs, np.arange(10)**5, cmap='jet') plt.plot(ps[:, 0], ps[:, 1], '-ro') plt.subplot(122) plt.semilogy(range(len(ps)), rosen(ps.T));Vs. others?Now, let us take a look at other optimization tools including naive steepest descent (a bare-bones version is sketched below) and scaled conjugate gradient ([Moller, 1997](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.50.8063&rep=rep1&type=pdf)).
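For reference, the bare-bones steepest-descent update is just $x \leftarrow x - \alpha \nabla f(x)$. The snippet below is an illustrative sketch of that update on the same Rosenbrock function, not the grad.py implementation used next; it reuses the rosen and rosen_der helpers imported from scipy.optimize above and the same fixed step size.

x = np.array([4.0, -4.1])          # same starting point as the Newton-CG run above
alpha = 1e-4                       # fixed step size, matching the steepest() call below
for _ in range(50000):
    x = x - alpha * rosen_der(x)   # steepest-descent update: x <- x - alpha * grad f(x)
print(x, rosen(x))                 # creeps along the banana-shaped valley toward the minimum at (1, 1)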
To run this properly, you need to download [grad.py](https://webpages.uncc.edu/mlee173/teach/itcs4156online/notes/grad.py) under your current work folder.from grad import steepest res = steepest(np.array(x0), rosen_der, rosen, stepsize=0.0001, wtracep=True, ftracep=True) ps = np.array(res['wtrace']) plt.figure(figsize=(16, 8)) plt.subplot(121) plt.contour(xs, ys, zs, np.arange(10)**5, cmap='jet') plt.plot(ps[:, 0], ps[:, 1], '-ro') plt.subplot(122) plt.semilogy(range(len(ps)), res['ftrace']); from grad import scg res = scg(np.array(x0), rosen_der, rosen, wtracep=True, ftracep=True) res1 = scg(np.array([-4, 4]), rosen_der, rosen, wtracep=True, ftracep=True) ps = np.array(res['wtrace']) ps1 = np.array(res1['wtrace']) plt.figure(figsize=(16, 8)) plt.subplot(121) plt.contour(xs, ys, zs, np.arange(10)**5, cmap='jet') plt.plot(ps[:, 0], ps[:, 1], '-ro') plt.plot(ps1[:, 0], ps1[:, 1], '-bo') plt.subplot(122) plt.semilogy(range(len(ps)), res['ftrace']); x0 = [-4, 4] ps = [x0] minimize(rosen, x0, method="Newton-CG", jac=rosen_der, hess=rosen_hess, callback=reporter) ps = np.array(ps) plt.figure(figsize=(16, 8)) plt.subplot(121) plt.contour(xs, ys, zs, np.arange(10)**5, cmap='jet') plt.plot(ps[:, 0], ps[:, 1], '-ro') plt.subplot(122) plt.semilogy(range(len(ps)), rosen(ps.T));Neural Network! Now, let us use this optimization trick for our neural networks.# standardization class class Standardizer: """ class version of standardization """ def __init__(self, X, explore=False): self._mu = np.mean(X,0) self._sigma = np.std(X,0) if explore: print ("mean: ", self._mu) print ("sigma: ", self._sigma) print ("min: ", np.min(X,0)) print ("max: ", np.max(X,0)) def set_sigma(self, s): self._sigma[:] = s def standardize(self,X): return (X - self._mu) / self._sigma def unstandardize(self,X): return (X * self._sigma) + self._mu """ Neural Network referenced NN code by in R and C++ by (lemin) example usage: X = numpy.array([0,0,1,0,0,1,1,1]).reshape(4,2) T = numpy.array([0,1,1,0,1,0,0,1]).reshape(4,2) nn = nnet.NeuralNet([2,3,2]) nn.train(X,T, wprecision=1e-20, fprecision=1e-2) Y = nn.use(X) """ from grad import scg, steepest from copy import copy class NeuralNet: """ neural network class for regression Parameters ---------- nunits: list the number of inputs, hidden units, and outputs Methods ------- set_hunit update/initiate weights pack pack multiple weights of each layer into one vector forward forward processing of neural network backward back-propagation of neural network train train the neural network use appply the trained network for prediction Attributes ---------- _nLayers the number of hidden unit layers rho learning rate _W weights _weights weights in one dimension (_W is referencing _weight) stdX standardization class for data stdT standardization class for target Notes ----- """ # TODO: Try to implement Neural Network class with the member variables and methods described above X = np.array([0,0,1,0,0,1,1,1]).reshape(4,2) T = np.array([0,1,1,0,1,0,0,1]).reshape(4,2) nn = NeuralNet([2,3,2]) nn.train(X, T) Y = nn.use(X) Y T X # repeating the previous example # Make some training data n = 20 X = np.linspace(0.,20.0,n).reshape((n,1)) - 10 T = 0.2 + 0.05 * (X+10) + 0.4 * np.sin(X+10) + 0.2 * np.random.normal(size=(n,1)) # Make some testing data Xtest = X + 0.1*np.random.normal(size=(n,1)) Ttest = 0.2 + 0.05 * (Xtest+10) + 0.4 * np.sin(Xtest+10) + 0.2 * np.random.normal(size=(n,1)) nSamples = X.shape[0] nOutputs = T.shape[1] nn = NeuralNet([1,3,1]) nn.train(X, T, ftracep=True) Ytest, Z = nn.use(Xtest, 
retZ=True) plt.figure(figsize=(10,8)) plt.subplot(3,1,1) plt.plot(nn.ftrace) plt.ylim(0,0.7) plt.xlabel('Epochs') plt.ylabel('RMSE') plt.legend(('Train','Test'),loc='upper left') plt.subplot(3,1,2) plt.plot(X,T,'o-',Xtest,Ttest,'o-',Xtest,Ytest,'o-') plt.xlim(-10,10) plt.legend(('Training','Testing','Model'),loc='upper left') plt.xlabel('$x$') plt.ylabel('Actual and Predicted $f(x)$') plt.subplot(3,1,3) plt.plot(X, Z[1]) plt.ylim(-1.1,1.1) plt.xlabel('$x$') plt.ylabel('Hidden Unit Outputs ($z$)');Plotting CDMS data in Python Overview~~~~~~~~Data read via the CDMS Python interface can be plotted using the ``vcs``module. This module, part of the Climate DataAnalysis Tool (CDAT) is documented in the CDAT reference manual.The ``vcs`` module provides access to the functionality of the VCSvisualization program.Examples of plotting data accessed from CDMS are given below, as well asdocumentation for the plot routine keywords.Examples~~~~~~~~In the following examples, it is assumed that variable ``psl`` isdimensioned (time, latitude, longitude). ``psl`` is contained in thedataset named ``'sample.xml'``. Plotting a Gridded VariableExample: plotting a gridded variableimport cdms2, vcs f = cdms2.open("clt.nc") clt = f.variables['clt'] sample = clt[0,:] w=vcs.init() w.plot(sample) f.close()**Notes:** "3","Get a horizontal slice, for the first time point." "4","Create a VCS Canvas ``w``." "5", "Plot the data. Because sample is a transient variable, it encapsulates all the time, latitude, longitude, and attribute information." "7", "Close the file. This must be done after the reference to the persistent variable ``ps l``."Thats it! The axis coordinates, variable name, description, units, etc.are obtained from variable sample.What if the units are not explicitly defined for ``clt``, or a differentdescription is desired? ``plot`` has a number of other keywords whichfill in the extra plot information. Using A Plot Keywordsimport cdms2, vcs f = cdms2.open("clt.nc") clt = f.variables['clt'] sample = clt[0,:] w=vcs.init() w.plot(sample, units='percent', file_comment='', long_name="Total Cloud", comment1="Example plot", hms="00:00:00", ymd="1979/01/01") f.close()**Note:** Keyword arguments can be listed in any order. Plotting a Time-Latitude SliceAssuming that variable ``clt`` has domain ``(time,latitude,longitude)``,this example selects and plots a time-latitude slice:import cdms2, vcs f = cdms2.open("clt.nc") clt = f.variables['clt'] samp = clt[:,:,0] w = vcs.init() w.plot(samp, name='Total Cloudiness')"4", "``samp`` is a slice of ``clt``, at index ``0`` of the last dimension. Since ``samp`` was obtained from the slice operator, it is a transient variable, which includes the latitude and time information." "6", "The ``name`` keyword defines the identifier, default is the name found in the file." Plotting Subsetted DataCalling the variable ``clt`` as a function reads a subset of thevariable. The result variable ``samp`` can be plotted directly:import cdms2, vcs f = cdms2.open("clt.nc") clt = f.variables['clt'] samp = clt(time = (0.0,100.0), longitude = 180.0, squeeze=1) w = vcs.init() w.plot(samp) f.close()SIMEX notebook for CRL focussing.In this example, we add a focussing element to our beamline, a *Compound Refractive Lense* stack, or CRL. A CRL is a group of lenses stacked along the beam direction. 
Since the refractive index of most materials in the x-ray regime is close to $n=1$, multiple lenses have to be stacked to achieve a relatively short focal length $f$.A schematic drawing of a single lens and a CRL is given in the figure below (credits: http://www.x-ray-optics.de/index.php/en/types-of-optics/refractive-lenses, accessed on Oct. 13 2021): ![image.png](attachment:a89c42f6-ceca-485a-9a74-2e28d1f56520.png)The CRL lenses can be made from various materials; here we use Beryllium. We will need the refractive index of Beryllium at our chosen photon energy. These material x-ray properties can be looked up at the website https://henke.lbl.gov/optical_constants/getdb2.html or programmatically using the Python `xraydb` library.%load_ext autoreload %autoreload 2 %matplotlib inline # Import all SimEx modules from SimEx.Calculators.WavePropagator import WavePropagator from SimEx.Calculators.GaussianPhotonSource import GaussianPhotonSource from SimEx.Parameters.WavePropagatorParameters import WavePropagatorParameters from SimEx.Parameters.GaussWavefrontParameters import GaussWavefrontParameters from SimEx.Analysis.XFELPhotonAnalysis import XFELPhotonAnalysis from SimEx.Utilities.Units import electronvolt, meter, joule, radian import numpy import copy import xraydb from IPython.display import Markdown as mdSetup the initial wavefrontWe first create a wavefront at $z=100$ m downstream from the source. The intensity distribution in $x$, $y$, and $t$ is assumed to be Gaussian. To this end, we use the `GaussianPhotonSource` Calculator and its corresponding parameter class, the `GaussWavefrontParameters`. We first describe the wavefront parameters which will then be used to create the wavefront itself:wavefront_parameters = GaussWavefrontParameters(photon_energy=8.0e3*electronvolt, photon_energy_relative_bandwidth=1e-3, beam_diameter_fwhm=1.0e-4*meter, pulse_energy=2.0e-6*joule, number_of_transverse_grid_points=400, number_of_time_slices=30, z = 100*meter )Now, we use the just created `wavefront_parameters` to initialize the Photon Source.photon_source = GaussianPhotonSource(wavefront_parameters, input_path="/dev/null", output_path="initial_wavefront.h5")Let's calculate the initial wavefront and visualize it:photon_source.backengine()Save wavefront data for later re-usephoton_source.saveH5()Create an analysis object and generate some plotsanalysis = XFELPhotonAnalysis('initial_wavefront.h5')Start initialization. Loading wavefront from initial_wavefront.h5. ... done. Getting intensities. ... done. Data dimensions = (400, 400, 30) Masking NANs. ... done.Intensity mapanalysis.plotIntensityMap()Plotting intensity map. R-spaceOn-axis power density as a function of timeanalysis.plotOnAxisPowerDensity()Plotting on-axis power density.Plot intensity distribution in q-spaceanalysis.plotIntensityMap(qspace=True)Plotting intensity map. Switching to reciprocal space. ... done. Q-spacePlot the power as a function of time integrated over the transverse dimensionsanalysis.plotTotalPower()Plotting total power.
Pulse energy 2e-06 JPlot the power spectrumanalysis.plotTotalPower(spectrum=True) import wpg from wpg import wpg_uti_wf as wpg_utils from wpg import wpg_uti_oe as optics_utils wavefront = copy.deepcopy(photon_source.data)Check the sampling qualityprint(wpg_utils.check_sampling(wavefront))WAVEFRONT SAMPLING REPORT +----------+---------+---------+---------+---------+---------+---------+---------+ |x/y |FWHM |px |ROI |R |Fzone |px*7 |px*10 | +----------+---------+---------+---------+---------+---------+---------+---------+ |Horizontal|1.147e-04|6.386e-06|2.548e-03|1.772e+02|1.196e-04|4.470e-05|6.386e-05| |Vertical |1.147e-04|6.386e-06|2.548e-03|1.772e+02|1.196e-04|4.470e-05|6.386e-05| +----------+---------+---------+---------+---------+---------+---------+---------+ Horizontal Fresnel zone extension NOT within [7,10]*pixel_width -> Check pixel width." Vertical Fresnel zone extension NOT within [7,10]*pixel_height -> Check pixel width." Horizontal ROI > 3* FWHM(x) -> OK Horizontal ROI > 3* FWHM(y) -> OK Focus sampling: FWHM > 10*px END OF REPORTSetup the beamlineOur experiment consists of* The photon source at the origin* 100 m of free space (already taken into account in the wavefront construction).* The CRL* Another distance of free space* The detector (screen)The goal of this exercise is to minimize the size of the beam by varying the final screen position along the beam direction.from wpg import Beamline, optical_elements, srwlib from wpg.useful_code.wfrutils import propagate_wavefront beamline = Beamline()Setting up the CRLWe employ the `CRL` class in `wpg.optical_elements`.optical_elements.CRL?We set up a circular (`shape='c'`) obstacle (`ap_or_ob = 'ob'`) of radius $100$ nm in both and place it at the beam center$x_\text{center} = y_\text{center} = 0$ (`x=0`, `y=0`). Get the components of the refractive index for Be (solid Be density is 1.85 g/ccm)delta, beta, att = xraydb.xray_delta_beta("Be", density=1.85, energy=photon_source.parameters.photon_energy.m_as(electronvolt)) delta, beta, att crl = optical_elements.CRL( _foc_plane=3, _delta=delta, _atten_len=att*1e-2, _shape=1, _apert_h=5.0e-3, _apert_v=5.0e-3, _r_min=5.8e-3, _n=20, _wall_thick=8.0e-5, _xc=0.0, _yc=0.0, _void_cen_rad=None, _e_start=0, _e_fin=0, _nx=1001, _ny=1001, ) optics_utils.show_transmission(crl) crl_pp = optical_elements.Use_PP(semi_analytical_treatment=1, zoom=1, sampling=1 ) beamline.append(crl, crl_pp) srwlib.srwl.SetRepresElecField(wavefront._srwl_wf, 'f') propagate_wavefront(wavefront,beamline) srwlib.srwl.SetRepresElecField(wavefront._srwl_wf, 't') wpg_utils.plot_intensity_map(wavefront) wpg_utils.plot_intensity_qmap(wavefront) print(wpg_utils.check_sampling(wavefront))WAVEFRONT SAMPLING REPORT +----------+---------+---------+---------+---------+---------+---------+---------+ |x/y |FWHM |px |ROI |R |Fzone |px*7 |px*10 | +----------+---------+---------+---------+---------+---------+---------+---------+ |Horizontal|1.147e-04|6.386e-06|2.548e-03|1.772e+02|1.196e-04|4.470e-05|6.386e-05| |Vertical |1.147e-04|6.386e-06|2.548e-03|1.772e+02|1.196e-04|4.470e-05|6.386e-05| +----------+---------+---------+---------+---------+---------+---------+---------+ Horizontal Fresnel zone extension NOT within [7,10]*pixel_width -> Check pixel width." Vertical Fresnel zone extension NOT within [7,10]*pixel_height -> Check pixel width." 
Horizontal ROI > 3* FWHM(x) -> OK Horizontal ROI > 3* FWHM(y) -> OK Focus sampling: FWHM > 10*px END OF REPORTEstimate focus positionUse the classical lense formula$$\frac{1}{z_O} + \frac{1}{z_I} = \frac{1}{f}$$where $z_O$ and $z_I$ are the distances from the object to the lense and distance from lense to image, respectively. **Solution**:With $z_O = 100\,\text{m}$ and $f = 27.2 m$, rearranging the lens formula yieldsz_O = 100 f = 27.2 z_I = 1./(1/f - 1/z_O) z_I md(f"The image position is at {numpy.round(z_I, 1)} m behind the lense")Propagate for another 37.4 mcrl_to_screen = optical_elements.Drift(_L=37.4, _treat=1) crl_to_screen_pp = optical_elements.Use_PP(semi_analytical_treatment=1) beamline.append(crl_to_screen, crl_to_screen_pp) # Reset the wavefront. wavefront = copy.deepcopy(photon_source.data) srwlib.srwl.SetRepresElecField(wavefront._srwl_wf, 'f') propagate_wavefront(wavefront, beamline) srwlib.srwl.SetRepresElecField(wavefront._srwl_wf, 't') wpg_utils.plot_intensity_map(wavefront)R-space (400,) (400,) FWHM in x = 3.822e-05 m. FWHM in y = 3.822e-05 m.**Tasks**1. Optimize the focus: Measure the focus FWHM at 10 positions around the chosen screen distance of 37.4 m. 1. Plot the focus FWHM against distance from CRL.1. Revisit the analytical expression for the beam waist of a Gaussian beam as a function of distance from focus. 1. Fit the beam waist expression to your data and compare the fitted focus FWHM to the analytical solution and the numerical data.!git add crl_focus.ipynb !git commit -a -m "Update CRL focus notebook:\nFinalize propagation to screen,\n Estimate focus from lens formua,\n Add tasks"[simple_diffraction 340e2ba] Update CRL focus notebook:\nFinalize propagation to screen,\n Estimate focus from lens formua,\n Add tasks 2 files changed, 1094 insertions(+), 1 deletion(-) create mode 100644 examples_diffraction/crl_focus.ipynbHow to use implemented algorithms================== Overview----------The project has the following structure:- *doc* contains documentation of the project - *src* contains all the source code - *algorithms* contains different algorithms - *test* contains modules for testing with unittest- *test_data* contains test data- *tools* different tools (scripts) used for building and testing- *Notebooks* contains ipython notebooks which demonstrate usage Importing *Graph* module---------------------------Firstly, we will import the *Graph* module. 
We are in the folder *Notebooks* so, we need to append the path.import sys, os # sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath('../src/')))) sys.path.append('../src/') import graph reload(graph)We will check if the library is imported correctly by computing a few parameters of empty graph.emptyG = graph.Graph() diam = emptyG.diameter() gcc = emptyG.global_clustering_coefficient() print("The diameter of empty graph: %d" % diam) print("The global cluster coeff of empty graph is: %f" % gcc)The diameter of empty graph: 0 The global cluster coeff of empty graph is: 0.000000Reading graph from a file-----------------------------Now we will show how to read graph from a file and compute requested parameters.path = '../test_data/' txt = ".txt" filenames = ['zachary_connected', 'graph_1000n_4000m', 'graph_100n_1000m'] graphs = [] # store all three graph objects in a list for i, g_name in enumerate(filenames): g = graph.Graph({}) g.read_from_file(filename=path+g_name+txt) graphs.append(g) results = [] params = ["vertices", "edges", "density", "diameter", "clustering coef"] print("%d, %d" % (graphs[0].number_of_vertices(), graphs[0].number_of_edges())) print("%d, %d" % (graphs[1].number_of_vertices(), graphs[1].number_of_edges())) print("%d, %d" % (graphs[2].number_of_vertices(), graphs[2].number_of_edges()))33, 78 1000, 3989 100, 960Computing requested parameters-----------------------------------We assume that the provided graphs are simple and thus multi edges in the files are ignored.Since one of the graphs has 1000 vertices and 3989 edges, computation of diameter will take some time.# compute parameters for i, G in enumerate(graphs): temp_results = [G.number_of_vertices(), G.number_of_edges(), G.density(), G.diameter(), G.global_clustering_coefficient() ] results.append(temp_results)Now we will present the results in a table. Notice that package *ipy_table* is required.from ipy_table import * dictList = [] data_str = "dataset" # convert the dictionary to a list for i in range(len(results)+1): if i == 0: dictList.append([data_str] + [p for p in params]) else: dictList.append([filenames[i-1]] + results[i-1]) # create table with make_table make_table(dictList) set_column_style(0, width='100', bold=True, color='hsla(225, 80%, 94%, 1)') set_column_style(1, width='100') # render the table render()Data Parallelism이번 세션에는 데이터 병렬화 기법에 대해 알아보겠습니다. 1. `torch.nn.DataParallel`가장 먼저 우리에게 친숙한 `torch.nn.DataParallel`의 동작 방식에 대해 알아봅시다. `torch.nn.DataParallel`은 single-node & multi-GPU에서 동작하는 multi-thread 모듈입니다. 1) Forward Pass1. 입력된 mini-batch를 **Scatter**하여 각 디바이스로 전송.2. GPU-1에 올라와 있는 모델의 파라미터를 GPU-2,3,4로 **Broadcast**.3. 각 디바이스로 복제된 모델로 **Forward**하여 Logits을 계산 함.4. 계산된 Logits을 **Gather**하여 GPU-1에 모음.5. Logits으로부터 **Loss**를 계산함. (with loss reduction)![](../images/dp_forward.png)코드로 나타내면 아래와 같습니다.import torch.nn as nn def data_parallel(module, inputs, labels, device_ids, output_device): inputs = nn.parallel.scatter(inputs, device_ids) # 입력 데이터를 device_ids들에 Scatter함 replicas = nn.parallel.replicate(module, device_ids) # 모델을 device_ids들에 복제함. logit = nn.parallel.parallel_apply(replicas, inputs) # 각 device에 복제된 모델이 각 device의 데이터를 Forward함. logits = nn.parallel.gather(outputs, output_device) # 모델의 logit을 output_device(하나의 device)로 모음 return logits2) Backward Pass1. 계산된 Loss를 각 디바이스에 **Scatter**함.2. 전달받은 Loss를 이용해서 각 디바이스에서 **Backward**를 수행하여 Gradients 계산.3. 계산된 모든 Gradient를 GPU-1로 **Reduce**하여 GPU-1에 전부 더함.4. 
더해진 Gradients를 이용하여 GPU-1에 있는 모델을 업데이트.![](../images/dp_backward.png) 혹시나 모르시는 분들을 위해...- `loss.backward()`: 기울기를 미분해서 Gradient를 계산- `optimizer.step()`: 계산된 Gradient를 이용해서 파라미터를 업데이트- Computation cost는 `backward()` > `step()`.![](../images/backward_step.png)""" src/data_parallel.py """ from torch import nn from torch.optim import Adam from torch.utils.data import DataLoader from transformers import BertForSequenceClassification, BertTokenizer from datasets import load_dataset # 1. create dataset datasets = load_dataset("multi_nli").data["train"] datasets = [ { "premise": str(p), "hypothesis": str(h), "labels": l.as_py(), } for p, h, l in zip(datasets[2], datasets[5], datasets[9]) ] data_loader = DataLoader(datasets, batch_size=128, num_workers=2) # 2. create model and tokenizer model_name = "bert-base-cased" tokenizer = BertTokenizer.from_pretrained(model_name) model = BertForSequenceClassification.from_pretrained(model_name, num_labels=3).cuda() # 3. make data parallel module # device_ids: 사용할 디바이스 리스트 / output_device: 출력값을 모을 디바이스 model = nn.DataParallel(model, device_ids=[0, 1], output_device=0) # 4. create optimizer and loss fn optimizer = Adam(model.parameters(), lr=3e-5) loss_fn = nn.CrossEntropyLoss(reduction="mean") # 5. start training for i, data in enumerate(data_loader): optimizer.zero_grad() tokens = tokenizer( data["premise"], data["hypothesis"], padding=True, truncation=True, max_length=512, return_tensors="pt", ) logits = model( input_ids=tokens.input_ids.cuda(), attention_mask=tokens.attention_mask.cuda(), return_dict=False, )[0] loss = loss_fn(logits, data["labels"].cuda()) loss.backward() optimizer.step() if i % 10 == 0: print(f"step:{i}, loss:{loss}") if i == 100: break !python ../src/data_parallel.pyUsing custom data configuration default Reusing dataset multi_nli (/root/.cache/huggingface/datasets/multi_nli/default/0.0.0/591f72eb6263d1ab527561777936b199b714cda156d35716881158a2bd144f39) 100%|████████████████████████████████████████████| 3/3 [00:00<00:00, 383.29it/s] Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.seq_relationship.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.weight', 'cls.seq_relationship.bias'] - This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing BertForSequenceClassifi[...]![](../images/dp_training.png)Multi-GPU에서 학습이 잘 되는군요. 그런데 문제는 0번 GPU에 Logits이 쏠리다보니 GPU 메모리 불균형 문제가 일어납니다. 이러한 문제는 0번 device로 Logits이 아닌 Loss를 Gather하는 방식으로 변경하면 어느정도 완화시킬 수 있습니다. Logits에 비해 Loss는 Scalar이기 때문에 크기가 훨씬 작기 때문이죠. 이 작업은 [당근마켓 블로그](https://medium.com/daangn/pytorch-multi-gpu-%ED%95%99%EC%8A%B5-%EC%A0%9C%EB%8C%80%EB%A1%9C-%ED%95%98%EA%B8%B0-27270617936b)에 소개되었던 [PyTorch-Encoding](https://github.com/zhanghang1989/PyTorch-Encoding)의 `DataParallelCriterion`과 동일합니다. 블로그에 꽤나 복잡하게 설명되어 있는데, 복잡한 방법 대신 간단하게 **forward 함수를 오버라이드 하는 것** 만으로 동일 기능을 쉽게 구현 할 수 있습니다.![](../images/dp_forward_2.png)핵심은 Loss Computation과 Loss가 reduction을 multi-thread 안에서 작동 시키는 것입니다. 
모델의 forward 함수는 multi-thread에서 작동되고 있기 때문에 Loss Computation 부분을 forward 함수 안에 넣으면 매우 쉽게 구현할 수 있겠죠.한가지 특이한 점은 이렇게 구현하면 Loss의 reduction이 2번 일어나게 되는데요. multi-thread에서 batch_size//4개에서 4개로 reduction 되는 과정(그림에서 4번)이 한번 일어나고, 각 디바이스에서 출력된 4개의 Loss를 1개로 Reduction 하는 과정(그림에서 5번)이 다시 일어나게 됩니다. 그렇다고 하더라도 Loss computation 부분을 병렬화 시킬 수 있고, 0번 GPU에 가해지는 메모리 부담이 적기 때문에 훨씬 효율적이죠.""" src/custom_data_parallel.py """ from torch import nn # logits을 출력하는 일반적인 모델 class Model(nn.Module): def __init__(self): super().__init__() self.linear = nn.Linear(768, 3) def forward(self, inputs): outputs = self.linear(inputs) return outputs # forward pass에서 loss를 출력하는 parallel 모델 class ParallelLossModel(Model): def __init__(self): super().__init__() def forward(self, inputs, labels): logits = super(ParallelLossModel, self).forward(inputs) loss = nn.CrossEntropyLoss(reduction="mean")(logits, labels) return loss운이 좋게도 우리가 자주 사용하는 Huggingface Transformers 모델들은 forward pass에서 곧 바로 Loss를 구하는 기능을 내장하고 있습니다. 따라서 이러한 과정 없이 transformers의 기능을 이용하여 진행하겠습니다. 아래의 코드는 Transformers 모델의 `labels`인자에 라벨을 입력하여 Loss를 바로 출력합니다.""" src/efficient_data_parallel.py """ # 1 ~ 4까지 생략... # 5. start training for i, data in enumerate(data_loader): optimizer.zero_grad() tokens = tokenizer( data["premise"], data["hypothesis"], padding=True, truncation=True, max_length=512, return_tensors="pt", ) loss = model( input_ids=tokens.input_ids.cuda(), attention_mask=tokens.attention_mask.cuda(), labels=data["labels"], ).loss loss = loss.mean() # (4,) -> (1,) loss.backward() optimizer.step() if i % 10 == 0: print(f"step:{i}, loss:{loss}") if i == 100: break !python ../src/efficient_data_parallel.pyUsing custom data configuration default Reusing dataset multi_nli (/root/.cache/huggingface/datasets/multi_nli/default/0.0.0/591f72eb6263d1ab527561777936b199b714cda156d35716881158a2bd144f39) 100%|████████████████████████████████████████████| 3/3 [00:00<00:00, 604.34it/s] Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForSequenceClassification: ['cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight'] - This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing BertForSequenceClassifi[...]2. `torch.nn.DataParallel`의 문제점 1) 멀티쓰레드 모듈이기 때문에 Python에서 비효율적임.Python은 GIL (Global Interpreter Lock)에 의해 하나의 프로세스에서 동시에 여러개의 쓰레드가 작동 할 수 없습니다. 따라서 근본적으로 멀티 쓰레드가 아닌 **멀티 프로세스 프로그램**으로 만들어서 여러개의 프로세스를 동시에 실행하게 해야합니다. 2) 하나의 모델에서 업데이트 된 모델이 다른 device로 매 스텝마다 복제되어야 함.현재의 방식은 각 디바이스에서 계산된 Gradient를 하나의 디바이스로 모아서(Gather) 업데이트 하는 방식이기 때문에 업데이트된 모델을 매번 다른 디바이스들로 복제(Broadcast)해야 하는데, 이 과정이 꽤나 비쌉니다. 그러나 Gradient를 Gather하지 않고 각 디바이스에서 자체적으로 `step()`을 수행한다면 모델을 매번 복제하지 않아도 되겠죠. 어떻게 이 것을 구현 할 수 있을까요? Solution? ➝ All-reduce!! 👍![](../images/allreduce.png)정답은 앞서 배웠던 All-reduce 연산입니다. 각 디바이스에서 계산된 Gradients를 모두 더해서 모든 디바이스에 균일하게 뿌려준다면 각 디바이스에서 자체적으로 `step()`을 수행 할 수 있습니다. 그러면 매번 모델을 특정 디바이스로부터 복제해 올 필요가 없겠죠. 따라서 All-reduce를 활용하는 방식으로 기존 방식을 개선해야 합니다. 그러나... 🤔그러나 All-reduce는 매우 비용이 높은 연산에 속합니다. 왜 그럴까요? All-reduce의 세부 구현을 살펴봅시다. 
Reduce + Broadcast 구현 방식![](../images/allreduce_1.png) All to All 구현 방식![](../images/allreduce_2.png) 3. `torch.nn.parallel.DistributedDataParallel` (이하 DDP) Ring All-reduce 💍Ring All-reduce는 2017년에 바이두의 연구진이 개발한 새로운 연산입니다. 기존의 방식들에 비해 월등히 효율적인 성능을 보여줬기 때문에 DDP 개발의 핵심이 되었죠.- https://github.com/baidu-research/baidu-allreduce![](../images/ring_allreduce.gif)![](../images/ring_allreduce.png) DDP란?DDP는 기존 DataParallel의 문제를 개선하기 위해 등장한 데이터 병렬처리 모듈이며 single/multi-node & multi-GPU에서 동작하는 multi-process 모듈입니다. All-reduce를 활용하게 되면서 마스터 프로세스의 개념이 없어졌기 때문에 학습 과정이 매우 심플하게 변합니다.![](../images/ddp.png)""" src/ddp.py """ import torch import torch.distributed as dist from torch.nn.parallel import DistributedDataParallel from torch.optim import Adam from torch.utils.data import DataLoader, DistributedSampler from transformers import BertForSequenceClassification, BertTokenizer from datasets import load_dataset # 1. initialize process group dist.init_process_group("nccl") rank = dist.get_rank() world_size = dist.get_world_size() torch.cuda.set_device(rank) device = torch.cuda.current_device() # 2. create dataset datasets = load_dataset("multi_nli").data["train"] datasets = [ { "premise": str(p), "hypothesis": str(h), "labels": l.as_py(), } for p, h, l in zip(datasets[2], datasets[5], datasets[9]) ] # 3. create DistributedSampler # DistributedSampler는 데이터를 쪼개서 다른 프로세스로 전송하기 위한 모듈입니다. sampler = DistributedSampler( datasets, num_replicas=world_size, rank=rank, shuffle=True, ) data_loader = DataLoader( datasets, batch_size=32, num_workers=2, sampler=sampler, shuffle=False, pin_memory=True, ) # 4. create model and tokenizer model_name = "bert-base-cased" tokenizer = BertTokenizer.from_pretrained(model_name) model = BertForSequenceClassification.from_pretrained(model_name, num_labels=3).cuda() # 5. make distributed data parallel module model = DistributedDataParallel(model, device_ids=[device], output_device=device) # 5. create optimizer optimizer = Adam(model.parameters(), lr=3e-5) # 6. start training for i, data in enumerate(data_loader): optimizer.zero_grad() tokens = tokenizer( data["premise"], data["hypothesis"], padding=True, truncation=True, max_length=512, return_tensors="pt", ) loss = model( input_ids=tokens.input_ids.cuda(), attention_mask=tokens.attention_mask.cuda(), labels=data["labels"], ).loss loss.backward() optimizer.step() if i % 10 == 0 and rank == 0: print(f"step:{i}, loss:{loss}") if i == 100: break멀티프로세스 애플리케이션이기 때문에 `torchrun`를 사용합니다.!torchrun --nproc_per_node=2 ../src/ddp.pyWARNING:torch.distributed.run: ***************************************** Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
***************************************** Using custom data configuration default Reusing dataset multi_nli (/root/.cache/huggingface/datasets/multi_nli/default/0.0.0/591f72eb6263d1ab527561777936b199b714cda156d35716881158a2bd144f39) 100%|████████████████████████████████████████████| 3/3 [00:00<00:00, 463.00it/s] Using custom data configuration default Reusing dataset multi_nli (/root/.cache/huggingface/datasets/multi_nli/default/0.0.0/591f72eb6263d1ab527561777936b199b714cda156d35716881158a2bd144f39) 100%|████████████████████████████████████████████| 3/3 [00:00<00:00, 428.06it/s] Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForSequenceClassification: ['cls.predic[...]import numpy as np import tensorflow as tf import matplotlib as plt from tensorflow import keras from tensorflow.keras.layers import Conv2D, concatenate, Dropout,MaxPool2D, MaxPooling2D, Conv2DTranspose, Activation, BatchNormalization,UpSampling2D, Add a = np.random.randn(1,16,16,18) b = tf.keras.layers.UpSampling2D(size=(8, 8), interpolation="nearest")(a) b.shape c =np.random.randn(1,128,128,18) d = tf.keras.layers.concatenate([b,c]) d.shape def conv_2d_block(x,n_filters,k_size,batchnorm=False): ''' add two Conv layers with relu activation ''' #first layer x = Conv2D(filters=n_filters,kernel_size=(k_size,k_size) , padding='same', kernel_initializer = 'he_normal')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) # 2nd layer x = Conv2D(filters=n_filters,kernel_size=(k_size,k_size) , padding='same', kernel_initializer = 'he_normal')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters,kernel_size=(k_size,k_size) , padding='same', kernel_initializer = 'he_normal')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) return x def u_net(input,n_filters=16,conv_k_size=3,pool_size=2,batchnorm=True,dropout=.2): c1 = conv_2d_block(input,n_filters * 1 , conv_k_size,batchnorm) p1 = MaxPool2D(pool_size=(pool_size,pool_size))(c1) p1 = Dropout(dropout)(p1) c2 = conv_2d_block(p1,n_filters * 2 , conv_k_size,batchnorm) p2 = MaxPool2D(pool_size=(pool_size,pool_size))(c2) p2 = Dropout(dropout)(p2) c3 = conv_2d_block(p2,n_filters * 4 , conv_k_size,batchnorm) p3 = MaxPool2D(pool_size=(pool_size,pool_size))(c3) p3 = Dropout(dropout)(p3) c4 = conv_2d_block(p3,n_filters * 8 , conv_k_size,batchnorm) p4 = MaxPool2D(pool_size=(pool_size,pool_size))(c4) p4 = Dropout(dropout)(p4) c5 = conv_2d_block(p4,n_filters * 16 , conv_k_size,batchnorm) #Up sampling u6 = Conv2DTranspose(filters=n_filters * 8 ,kernel_size=(3,3), strides=(2,2),padding='same')(c5) u6 = concatenate([u6,c4]) u6 = Dropout(dropout)(u6) c7 = conv_2d_block(u6,n_filters * 8 , conv_k_size,batchnorm) u8 = Conv2DTranspose(filters=n_filters * 4 ,kernel_size=(3,3), strides=(2,2),padding='same')(c7) u8 = concatenate([u8,c3]) u8 = Dropout(dropout)(u8) c9 = conv_2d_block(u8,n_filters * 4 , conv_k_size,batchnorm) u10 = Conv2DTranspose(filters=n_filters * 2,kernel_size=(3,3) , strides=(2,2),padding='same')(c9) u10 = concatenate([u10,c2]) u10 = Dropout(dropout)(u10) c11 = conv_2d_block(u10,n_filters * 2 , conv_k_size,batchnorm) u12 = Conv2DTranspose(filters=n_filters * 1 ,kernel_size=(3,3), strides=(2,2),padding='same')(c11) u12 = concatenate([u12,c1]) u12 = Dropout(dropout)(u12) c13 = conv_2d_block(u12,n_filters * 1 , conv_k_size,batchnorm) output = Conv2D(filters=3 , kernel_size=(1,1),activation='softmax')(c13) # output layer # model = 
Model(inputs=input,outputs=output,name='classifier') return output u_net(d).shapeQuality Evaluation on ImageNet1k This notebooks allows to run the quality evaluation of any classsification model from [**CoreML Model Zoo**](https://github.com/vladimir-chernykh/coreml-model-zoo) on **ImageNet1k** data ([ILSVRC](http://www.image-net.org/challenges/LSVRC/)).The **validation set** is taken because it is almost never included into the training set of the specific model and has never changed since 2012. Importsimport os import glob import importlib import xml.etree.ElementTree as ET from tqdm.notebook import tqdm import numpy as np from joblib import Parallel, delayed from PIL import Image import matplotlib.pyplot as plt from torch.utils.data import Dataset, DataLoader import coremltoolsWARNING:root:TensorFlow version 2.2.0 detected. Last version known to be fully compatible is 1.14.0 . WARNING:root:Keras version 2.3.1 detected. Last version known to be fully compatible of Keras is 2.2.4 .Define a function to dynamically import object from moduledef dynamic_import(abs_module_path, class_name): module_object = importlib.import_module(abs_module_path) target_class = getattr(module_object, class_name) return target_classModel Specify model name and versionmodel_name = "mobilenet_v2" model_spec = "mobilenet_v2_0.35_96_keras_applications"Load CoreML modelmodel = coremltools.models.MLModel(f"../../vision/classification/{model_name}/models/{model_spec}.mlmodel")Data Read ImageNet classes infowith open("./imagenet1k_classes.txt") as f: classes_coreml = f.readlines() classes_coreml = np.array([c.rstrip('\n') for c in classes_coreml]) classes_coreml_dict = {c:i for i, c in enumerate(classes_coreml)}ImageNet dataset readerclass DatasetImagenet(Dataset): def __init__(self, root, split, classes_mapping, transform=None): self.transform = transform self.root = root with open(classes_mapping) as fp: _temp = [line.rstrip('\n').split(" ", maxsplit=1) for line in fp] self.classnames_mapping = dict(_temp) self.classids_mapping = {wnid[0]: idd for idd, wnid in enumerate(_temp)} annotation_files = sorted(glob.glob(os.path.join(root, f"Annotations/CLS-LOC/{split}/*.xml"))) self.image_files = np.array([file.replace("/Annotations/", "/Data/").rstrip(".xml") + ".JPEG" for file in annotation_files]) self.wnids = Parallel(n_jobs=8)(delayed(self._annotation_wnids_reader)(file) for file in annotation_files) self.wnids = np.array([item[0] for item in self.wnids]) self.classnames = np.array([self.classnames_mapping[wnid] for wnid in self.wnids]) self.classids = np.array([self.classids_mapping[wnid] for wnid in self.wnids]) @staticmethod def _annotation_wnids_reader(annotation_file): xmltree = ET.parse(annotation_file) objects = xmltree.findall("object") result = [] for object_iter in objects: bndbox = object_iter.find("name") result.append(bndbox.text) return result def __len__(self): return len(self.classids) def __getitem__(self, idx): image = Image.open(self.image_files[idx]).convert("RGB") if self.transform is not None: image = self.transform(image) label = self.classids[idx] return image, labelAll the preprocessing steps are embedded into the network. All one has to do outside is resizing.Different models are trained in different frameworks and settings each of which requires a little bit different resizing options to fully repeat the quality.Load resizing transformation suitable exactly for the model of interest:%cd ../.. 
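# dynamic_import (defined near the top of this notebook) loads the model-specific resize
# transform from the repository, so we temporarily change to the repository root here and
# switch back to tools/evaluation right after the import.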
transform_resize_dict = dynamic_import( f"vision.classification.{model_name}.evaluation_utils.resize_utils", "transform_resize_dict") %cd tools/evaluation transform_coreml = transform_resize_dict[model_spec]/Users/vovacher/Dropbox/coreml-zoo/coreml-model-zoo /Users/vovacher/Dropbox/coreml-zoo/coreml-model-zoo/tools/evaluationCreate `Dataset` object with ImageNet dataset. It might take around 10 seconds because all the `xml`s with annotations are parsed.dataset = DatasetImagenet(root="/Users/vovacher/Downloads/ILSVRC", split="val", classes_mapping="/Users/vovacher/Downloads/ILSVRC/LOC_synset_mapping.txt", transform=transform_coreml)Data sample exampledataset[0]More examples visualizedplt.figure(figsize=(20, 8)) for i in range(10): plt.subplot(2, 5, i + 1) plt.imshow(dataset[i][0]) plt.axis("off") plt.title(classes_coreml[dataset[i][1]])Evaluation Define `DataLoader` which is able to sample from `Dataset` efficiently.def custom_collate_fn(batch): return {"input_1": batch[0][0], "target": batch[0][1]} dataloader = DataLoader(dataset=dataset, batch_size=1, num_workers=4, collate_fn=custom_collate_fn)Evaluate the model on **ILSVRC 2012 validation** datay_true = [] y_preds_proba = [] for data in tqdm(dataloader): y_true.append(data["target"]) _pred = model.predict(data=data, useCPUOnly=True) y_preds_proba.append(_pred) y_true = np.array(y_true) y_preds_proba = np.array(y_preds_proba)Transform predictions from class names into class numbers to compare with ground truthy_preds_proba_ids = [] for i in tqdm(range(len(y_preds_proba))): _outs = y_preds_proba[i]["output_1"] y_preds_proba_ids.append(np.zeros(len(_outs))) for k, v in _outs.items(): y_preds_proba_ids[-1][classes_coreml_dict[k]] = v y_preds_proba_ids = np.array(y_preds_proba_ids)Results Top-1 Accuracytop1_acc = (y_true == np.argmax(y_preds_proba_ids, axis=1)).mean() top1_accTop-5 Accuracy_top5 = np.argsort(y_preds_proba_ids, axis=1)[..., -5:] top5_acc = np.mean([y_true[i] in _top5[i] for i in range(len(_top5))]) top5_acc64-D image manifold: images%matplotlib inline import sys import numpy as np import matplotlib from matplotlib import pyplot as plt from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox import torch sys.path.append("../../") from experiments.datasets import FFHQStyleGAN64DLoader from experiments.architectures.image_transforms import create_image_transform, create_image_encoder from experiments.architectures.vector_transforms import create_vector_transform from manifold_flow.flows import ManifoldFlow, EncoderManifoldFlow import plot_settings as ps ps.setup()Helper function to go from torch to numpy conventionsdef trf(x): return np.clip(np.transpose(x, [1,2,0]) / 256., 0., 1.)Load modelsdef load_model( filename, outerlayers=20, innerlayers=8, levels=4, splinebins=11, splinerange=10.0, dropout=0.0, actnorm=True, batchnorm=False, linlayers=2, linchannelfactor=2, lineartransform="lu" ): steps_per_level = outerlayers // levels spline_params = { "apply_unconditional_transform": False, "min_bin_height": 0.001, "min_bin_width": 0.001, "min_derivative": 0.001, "num_bins": splinebins, "tail_bound": splinerange, } outer_transform = create_image_transform( 3, 64, 64, levels=levels, hidden_channels=100, steps_per_level=steps_per_level, num_res_blocks=2, alpha=0.05, num_bits=8, preprocessing="glow", dropout_prob=dropout, multi_scale=True, spline_params=spline_params, postprocessing="partial_mlp", postprocessing_layers=linlayers, postprocessing_channel_factor=linchannelfactor, use_actnorm=actnorm, 
use_batchnorm=batchnorm, ) inner_transform = create_vector_transform( 64, innerlayers, linear_transform_type=lineartransform, base_transform_type="rq-coupling", context_features=1, dropout_probability=dropout, tail_bound=splinerange, num_bins=splinebins, use_batch_norm=batchnorm, ) model = ManifoldFlow( data_dim=(3, 64, 64), latent_dim=64, outer_transform=outer_transform, inner_transform=inner_transform, apply_context_to_outer=False, pie_epsilon=0.1, clip_pie=None ) model.load_state_dict( torch.load("../data/models/{}.pt".format(filename), map_location=torch.device("cpu")) ) _ = model.eval() return model def load_emf_model( filename, outerlayers=20, innerlayers=8, levels=4, splinebins=11, splinerange=10.0, dropout=0.0, actnorm=True, batchnorm=False, linlayers=2, linchannelfactor=2, lineartransform="lu" ): steps_per_level = outerlayers // levels spline_params = { "apply_unconditional_transform": False, "min_bin_height": 0.001, "min_bin_width": 0.001, "min_derivative": 0.001, "num_bins": splinebins, "tail_bound": splinerange, } encoder = create_image_encoder( 3, 64, 64, latent_dim=64, context_features=None, ) outer_transform = create_image_transform( 3, 64, 64, levels=levels, hidden_channels=100, steps_per_level=steps_per_level, num_res_blocks=2, alpha=0.05, num_bits=8, preprocessing="glow", dropout_prob=dropout, multi_scale=True, spline_params=spline_params, postprocessing="partial_mlp", postprocessing_layers=linlayers, postprocessing_channel_factor=linchannelfactor, use_actnorm=actnorm, use_batchnorm=batchnorm, ) inner_transform = create_vector_transform( 64, innerlayers, linear_transform_type=lineartransform, base_transform_type="rq-coupling", context_features=1, dropout_probability=dropout, tail_bound=splinerange, num_bins=splinebins, use_batch_norm=batchnorm, ) model = EncoderManifoldFlow( data_dim=(3, 64, 64), latent_dim=2, encoder=encoder, outer_transform=outer_transform, inner_transform=inner_transform, apply_context_to_outer=False, pie_epsilon=0.1, clip_pie=None ) model.load_state_dict( torch.load("../data/models/{}.pt".format(filename), map_location=torch.device("cpu")) ) _ = model.eval() return model mf = load_model("mf_64_gan64d_april") emf = load_emf_model("emf_64_gan64d_april") pie = load_model("pie_64_gan64d_april")Sample comparisonn = 8 x_test = 0.5 + 255. 
* np.load("../data/samples/gan64d/x_test_prior.npy")[:n] x_gen_af = np.load("../data/results/flow_2_gan64d_april_samples.npy")[:n] x_gen_pie = np.load("../data/results/pie_64_gan64d_april_samples.npy")[:n] x_gen_pie_sample_v = pie.sample(u=None, context=torch.zeros((n,1)).to(torch.float), n=n, sample_orthogonal=True).detach().numpy() x_gen_mf = np.load("../data/results/mf_64_gan64d_april_samples.npy")[:n] x_gen_emf = np.load("../data/results/emf_64_gan64d_april_samples.npy")[:n] nrows = 6 ncols = 8 # xs = [x_test, x_gen_af, x_gen_pie, x_gen_mf, x_gen_emf] # labels = ["Original", "AF", r"PIE", r"$\mathcal{M}$-flow", r"$\mathcal{M}_e$-flow"] xs = [x_test, x_gen_af, x_gen_pie, x_gen_pie_sample_v, x_gen_mf, x_gen_emf] labels = ["Original", "AF", r"PIE (manifold)", r"PIE (off-manifold)", r"$\mathcal{M}$-flow", r"$\mathcal{M}_e$-flow"] fig, gs = ps.grid_width(ncols, nrows, width=ps.TEXTWIDTH, large_margin=0.04, small_margin=0.01, sep=0.005, t_space=False, b_space=False, r_space=False, l_space=True) for i in range(ncols): for j, (x, label) in enumerate(zip(xs, labels)): ax = plt.subplot(gs[j*ncols + i]) plt.imshow(trf(x[i])) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False) if i == 0: plt.ylabel(label) plt.savefig("../figures/gan64d_samples.pdf") nrows = 5 ncols = 4 xs = [x_test, x_gen_af, x_gen_pie, x_gen_mf, x_gen_emf] labels = ["Original", "AF", r"PIE", r"$\mathcal{M}$-flow", r"$\mathcal{M}_e$-flow"] fig, gs = ps.grid_width(ncols, nrows, width=0.33 * ps.TEXTWIDTH, large_margin=0.06, small_margin=0.01, sep=0.005, t_space=False, b_space=False, r_space=False, l_space=True) for i in range(ncols): for j, (x, label) in enumerate(zip(xs, labels)): ax = plt.subplot(gs[j*ncols + i]) plt.imshow(trf(x[i])) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False) if i == 0: plt.ylabel(label) plt.savefig("../figures/gan64d_samples_small.pdf")Test samples and projections to learned manifoldstest_idx=list(range(8)) n_test = len(test_idx) x_test = 0.5 + 255. * np.load("../data/samples/gan64d/x_test.npy")[test_idx] x_reco_mf = np.load("../data/results/mf_64_gan64d_april_model_x_reco_test.npy")[test_idx] x_reco_emf = np.load("../data/results/emf_64_gan64d_april_model_x_reco_test.npy")[test_idx] x_reco_pie = np.load("../data/results/pie_64_gan64d_april_model_x_reco_test.npy")[test_idx] nrows = 5 ncols = 4 enhance = 1 labels = ["Original", "PIE", "Residual", "$\mathcal{M}$-flow", r"Residual"] fig, gs = ps.grid_width(ncols, nrows, width=0.33 * ps.TEXTWIDTH, large_margin=0.06, small_margin=0.01, sep=0.005, t_space=False, b_space=False, r_space=False, l_space=True) for i in range(ncols): xs = [ trf(x_test[i]), trf(x_reco_pie[i]), 1. - enhance*np.abs(trf(x_reco_pie[i]) - trf(x_test[i])), trf(x_reco_mf[i]), 1. - enhance*np.abs(trf(x_reco_mf[i]) - trf(x_test[i])), ] for j, (x, label) in enumerate(zip(xs, labels)): ax = plt.subplot(gs[j * ncols + i]) plt.imshow(x) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False) if i == 0: plt.ylabel(label) plt.savefig("../figures/gan64d_projections_small.pdf")Training samplesloader = FFHQStyleGAN64DLoader() data = loader.load_dataset(train=False, dataset_dir="../data/samples/gan64d") fig = plt.figure(figsize=(5*3., 4*3.)) for i in range(20): x, _ = data[np.random.randint(len(data) - 1)] x_ = np.transpose(np.array(x), [1,2,0]) / 256. 
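    # (added note) np.transpose(..., [1, 2, 0]) converts torch's channel-first (C, H, W) layout into the
    # channel-last (H, W, C) layout expected by plt.imshow, and dividing by 256 maps 8-bit pixel values
    # into [0, 1]; this mirrors the trf() helper defined earlier in this notebook.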
    ax = plt.subplot(4, 5, i+1)
    plt.imshow(x_)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.tight_layout()
plt.show()

Dataset
The dataset contains information about 227 countries. This dataset has lots of interesting information on each country, such as the country's birth rates, death rates, and its gross domestic product (GDP). GDP is the value of all the goods and services produced in a year, expressed as dollars per person.

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
plt.style.use('ggplot')

countries = pd.read_csv('../data/countries-of-the-world.csv')
countries.head()

sns.scatterplot(x=countries['GDP ($ per capita)'], y=countries['Phones (per 1000)'])
plt.show()

This plot does not show a linear relationship between GDP and literacy, but countries with a lower GDP do seem more likely to have a lower percentage of the population that can read and write.

sns.scatterplot(x=countries['GDP ($ per capita)'], y=countries['Literacy (%)'])
plt.show()

How many countries are in each region of the world?

sns.countplot(y=countries['Region'])
plt.show()

Supplementary Script 1: Basic phylogenetic analysis

import os
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
%matplotlib inline

colors = sns.color_palette()

# path to patient overview file of original cohort (Supplementary Table 1)
overview_fp = 'Supplementary_Table_1.xlsx'
# path to phylogenetic clustering file of original cohort (Supplementary Table 2)
clustering_fp = 'Supplementary_Table_2.xlsx'

col_mono_dm = 'Monophyletic distant mets'
col_mono_lm = 'Monophyletic lymphatic mets'
col_metmono_dm = 'Monophyletic distant mets ALT_CLF'
col_metmono_lm = 'Monophyletic lymphatic mets ALT_CLF'
col_no_dm = 'No distant mets'
col_no_lm = 'No lymphatic mets'

try:
    all_overview_df = pd.read_excel(overview_fp)  # analyze Naxerova cohort (Kim cohort is analyzed separately for Fig. 
2) overview_df = all_overview_df[all_overview_df.Dataset == 'Naxerova'] except FileNotFoundError: print('ERROR: File {} needs to be in the same folder!'.format(overview_fp)) raise try: clustering_df = pd.read_excel(clustering_fp) except FileNotFoundError: print('ERROR: File {} needs to be in the same folder!'.format(clustering_fp)) raise # create dataframe for bar plot depicting the observed frequency of monophyletic clades d = defaultdict(list) for index, row in overview_df.iterrows(): if not np.isnan(row[col_mono_dm]): met_type = 'Distant' d['Patient'].append(row.Patient) d['MonophyleticClade'].append(True if row[col_mono_dm] == 1 else False) d['MetMonophyleticClade'].append(True if row[col_metmono_dm] == 1 else False) d['Type'].append(met_type) d['NoSamples'].append(clustering_df[(clustering_df.Patient == row.Patient) & (clustering_df['Metastasis type'] == met_type)].m.values[0]) if not np.isnan(row[col_mono_lm]): met_type = 'Lymphatic' d['Patient'].append(row.Patient) d['MonophyleticClade'].append(True if row[col_mono_lm] == 1 else False) d['MetMonophyleticClade'].append(True if row[col_metmono_lm] == 1 else False) d['Type'].append(met_type) d['NoSamples'].append(clustering_df[(clustering_df.Patient == row.Patient) & (clustering_df['Metastasis type'] == met_type)].m.values[0]) phyletic_df = pd.DataFrame(data=d) print('Mean and median number of lymphatic mets for subjects with at least 2 lymphatic mets: {}, {}'.format( np.mean(phyletic_df[(phyletic_df['Type'] == 'Lymphatic')]['NoSamples']), np.median(phyletic_df[(phyletic_df['Type'] == 'Lymphatic')]['NoSamples']))) print('Mean and median number of distant mets for subjects with at least 2 distant mets: {}, {}'.format( np.mean(phyletic_df[(phyletic_df['Type'] == 'Distant')]['NoSamples']), np.median(phyletic_df[(phyletic_df['Type'] == 'Distant')]['NoSamples']))) def compare_monophyletic_clades(df, col_lm, col_dm, label): """ Investigate the fraction of patients with a monophyletic clade of all lymphatic or distant metastases :param df: dataframe with patients and whether there were monophyletic clades :param col_lm: name of boolean dataframe column for monophyletic lymphatic mets :param col_dm: name of boolean dataframe column for monophyletic distant mets :param label: what is being investigated """ # dm_mono_frac = len(df[df[col_dm] == 1]) / df[col_dm].count() lm_mono_frac = len(df[df[col_lm] == 1]) / df[col_lm].count() print('{} of distant metastases: {:.3%} ({}/{})'.format( label, dm_mono_frac, len(df[df[col_dm] == 1]), df[col_dm].count())) print('{} of lymphatic metastases: {:.3%} ({}/{})'.format( label, lm_mono_frac, len(df[df[col_lm] == 1]), df[col_lm].count())) oddsratio, pvalue = stats.fisher_exact( [[len(df[df[col_dm] == 1]), len(df[df[col_dm] == 0])], [len(df[df[col_lm] == 1]), len(df[df[col_lm] == 0])]]) print('Probability to observe a more imbalanced ratio by chance is {:.4e}.'.format(pvalue)) def plot_monophyletic_clade(df, y_col, ylabel, filepath=None): """ Create bar plot with the fraction of monophyletic clades per metastasis type :param df: dataframe with patients and whether there were monophyletic clades :param y_col: name of boolean column with monophyletic clade information :param ylabel: y-axis label :param filepath: if not None plot is stored at the given path """ plot_height = 3 # plot height ci = 90 # confidence interval sns.set(font_scale = 1.17) with sns.axes_style('white', {'axes.grid': False, 'grid.linestyle': u':', 'xtick.labelsize': 13, 'ytick.labelsize': 10, 'ytick.major.size': 4.0}): fg = 
sns.catplot(x='Type', y=y_col, data=phyletic_df, ci=ci, height=plot_height, aspect=0.9, palette=colors, kind='bar', order=['Lymphatic', 'Distant']) fg.set(ylim=[0, 1], ylabel=ylabel) fg.set(xlabel=''); fg.fig.get_axes()[0].yaxis.set_tick_params(which='major', left='on') if filepath is not None: plt.savefig(filepath, dpi=150, bbox_inches='tight', transparent=True) # Compare the monophyletic clades of lymphatic and distant mets (Fig. 1b) compare_monophyletic_clades(overview_df, col_mono_lm, col_mono_dm, label='Monophyletic clade') plot_monophyletic_clade(phyletic_df, y_col='MonophyleticClade', ylabel='Monophyletic clade', filepath='pnl_monophyletic_clade.pdf') # Compare the monophyletic clades of lymphatic and distant mets when no primary tumor samples # can be in a monophyletic clade (Supplementary Information) compare_monophyletic_clades(overview_df, col_metmono_lm, col_metmono_dm, label='Monophyletic met clade') plot_monophyletic_clade(phyletic_df, y_col='MetMonophyleticClade', ylabel='Monophyletic met clade', filepath='pnl_monophyletic_met_clade.pdf') # Create boxplot to compare the number of samples of lymphatic and distant metastases (Fig. 1e) # diamond marker for mean per group meanpointprops = dict(marker='d', markeredgecolor='black', markeredgewidth=0.2, markerfacecolor='magenta', markersize=9) with sns.axes_style('white', {'axes.grid': False, 'grid.linestyle': u':', 'xtick.labelsize': 13, 'ytick.labelsize': 10, 'ytick.major.size': 4.0}): f, ax = plt.subplots(figsize=(2.0, 2.5)) sns.boxplot(x='Type', y='NoSamples', data=phyletic_df, palette=colors, whis=True, showmeans=True, meanprops=meanpointprops, orient="v", width=0.7, showfliers=False, order=['Lymphatic', 'Distant'], ax=ax) sns.swarmplot(x='Type', y='NoSamples', data=phyletic_df, color=".3", size=6, order=['Lymphatic', 'Distant'], ax=ax) ax.yaxis.set_tick_params(which='major', left='on') ax.set(xlabel=''); ax.set(ylim=[0, 10], ylabel='No of sampled metastases') sns.despine() plt.savefig('pnl_met_samples.pdf', dpi=150, bbox_inches='tight', transparent=True) mwus = stats.mannwhitneyu(phyletic_df[phyletic_df.Type == 'Lymphatic'].NoSamples, phyletic_df[phyletic_df.Type == 'Distant'].NoSamples, alternative='two-sided') print('Number of lymphatic (mean: {:.3f}, #{}) vs distant (mean {:.3f}, #{}) metastases samples: Mann-Whitney U statistic {:.3f}, p-value {:.4e}'.format( np.mean(phyletic_df[phyletic_df.Type == 'Lymphatic'].NoSamples), len(phyletic_df[phyletic_df.Type == 'Lymphatic'].NoSamples), np.mean(phyletic_df[phyletic_df.Type == 'Distant'].NoSamples), len(phyletic_df[phyletic_df.Type == 'Distant'].NoSamples), mwus[0], mwus[1]))Number of lymphatic (mean: 3.700, #10) vs distant (mean 3.000, #6) metastases samples: Mann-Whitney U statistic 35.000, p-value 6.0720e-01Expected Portfolio Return:np.sum(weights * log_returns.mean()) * 250Expected Portfolio Variance:np.dot(weights.T, np.dot(log_returns.cov() * 250, weights))Expected Portfolio Volatility:np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights)))***pfolio_returns = [] pfolio_volatilities = [] for x in range (1000): weights = np.random.random(num_assets) weights /= np.sum(weights) pfolio_returns.append(np.sum(weights * log_returns.mean()) * 250) pfolio_volatilities.append(np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights)))) pfolio_returns, pfolio_volatilities pfolio_returns = [] pfolio_volatilities = [] for x in range (1000): weights = np.random.random(num_assets) weights /= np.sum(weights) pfolio_returns.append(np.sum(weights * log_returns.mean()) * 
250) pfolio_volatilities.append(np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights)))) pfolio_returns = np.array(pfolio_returns) pfolio_volatilities = np.array(pfolio_volatilities) pfolio_returns, pfolio_volatilities4.1 线性回归 4.1.1 标准方程$$\hat{\theta} = (X^T X)^{-1} X^T y$$在机器学习中,向量通常表示为列向量,是有单一列的二维数组。如果$\Theta$和x为列向量,则预测为$\hat{y}=\Theta^T x$,其中$\Theta^T$为$\Theta$(行向量而不是列向量)的转置,且$\Theta^T x$为$\Theta^T$和$\Theta$的矩阵乘积。%matplotlib inline import numpy as np import matplotlib.pyplot as plt X = 2 * np.random.rand(100, 1) y = 4 + 3 * X + np.random.randn(100, 1) plt.figure(figsize=(10, 6)) plt.scatter(X, y) plt.show() X_b = np.c_[np.ones((100, 1)), X] #在每个实例上add x0=1 theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y) theta_best X_new = np.array([[0], [2]]) X_new_b = np.c_[np.ones((2, 1)), X_new] y_predict = X_new_b.dot(theta_best) y_predict plt.figure(figsize=(10, 6)) plt.plot(X_new, y_predict, "r--") plt.plot(X, y, "b.") plt.axis([0, 2, 0, 15]) plt.show() from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(X, y) lin_reg.intercept_, lin_reg.coef_`LinearRegression`类基于`scipy.linalg.lstsq()`函数(最小二乘法)theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6) theta_best_svd此函数计算:$$\hat{\theta} = X^+ y$$其中$X^+$是$X$的伪逆,具体说是Moore-Penrose逆,可以使用`np.linalg.pinv()`直接计算这个伪逆np.linalg.pinv(X_b).dot(y)Explorer un corpus Importsfrom collections import defaultdict import os import matplotlib.pyplot as plt import numpy as npCompter le nombre de documents dans le corpuspath = "../data/txt/" files = sorted(os.listdir(path)) # stocker dans 'files' la liste des fichiers contenus dans le dossier 'txt' len(files) print(files[:10])['Bxl_1847_Tome_I1_Part_1.txt', 'Bxl_1847_Tome_I1_Part_2.txt', 'Bxl_1847_Tome_I1_Part_3.txt', 'Bxl_1847_Tome_I1_Part_4.txt', 'Bxl_1847_Tome_I1_Part_5.txt', 'Bxl_1848_Tome_I1_Part_1.txt', 'Bxl_1848_Tome_I1_Part_2.txt', 'Bxl_1848_Tome_I1_Part_3.txt', 'Bxl_1849_Tome_I1_Part_1.txt', 'Bxl_1849_Tome_I1_Part_2.txt']Explorer les noms de fichier Nous allons manipuler ici les chaines de caractères.Il s'agit de la classe `str` en Python.Pour en savoir plus : https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/231888-creez-votre-premier-objet-les-chaines-de-caractereschaine = 'Bxl_1849_Tome_II1_Part_5.txt' type(chaine) # la méthode split chaine_split = chaine.split('_') chaine_split # Accéder à l'année year = chaine_split[1] year # Manipuler les str pour convertir une année en décennie year[:3] year[-1] year[:3] + '0s'Quelques statistiquesall_years = [str(year) for year in range(1847, 1979)] print(all_years) print(f"\nIl y a a priori {len(all_years)} années à traiter dans notre corpus") count_decade = defaultdict(int) # initialiser le dico des décennies count_years = defaultdict(int) count_cities = defaultdict(int) # initialiser le dico des villes count_tomes = defaultdict(int) # initialiser le dico pour les tomes covered_years = set() # initialiser un set de données pour les années couvertes (liste univoque) for f in files: # pour tous les fichiers contenus dans 'files' if "_" in f and f.endswith("txt"): # si le nom du fichier contient '_' et que le fichier termine par l'extension 'txt' alors elems = f.split("_") # splitter les noms et les stocker dans le tableau 'elems' city = elems[0] # le 1er élément de chaque tableau contient la ville à stocker dans le dico approprié year = elems[1] # le 2e élément contient l'année à stocker tome = elems[3] # le 4e élément contient le numéro du tome 
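        # (added note) For example, 'Bxl_1847_Tome_I1_Part_1.txt'.split("_") gives
        # ['Bxl', '1847', 'Tome', 'I1', 'Part', '1.txt'], so elems[0] is the city code ('Bxl'),
        # elems[1] the year ('1847'), and elems[3] the tome identifier ('I1').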
covered_years.add(year) # ajouter au set les années couvertes par le corpus decade = year[:3] + "0s" # transformer les années en décades count_decade[decade] += 1 # compter le nombre de décennies count_cities[city] += 1 # le nombre de villes count_tomes[tome] += 1 # et le nom des tomes count_years[year] += 1 else: print(f"Anomalous file: {f}") # sinon afficher le nom du fichier corrompus print(f"Il y a {count_cities['Bxl']} bulletins de Bruxelles and {count_cities['Lkn']} ") nb_rap = count_tomes['RptAn'] # stocker les rapports dans une variable print(f"{len(files) - nb_rap} documents sont des bulletins et {nb_rap} documents sont des rapports annuels") # effectuer la soustraction pour distinguer les bulletins des rapports print(len(covered_years)) print(count_years) print(f"\nIl manque par conséquent, {len(all_years)-len(covered_years)} années dans notre corpus") missing_years = [y for y in all_years if y not in covered_years] print(f"Années manquantes: {', '.join(missing_years)}")Années manquantes: 1853, 1875, 1916, 1917, 1940, 1941, 1942, 1943, 1944Visualisation du nombre de bulletins par décenniesCes visualisations sont obtenus avec la librairie Matplotlib.Pour en savoir plus : https://openclassrooms.com/fr/courses/4452741-decouvrez-les-librairies-python-pour-la-data-science/4740942-maitrisez-les-possibilites-offertes-par-matplotlib.index = np.arange(len(count_decade)) plt.bar(index, count_decade.values()) plt.xlabel('Décennie') plt.ylabel('# bulletins') plt.xticks(index, count_decade.keys(), fontsize=8, rotation=30) plt.title('Évolution du nombre de bulletins') plt.show()Migrating from Spark to BigQuery via Dataproc -- Part 5* [Part 1](01_spark.ipynb): The original Spark code, now running on Dataproc (lift-and-shift).* [Part 2](02_gcs.ipynb): Replace HDFS by Google Cloud Storage. This enables job-specific-clusters. (cloud-native)* [Part 3](03_automate.ipynb): Automate everything, so that we can run in a job-specific cluster. (cloud-optimized)* [Part 4](04_bigquery.ipynb): Load CSV into BigQuery, use BigQuery. (modernize)* [Part 5](05_functions.ipynb): Using Cloud Functions, launch analysis every time there is a new file in the bucket. (serverless) Catch-up cell%%bash wget http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz gunzip kddcup.data_10_percent.gz BUCKET='cloud-training-demos-ml' # CHANGE gsutil cp kdd* gs://$BUCKET/ bq mk sparktobqCreate reporting function%%writefile main.py from google.cloud import bigquery import google.cloud.storage as gcs import tempfile import os def create_report(BUCKET, gcsfilename, tmpdir): """ Creates report in gs://BUCKET/ based on contents in gcsfilename (gs://bucket/some/dir/filename) """ # connect to BigQuery client = bigquery.Client() destination_table = client.get_table('sparktobq.kdd_cup') # Specify table schema. 
Autodetect is not a good idea for production code job_config = bigquery.LoadJobConfig() schema = [ bigquery.SchemaField("duration", "INT64"), ] for name in ['protocol_type', 'service', 'flag']: schema.append(bigquery.SchemaField(name, "STRING")) for name in 'src_bytes,dst_bytes,wrong_fragment,urgent,hot,num_failed_logins'.split(','): schema.append(bigquery.SchemaField(name, "INT64")) schema.append(bigquery.SchemaField("unused_10", "STRING")) schema.append(bigquery.SchemaField("num_compromised", "INT64")) schema.append(bigquery.SchemaField("unused_12", "STRING")) for name in 'su_attempted,num_root,num_file_creations'.split(','): schema.append(bigquery.SchemaField(name, "INT64")) for fieldno in range(16, 41): schema.append(bigquery.SchemaField("unused_{}".format(fieldno), "STRING")) schema.append(bigquery.SchemaField("label", "STRING")) job_config.schema = schema # Load CSV data into BigQuery, replacing any rows that were there before job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE job_config.skip_leading_rows = 0 job_config.source_format = bigquery.SourceFormat.CSV load_job = client.load_table_from_uri(gcsfilename, destination_table, job_config=job_config) print("Starting LOAD job {} for {}".format(load_job.job_id, gcsfilename)) load_job.result() # Waits for table load to complete. print("Finished LOAD job {}".format(load_job.job_id)) # connections by protocol sql = """ SELECT COUNT(*) AS count FROM sparktobq.kdd_cup GROUP BY protocol_type ORDER by count ASC """ connections_by_protocol = client.query(sql).to_dataframe() connections_by_protocol.to_csv(os.path.join(tmpdir,"connections_by_protocol.csv")) print("Finished analyzing connections") # attacks plot sql = """ SELECT protocol_type, CASE label WHEN 'normal.' 
THEN 'no attack' ELSE 'attack' END AS state, COUNT(*) as total_freq, ROUND(AVG(src_bytes), 2) as mean_src_bytes, ROUND(AVG(dst_bytes), 2) as mean_dst_bytes, ROUND(AVG(duration), 2) as mean_duration, SUM(num_failed_logins) as total_failed_logins, SUM(num_compromised) as total_compromised, SUM(num_file_creations) as total_file_creations, SUM(su_attempted) as total_root_attempts, SUM(num_root) as total_root_acceses FROM sparktobq.kdd_cup GROUP BY protocol_type, state ORDER BY 3 DESC """ attack_stats = client.query(sql).to_dataframe() ax = attack_stats.plot.bar(x='protocol_type', subplots=True, figsize=(10,25)) ax[0].get_figure().savefig(os.path.join(tmpdir,'report.png')); print("Finished analyzing attacks") bucket = gcs.Client().get_bucket(BUCKET) for blob in bucket.list_blobs(prefix='sparktobq/'): blob.delete() for fname in ['report.png', 'connections_by_protocol.csv']: bucket.blob('sparktobq/{}'.format(fname)).upload_from_filename(os.path.join(tmpdir,fname)) print("Uploaded report based on {} to {}".format(gcsfilename, BUCKET)) def bigquery_analysis_cf(data, context): # check that trigger is for a file of interest bucket = data['bucket'] name = data['name'] if ('kddcup' in name) and not ('gz' in name): filename = 'gs://{}/{}'.format(bucket, data['name']) print(bucket, filename) with tempfile.TemporaryDirectory() as tmpdir: create_report(bucket, filename, tmpdir) %%writefile requirements.txt google-cloud-bigquery google-cloud-storage pandas matplotlib # verify that the code in the CF works name='kddcup.data_10_percent' if 'kddcup' in name and not ('gz' in name): print(True)Test that the function endpoint works# test that the function works import main as bq BUCKET='cloud-training-demos-ml' # CHANGE try: bq.create_report(BUCKET, 'gs://{}/kddcup.data_10_percent'.format(BUCKET), "/tmp") except Exception as e: print(e.errors)Deploy the cloud function!gcloud functions deploy bigquery_analysis_cf --runtime python37 --trigger-resource $BUCKET --trigger-event google.storage.object.finalizeDeploying function (may take a while - up to 2 minutes)...⠼Try it outCopy the file to the bucket:!gsutil rm -rf gs://$BUCKET/sparktobq !gsutil cp kddcup.data_10_percent gs://$BUCKET/Verify that the Cloud Function is being run. 
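One way to sanity-check the trigger logic without waiting for a real invocation is to call the entry point locally with a dict shaped like the google.storage.object.finalize event payload. This is a rough sketch, not part of the original notebook; it assumes the main.py written above is importable and that valid GCP credentials are available in the environment:
import main as bq
BUCKET = 'cloud-training-demos-ml'  # CHANGE, same bucket as above
fake_event = {'bucket': BUCKET, 'name': 'kddcup.data_10_percent'}  # minimal finalize-style payload
bq.bigquery_analysis_cf(fake_event, context=None)  # context is not used by the function body
For the deployed function itself, check its invocations and logs.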
You can do this from the [Cloud Functions](https://console.cloud.google.com/functions/) part of the GCP Console.Once the function is complete (in about 30 seconds), see if the output folder contains the report:!gsutil ls gs://$BUCKET/sparktobq**Fuzzy C-means Clustering**from google.colab import files uploaded = files.upload() !pip install scikit-fuzzy import pandas as pd import numpy as np import matplotlib.pyplot as plt import skfuzzy import math #Specify colors for different classes colors=['b', 'grey', 'g', 'r', 'c', 'm', 'y', 'k', 'lime', 'purple'] #import Dataset and change permeability to log-scale dataset = pd.read_csv('Fuzzy_Clustering_Porosity_Permeability.csv') ds_log=pd.DataFrame.copy(dataset) ds_log['Permeability']=ds_log['Permeability'].apply(math.log10) # Scale the data from 0 to 1 from sklearn.preprocessing import MinMaxScaler scaler=MinMaxScaler() scaler.fit(ds_log) ds_log_scaled=scaler.transform(ds_log) #Transpose Scaled data for Fuzzy Cluster Algorithm ds_log_scaled=ds_log_scaled.T #Plot permeability vs porosity plt.figure() plt.plot(dataset['Porosity'],dataset['Permeability'],'ro') plt.xlabel('Porosity(%)') plt.ylabel('Permeability(md)') #Plot permeability vs porosity with scaled data plt.figure() plt.plot(ds_log_scaled[0,:],ds_log_scaled[1,:],'ro') plt.xlabel('Porosity') plt.ylabel('Permeability') # Defining loops for Fuzzy C-means clustering and visualization with 8 #plots import numpy as np seed=50 np.random.seed(seed) fig1, axes1=plt.subplots(2, 4, figsize=(12, 8)) fig1.suptitle('Fuzzy c-means clustering for Log scaled data') fpcs=[ ] n=2 for ax in axes1.reshape(-1): cntr, u, u0, d, jm, p, fpc=skfuzzy.cluster.cmeans(ds_log_scaled, n, 1.5, error=0.001, maxiter=500,init=None) # Plotting defined classes, for each data point in the data set cluster_membership=np.argmax(u, axis=0) for i in range(n): ax.plot(ds_log_scaled[0,:][cluster_membership==i], ds_log_scaled[1,:][cluster_membership==i], '.', color=colors[i]) # Mark the centroid for each class for x in cntr: ax.plot(x[0], x[1],'r*') ax.set_title('Centers={0}; FPC={1:.2f}'.format(n, fpc)) # Fuzzy partition coefficient storing fpcs.append(fpc) n = n + 1 #Plot fuzzy partition coefficient vs number of classes plt.plot(np.arange(2,10), fpcs) plt.xlabel("Number of Classes") plt.ylabel("Fuzzy Partition Coefficient(FCP)")Predicting Wine Quality by using Watson Machine Learning This notebook contains steps and code to create a predictive model to predict Wine Quality and then deploy that model to Watson Machine Learning so it can be used in an application. Learning GoalsThe learning goals of this notebook are:* Load a CSV file into the Object Storage Service linked to my Data Science Experience * Create an Pandas machine learning model* Train and evaluate a model* Persist a model in a Watson Machine Learning repository 1. SetupBefore you use the sample code in this notebook, you must perform the following setup tasks:* Create a Watson Machine Learning Service instance (a free plan is offered) and associate it with your project* Upload wine quality data to the Object Store service that is part of your data Science Experience trial 2. Load and explore dataIn this section load the data as Pandas DataFrame and perform a basic exploration.Load the data to the Pandas DataFrame from the associated Object Storage instance.import types import pandas as pd from ibm_botocore.client import Config import ibm_boto3 def __iter__(self): return 0 # @hidden_cell # The following code accesses a file in your IBM Cloud Object Storage. 
It includes your credentials. # You might want to remove those credentials before you share your notebook. client_8c14de1d42f947c2812d9d92c44fc593 = ibm_boto3.client(service_name='s3', ibm_api_key_id='', ibm_auth_endpoint="https://iam.bluemix.net/oidc/token", config=Config(signature_version='oauth'), endpoint_url='https://s3-api.us-geo.objectstorage.service.networklayer.com') body = client_8c14de1d42f947c2812d9d92c44fc593.get_object(Bucket='default-donotdelete-pr-ijbvuypsfmigjt',Key='winequality-red.csv')['Body'] # add missing __iter__ method, so pandas accepts body as file-like object if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body ) red = pd.read_csv(body) red.head() body = client_8c14de1d42f947c2812d9d92c44fc593.get_object(Bucket='default-donotdelete-pr-ijbvuypsfmigjt',Key='winequality-white.csv')['Body'] # add missing __iter__ method, so pandas accepts body as file-like object if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body ) white = pd.read_csv(body) white.head()Explore the loaded data by using the following Pandas DataFrame methods:* print dataframe info* print head records* count tail records* print sample records* describe dataframe* check isnull values in dataframe# Print info on white wine print(white.info()) # Print info on red wine print(red.info()) # First rows of `red` red.head() # Last rows of `white` white.tail() # Take a sample of 5 rows of `red` red.sample(5) # Describe `white` white.describe() # Double check for null values in `red` pd.isnull(red)3 Interactive Visualizations with Matplotlib and Numpy 3.1: Visualize Alcohol vs FrequencyDistribution of Alcohol in % Vol for red and white wines.import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 2) ax[0].hist(red.alcohol, 10, facecolor='red', alpha=0.5, label="Red wine") ax[1].hist(white.alcohol, 10, facecolor='white', ec="black", lw=0.5, alpha=0.5, label="White wine") fig.subplots_adjust(left=0, right=1, bottom=0, top=0.5, hspace=0.05, wspace=1) ax[0].set_ylim([0, 1000]) ax[0].set_xlabel("Alcohol in % Vol") ax[0].set_ylabel("Frequency") ax[1].set_xlabel("Alcohol in % Vol") ax[1].set_ylabel("Frequency") fig.suptitle("Distribution of Alcohol in % Vol") plt.show()3.2: Print histograms of alcohol using NumpyHistogram of alcohol for red and white wines.import numpy as np print(np.histogram(red.alcohol, bins=[7,8,9,10,11,12,13,14,15])) print(np.histogram(white.alcohol, bins=[7,8,9,10,11,12,13,14,15]))(array([ 0, 7, 673, 452, 305, 133, 21, 8]), array([ 7, 8, 9, 10, 11, 12, 13, 14, 15])) (array([ 0, 317, 1606, 1256, 906, 675, 131, 7]), array([ 7, 8, 9, 10, 11, 12, 13, 14, 15]))3.3: Visualize Quality vs SulphatesQuality vs Sulphates for red and white wines.import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 2, figsize=(8, 4)) ax[0].scatter(red['quality'], red["sulphates"], color="red") ax[1].scatter(white['quality'], white['sulphates'], color="white", edgecolors="black", lw=0.5) ax[0].set_title("Red Wine") ax[1].set_title("White Wine") ax[0].set_xlabel("Quality") ax[1].set_xlabel("Quality") ax[0].set_ylabel("Sulphates") ax[1].set_ylabel("Sulphates") ax[0].set_xlim([0,10]) ax[1].set_xlim([0,10]) ax[0].set_ylim([0,2.5]) ax[1].set_ylim([0,2.5]) fig.subplots_adjust(wspace=0.5) fig.suptitle("Wine Quality by Amount of Sulphates") plt.show()3.4: Visualize Quality vs Volatile Acidity vs AlcoholQuality vs volatile acidity vs alcohol for red and white wines.import matplotlib.pyplot as plt import numpy as np np.random.seed(570) redlabels = 
np.unique(red['quality']) whitelabels = np.unique(white['quality']) import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 2, figsize=(8, 4)) redcolors = np.random.rand(6,4) whitecolors = np.append(redcolors, np.random.rand(1,4), axis=0) for i in range(len(redcolors)): redy = red['alcohol'][red.quality == redlabels[i]] redx = red['volatile acidity'][red.quality == redlabels[i]] ax[0].scatter(redx, redy, c=redcolors[i]) for i in range(len(whitecolors)): whitey = white['alcohol'][white.quality == whitelabels[i]] whitex = white['volatile acidity'][white.quality == whitelabels[i]] ax[1].scatter(whitex, whitey, c=whitecolors[i]) ax[0].set_title("Red Wine") ax[1].set_title("White Wine") ax[0].set_xlim([0,1.7]) ax[1].set_xlim([0,1.7]) ax[0].set_ylim([5,15.5]) ax[1].set_ylim([5,15.5]) ax[0].set_xlabel("Volatile Acidity") ax[0].set_ylabel("Alcohol") ax[1].set_xlabel("Volatile Acidity") ax[1].set_ylabel("Alcohol") #ax[0].legend(redlabels, loc='best', bbox_to_anchor=(1.3, 1)) ax[1].legend(whitelabels, loc='best', bbox_to_anchor=(1.3, 1)) #fig.suptitle("Alcohol - Volatile Acidity") fig.subplots_adjust(top=0.85, wspace=0.7) plt.show()4. Create Pandas machine learning modelIn this section I prepare data, create and train Pandas machine learning model. 4.1: Prepare dataIn this subsection data is joined and prepared: labels are separated from the features.# Append `white` to `red` wines = red.append(white, ignore_index=True) wines.shape # Isolate target labels y = wines.quality # Isolate data X = wines.drop('quality', axis=1)4.2: Visualize data using Seaborn heatmapHeatmapimport seaborn as sns corr = wines.corr() sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values, cmap="YlGnBu")4.3: Preprocess DataStandardize features by removing the mean and scaling to unit variance# Import `StandardScaler` from `sklearn.preprocessing` from sklearn.preprocessing import StandardScaler import numpy as np # Scale the data with `StandardScaler` X = StandardScaler().fit_transform(X)4.4: Creating modelCreating model using K fold validation partitionsimport numpy as np from sklearn.model_selection import StratifiedKFold from keras.models import Sequential from keras.layers import Dense from keras.optimizers import SGD, RMSprop seed = 7 np.random.seed(seed) kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed) for train, test in kfold.split(X, y): model = Sequential() model.add(Dense(128, input_dim=11, activation='relu')) model.add(Dense(1)) # rmsprop = RMSprop(lr=0.001) sgd=SGD(lr=0.01) model.compile(optimizer=sgd, loss='mse', metrics=['mae']) # model.compile(optimizer='rmsprop', loss='mse', metrics=['mae']) model.fit(X[train], y[train], epochs=10, verbose=1)Epoch 1/10 5195/5195 [==============================] - 0s - loss: 2.3357 - mean_absolute_error: 1.0516 Epoch 2/10 5195/5195 [==============================] - 0s - loss: 0.7159 - mean_absolute_error: 0.6542 Epoch 3/10 5195/5195 [==============================] - 0s - loss: 0.5845 - mean_absolute_error: 0.5949 Epoch 4/10 5195/5195 [==============================] - 0s - loss: 0.5359 - mean_absolute_error: 0.5701 Epoch 5/10 5195/5195 [==============================] - 0s - loss: 0.5145 - mean_absolute_error: 0.5563 Epoch 6/10 5195/5195 [==============================] - 0s - loss: 0.5057 - mean_absolute_error: 0.5530 Epoch 7/10 5195/5195 [==============================] - 0s - loss: 0.4992 - mean_absolute_error: 0.5495 Epoch 8/10 5195/5195 [==============================] - 0s - loss: 0.4900 - mean_absolute_error: 0.5453 
Epoch 9/10 5195/5195 [==============================] - 0s - loss: 0.4872 - mean_absolute_error: 0.5442 Epoch 10/10 5195/51[...]4.5: Evaluate modelEvaluate model by checking Mean Squared Error (MSE) and the Mean Absolute Error (MAE) and R2 score or the regression score functionmse_value, mae_value = model.evaluate(X[test], y[test], verbose=0) print(mse_value) print(mae_value) from sklearn.metrics import r2_score y_pred = model.predict(X[test]) r2_score(y[test], y_pred) y_pred # Model output shape model.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_29 (Dense) (None, 128) 1536 _________________________________________________________________ dense_30 (Dense) (None, 1) 129 ================================================================= Total params: 1,665 Trainable params: 1,665 Non-trainable params: 0 _________________________________________________________________4.6: Compare the resultsCheck the predictionsy_pred = y_pred.astype(int) predictions = np.column_stack((y[test], y_pred)); print(predictions[:15])[[5 5] [5 4] [7 5] [5 5] [4 4] [5 5] [6 5] [5 5] [6 5] [5 5] [5 4] [5 5] [5 6] [6 6] [5 4]]5. Persist modelImport client libraries.wml_credentials={ "url": "https://ibm-watson-ml.mybluemix.net", "username": "0faaa0df-0f3a-4aa7-835d-8929d6cda36e", "password": "", "instance_id": "2074d379-2141-42dc-9340-a78bf626de46" } from watson_machine_learning_client import \ WatsonMachineLearningAPIClient client = WatsonMachineLearningAPIClient(wml_credentials) h5file = 'winemodel-keras.h5' gzfile = 'winemodel-keras.tar.gz' model.save(h5file) import tarfile with tarfile.open(gzfile, 'w:gz') as tf: tf.add(h5file) metadata = { client.repository.ModelMetaNames.NAME: 'Wine Quality Prediction Model', client.repository.ModelMetaNames.FRAMEWORK_NAME: 'tensorflow', client.repository.ModelMetaNames.FRAMEWORK_VERSION: '1.3', client.repository.ModelMetaNames.RUNTIME_NAME: 'python', client.repository.ModelMetaNames.RUNTIME_VERSION: '3.5', client.repository.ModelMetaNames.FRAMEWORK_LIBRARIES: [{'name':'keras', 'version': '2.1.3'}] } published_model = client.repository.store_model(model=gzfile, meta_props=metadata) published_model6. 
Load model to verify that it was saved correctlyYou can load your model to make sure that it was saved correctly.import json import requests from base64 import b64encode token_url = service_path + "/v3/identity/token" # NOTE: for python 2.x, uncomment below, and comment out the next line of code: #userAndPass = b64encode(bytes(username + ':' + password)).decode("ascii") # Use below for python 3.x, comment below out for python 2.x userAndPass = b64encode(bytes(username + ':' + password, "utf-8")).decode("ascii") headers = { 'Authorization' : 'Basic %s' % userAndPass } response = requests.request("GET", token_url, headers=headers) watson_ml_token = json.loads(response.text)['token'] print(watson_ml_token)eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJ0ZW5hbnRJZCI6IjIwNzRkMzc5LTIxNDEtNDJkYy05MzQwLWE3OGJmNjI2ZGU0NiIsImluc3RhbmNlSWQiOiIyMDc0ZDM3OS0yMTQxLTQyZGMtOTM0MC1hNzhiZjYyNmRlNDYiLCJwbGFuSWQiOiIzZjZhY2Y0My1lZGU4LTQxM2EtYWM2OS1mOGFmM2JiMGNiZmUiLCJyZWdpb24iOiJ1cy1zb3V0aCIsInVzZXJJZCI6IjRhMGU2NzRkLTBmYzUtNDkzMi05NGExLTgxZDc4MGE0NGNlZSIsImlzcyI6Imh0dHBzOi8vdXMtc291dGgubWwuY2xvdWQuaWJtLmNvbS92My9pZGVudGl0eSIsImlhdCI6MTU1MTE5MTg1OCwiZXhwIjoxNTUxMjIwNjU4LCJjcmVhdGVkVGltZSI6MTU1MTE5MTg1OH0.BU28YSSv2P2iFBwINWKlkN9NkorRkXqLU0hS9uxmuRWKpNRPx1V-cx8Ix3xFrxkicMTqzEBwUXi0UP8iB5KyjRNrZO_OqwBoqmn04Jyf6tPIzJARNOVfkIzr2_bazSK6mpYWboJWsOYlnFq37R_mQRQGdmxLaExwe1W8leohKSJwpdVMszlcOHfS7nXLo2j1kGPaGSCbKJCj0lPcN4-Od4QBeRmlhy4mA9ucGfIkAmKDbryV5GWQR7McyVYB4izcBeJdaZHbLVN073_myD-fjRkMq13cOemoa9ynFQYH_Jy4I0YSmsVIowJk6c5kZb0UNCrRhq-CWW19-t9xA9GRLg6.2 Preview currenly published modelsmodel_url = service_path + "/v3/wml_instances/" + instance_id + "/published_models" headers = {'authorization': 'Bearer ' + watson_ml_token } response = requests.request("GET", model_url, headers=headers) published_models = json.loads(response.text) print(json.dumps(published_models, indent=2)){ "limit": 1000, "first": { "url": "https://ibm-watson-ml.mybluemix.net/v3/wml_instances/2074d379-2141-42dc-9340-a78bf626de46/published_models?limit=1000" }, "resources": [ { "metadata": { "created_at": "2019-02-21T13:31:38.699Z", "url": "https://ibm-watson-ml.mybluemix.net/v3/wml_instances/2074d379-2141-42dc-9340-a78bf626de46/published_models/4116080a-a8f8-4147-9cdd-79bec04900ea", "modified_at": "2019-02-21T13:38:58.094Z", "guid": "4116080a-a8f8-4147-9cdd-79bec04900ea" }, "entity": { "latest_version": { "created_at": "2019-02-21T13:31:38.812Z", "url": "https://ibm-watson-ml.mybluemix.net/v3/ml_assets/models/4116080a-a8f8-4147-9cdd-79bec04900ea/versions/1cea7dd4-1637-4173-8c7f-c17224f39ac1", "guid": "1cea7dd4-1637-4173-8c7f-c17224f39ac1" }, "runtime_environment": "spark-2.1", "name": "Heart Failure Prediction Model", "learning_configuration_url": "http[...]Read the details of any returned modelsprint('{} model(s) are available in your Watson ML Service'.format(len(published_models['resources']))) for model in published_models['resources']: print('\t- name: {}'.format(model['entity']['name'])) print('\t model_id: {}'.format(model['metadata']['guid'])) print('\t deployments: {}'.format(model['entity']['deployments']['count']))3 model(s) are available in your Watson ML Service - name: Heart Failure Prediction Model model_id: 4116080a-a8f8-4147-9cdd-79bec04900ea deployments: 1 - name: Wine Quality Prediction Model model_id: 213f75a7-406f-4c9d-b1ca-6f07edc7bbca deployments: 2 - name: Wine Quality Prediction Model model_id: 419dc1fe-812e-4bc9-9d6c-ee61c1febced deployments: 1Create a new deployment of the Model# Update this `model_id` with the model_id from model 
that you wish to deploy listed above. model_id = '213f75a7-406f-4c9d-b1ca-6f07edc7bbca' deployment_url = service_path + "/v3/wml_instances/" + instance_id + "/published_models/" + model_id + "/deployments" payload = "{\"name\": \"Wine Quality Prediction Model Deployment\", \"description\": \"First deployment of Wine Quality Prediction Model\", \"type\": \"online\"}" headers = {'authorization': 'Bearer ' + watson_ml_token, 'content-type': "application/json" } response = requests.request("POST", deployment_url, data=payload, headers=headers) print(response.text) deployment = json.loads(response.text) print('Model {} deployed.'.format(model_id)) print('\tname: {}'.format(deployment['entity']['name'])) print('\tdeployment_id: {}'.format(deployment['metadata']['guid'])) print('\tstatus: {}'.format(deployment['entity']['status'])) print('\tscoring_url: {}'.format(deployment['entity']['scoring_url']))Model 213f75a7-406f-4c9d-b1ca-6f07edc7bbca deployed. name: Wine Quality Prediction Model Deployment deployment_id: 7de2f5a3-44a1-439d-ada8-ce9e4c73eb60 status: DEPLOY_SUCCESS scoring_url: https://ibm-watson-ml.mybluemix.net/v3/wml_instances/2074d379-2141-42dc-9340-a78bf626de46/published_models/213f75a7-406f-4c9d-b1ca-6f07edc7bbca/deployments/7de2f5a3-44a1-439d-ada8-ce9e4c73eb60/onlineMonitor the status of deployment# Update this `deployment_id` from the newly deployed model from above. deployment_id = "7de2f5a3-44a1-439d-ada8-ce9e4c73eb60" deployment_details_url = service_path + "/v3/wml_instances/" + instance_id + "/published_models/" + model_id + "/deployments/" + deployment_id headers = {'authorization': 'Bearer ' + watson_ml_token, 'content-type': "application/json" } response = requests.request("GET", deployment_url, headers=headers) print(response.text) deployment_details = json.loads(response.text) for resources in deployment_details['resources']: print('name: {}'.format(resources['entity']['name'])) print('status: {}'.format(resources['entity']['status'])) print('scoring url: {}'.format(resources['entity']['scoring_url']))name: Heart Failure Prediction Model Deployment status: DEPLOY_SUCCESS scoring url: https://ibm-watson-ml.mybluemix.net/v3/wml_instances/2074d379-2141-42dc-9340-a78bf626de46/deployments/1bf4866e-6be7-4bca-be6d-18db9419e17f/online name: Wine Quality Prediction Model Deployment status: DEPLOY_SUCCESS scoring url: https://ibm-watson-ml.mybluemix.net/v3/wml_instances/2074d379-2141-42dc-9340-a78bf626de46/deployments/7de2f5a3-44a1-439d-ada8-ce9e4c73eb60/online6.3 Invoke prediction model deploymentDefine a method to call scoring url. 
Replace the **scoring_url** in the method below with the scoring_url returned from above.def get_prediction_ml(fa, va, ca, rs, ch, fsd, tsd, d, p, s, a): scoring_url = 'https://ibm-watson-ml.mybluemix.net/v3/wml_instances/2074d379-2141-42dc-9340-a78bf626de46/deployments/7de2f5a3-44a1-439d-ada8-ce9e4c73eb60/online' scoring_payload = { "fields":["fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide", "total sulfur dioxide", "density", "pH", "sulphates", "alcohol"],"values":[[fa, va, ca, rs, ch, fsd, tsd, d, p, s, a]]} header = {'authorization': 'Bearer ' + watson_ml_token, 'content-type': "application/json" } scoring_response = requests.post(scoring_url, json=scoring_payload, headers=header) print(scoring_response.text) return (json.loads(scoring_response.text).get("values")[0][0]) data = np.array([7, 0.27, 0.36 ,20.7 ,0.045 ,45 ,170 ,1.001 ,3 ,0.45 ,8.8]).reshape(-1, 1) data = StandardScaler().fit_transform(data) data.T # print('What is quality of wine with such characteristics?: {}'.format(get_prediction_ml(0.45, 3.28, -2.19, -0.59, 1.19, -0.31, -0.86, 0.7, -0.11, 0.99, -0.58)))Exercise 4 - R: Neural Networks Learning Part I: *Neural Networks*Recognizing handwritten digits. Though the source is not explicitly mentioned, it is just like the [MNIST database](https://en.wikipedia.org/wiki/MNIST_database).Each value of $X$ will be a 20x20 grid of values representing the grayscale intensity at that location "unrolled" into a 400-dimensional vector. Here is an example for the first number in our data, $X^{(1)}$:$\hspace{1cm} X^{(1)} = \begin{bmatrix}x_{(1, 1)}^{(1)},\ x_{(1, 2)}^{(1)},\ \dots\ x_{(1, 20)}^{(1)} \\ x_{(2, 1)}^{(1)},\ x_{(2, 2)}^{(1)},\ \dots\ x_{(2, 20)}^{(1)} \\ \vdots \\ x_{(20, 1)}^{(1)},\ x_{(20, 2)}^{(1)},\ \dots\ x_{(20, 20)}^{(1)} \\ \end{bmatrix}\rightarrow \begin{bmatrix} x_1^{(1)},\ x_2^{(1)},\ \dots\ x_{400}^{(1)} \end{bmatrix}\rightarrow (x^{(1)})^T$Here is our collection of all of the numbers for $X$:$\hspace{1cm} X = \begin{bmatrix} (x^{(1)})^T \\ (x^{(2)})^T \\ \vdots \\ (x^{(400)})^T \end{bmatrix}$---Beginning with package imports, data loading, and initial visualizationrm(list=ls()) # Clearing all environment variables suppressPackageStartupMessages({ library(R.matlab) # Reads MATLAB/Octave matrices library(utils) # Used for a status bar in lengthy for loops library(nnet) # For comparing answers library(ggplot2) library(tidyr) library(dplyr) }) theme_update(plot.title = element_text(hjust = 0.5)) # Centers ggplot2 titles # Loading in the data from the .mat files data <- readMat('ex4/ex4data1.mat') # Assigning X/y and removing data from environment X <- data$X y <- data$y # Loading in the weights from the .mat files data <- readMat('ex4/ex4weights.mat') # Assigning theta1/2 and removing data from environment theta1 <- data$Theta1 theta2 <- data$Theta2 rm(data) # Clearing data from environment print(dim(X))[1] 5000 400Note: y has 0 listed as 10, but we will be leaving this alone until our final predictions since the thetas we are provided for the first part of the exercise are structured for this. 
In other words, it would be more trouble than it's worth to reshape our provided thetas.# Randomly selecting 100 digits to plot rowsToPlot <- X[sample(nrow(X),size=100,replace=FALSE),] row_to_matrix <- function(row) { # Formats a row from the matrix X into a matrix suitable for plotting with image() digit <- matrix(row, nrow=20, ncol=20) # Puts row into a 20x20 matrix digit <- t(apply(digit, 2, rev)) # Rotates matrix into correct position for image() return(digit) } plot_digits <- function(digits) { # Plots a grid of the supplied digits # Calculating parameters for the plot m <- dim(digits)[1] # Number of digits to plot columns <- floor(sqrt(m)) # Number of columns rows <- ceiling(sqrt(m)) # Number of rows # Parameters for the overall plot par(mfrow=c(rows, columns), # Rows, columns mar=c(0.0, 0.0, 0.0, 0.0)) # Margins between plots # Looping through the digits and plotting them for (i in 1:m) { image(row_to_matrix(digits[i, ]), # Formats digit col=grey.colors(255), # Colors to gray scale xaxt='n', yaxt='n') # Remoxes axis labels and tick marks } } plot_digits(rowsToPlot)Feedforward and Cost Function$J(\theta) = \frac{1}{m} \sum_{i=1}^m \sum_{k=1}^K \big[-y_k^{(i)}log((h_\theta(x^{(i)}))_k) - (1-y_k^{(i)}) log(1-(h_\theta(x^{(i)}))_k) \big] + \frac{\lambda}{2m} \big[\sum_{j=1}^{25} \sum_{k=1}^{400} (\theta_{j, k}^{(1)})^2 +\sum_{j=1}^{10} \sum_{k=1}^{25} (\theta_{j, k}^{(2)})^2 \big]$Note: $h_\theta(x^{(i)})_k = a^{(3)}_k$, or the activation of the $k$-th output unit Regularization Parameter$\frac{\lambda}{2m} \big[\sum_{j=1}^{25} \sum_{k=1}^{400} (\theta_{j, k}^{(1)})^2 +\sum_{j=1}^{10} \sum_{k=1}^{25} (\theta_{j, k}^{(2)})^2 \big]$Note: The regularization parameter ignores the bias unitsThe exercise states that the unregularized cost function should be around $0.287629$, and the regularized cost function with $\lambda = 1$ should be around $0.383770$sigmoid <- function(z) { # Calculates the sigmoid function for use as the activation function in the neural network z <- 1/(1+exp(-z)) return(z) } forward_propagate <- function(thetas, X) { # Forward propagates a sequential neural network with the provided input # Assumes X does not contain the bias term # Returns the following: # Prediction: Numerical predictions # ClassProbability: Class probabilities # Nodes: Values of the nodes pre-activation (excluding input layer) # Activations: Values of the nodes post-activation (exluding input layer) h <- as.matrix(X) # To store neuron values for backpropagation nodes <- list() activations <- list() activations[[1]] <- cbind(1, h) # Forward propagation iteration <- 1 # For getting similar function to Python's enumerate() for (theta in thetas) { h <- cbind(1, h) # Adding the bias term h <- as.matrix(h) %*% t(as.matrix(theta)) # Multiplying by the weights nodes[[iteration]] <- h h <- sigmoid(h) # Activation function activations[[iteration+1]] <- cbind(1, h) iteration <- iteration + 1 } prediction <- data.frame(h) colnames(prediction) <- c(1:9, 0) prediction <- colnames(prediction)[max.col(prediction,ties.method="first")] return(list("Prediction" = prediction, "ClassProbability" = h, "Nodes" = nodes, "Activations" = activations)) } nn_cost <- function(thetas, X, y, C=0.0) { # Returns the cost for a sequential neural network using the forward_propagate function m <- length(y) # Generating class probabilities h <- forward_propagate(thetas, X)$ClassProbability # One hot encoding y yOneHot <- y %>% data.frame(digit=.) 
%>% # Creating a data frame mutate(id = 1:n(), dummy = 1) %>% # Adding columns before the encoding spread(digit, value = dummy, fill = 0) %>% # One hot encoding select(-id) # Removing ID # Calculating the regularized cost error <- sum(-yOneHot * log(h) - (1 - yOneHot) * log(1 - h)) # Removing the bias term from regularization regParameter <- lapply(thetas, function(x) x[, -1]^2) %>% unlist() %>% sum() regParameter <- (C/(2*m)) * regParameter cost <- (1/m) * error + regParameter return(cost) } cat('Unregularized cost:', nn_cost(list(theta1, theta2), X, y), '\n') cat('Regularized cost:', nn_cost(list(theta1, theta2), X, y, 1.0))Unregularized cost: 0.2876292 Regularized cost: 0.3837699--- Part II: *Backpropagation*Here we'll learn parameters for our neural network through backpropagation. We have to first create two helper functions in order to conduct backpropagation:1. **restructure_theta:** The optimization function won't work with matrices, so we have to flatten our thetas into one long vector. This function will allow us to reshape the vector back into the original matrices for theta in order to use it with the **forward_propagate** function.2. **sigmoid_gradient:** The partial derivative of the sigmoid function which will be used to help calculate the gradient in backpropagationAfter that, we will have to create new values for our initial $\Theta^{(l)}$. We can't use $0$s like we did in the previous exercises because this more or less causes our nodes to drop out.We'll then finally build the backpropogation function, and then proceed with optimizing our $\Theta^{(l)}$ and generate predictions.# Creating a long theta to plug into functions longTheta <- c(as.vector(t(theta1)), as.vector(t(theta2))) restructure_thetas <- function(long_theta, thetas) { # Reshapes a long array of thetas into matrices for each theta counter <- 1 indices <- 0 reshapedThetas <- list() for (theta in thetas) { if (counter == 1) { reshapedTheta <- matrix(long_theta[1:length(theta)], nrow=dim(theta)[1], ncol=dim(theta)[2], byrow=TRUE) reshapedThetas[[counter]] <- reshapedTheta indices <- indices + length(theta) counter <- counter + 1 } else { reshapedTheta <- matrix(long_theta[(1+indices):(indices+length(theta))], nrow=dim(theta)[1], ncol=dim(theta)[2], byrow=TRUE) reshapedThetas[[counter]] <- reshapedTheta indices <- indices + length(theta) counter <- counter + 1 } } return(reshapedThetas) } cat('Original theta 2 shape:', dim(theta2), '\n') cat('Reshaped theta 2 shape:', dim(restructure_thetas(longTheta, list(theta1, theta2))[[2]]))Original theta 2 shape: 10 26 Reshaped theta 2 shape: 10 26Sigmoid Gradient$g'(z) = \frac{d}{dz}g(z) = g(z)(1-g(z))$where$sigmoid(z)=g(z)=\frac{1}{1+e^{-z}}$sigmoid_gradient <- function(z) { # Returns the gradient of a sigmoid function gradient <- sigmoid(z) * (1-sigmoid(z)) return(gradient) } sigmoid_gradient(0)Random InitializationRandomly selecting values for $\Theta^{(l)}$ uniformly in the range $[-\epsilon_{init}, \epsilon_{init}]$ since initial $\Theta^{(l)}$ values of $0$ will more or less cancel out the neurons they are assigned toWe will use the recommended approach for selecting $\epsilon$ by using the number of units in the network:$\epsilon_{init} = \frac{\sqrt(6)}{\sqrt{L_{in}+L_{out}}}$where$L_{in} = s_l$ and $L_{out} = s_{l+1}$, or the number of nodes in the input and output layers (excluding the bias nodes)# Num nodes in the input and output layers (excl. 
bias nodes) epsilon <- sqrt(6) / sqrt(400+10) initialThetas <- runif(length(longTheta), min=-epsilon, max=epsilon) length(initialThetas)BackpropagationAs a reminder, backpropagation is a way to learn parameters for neural networks. On a high level, we first propagate forward with initial parameters (which we created above by uniformly selecting between $[-\epsilon_{init}, \epsilon_{init}]$), then we obtain the error for our output nodes, and use that to do weighted averages moving backwards through the network to find which nodes in the hidden layer contributed to the error. We can then calculate the gradient which tells us how we can adjust the parameters to decrease the error. Finally, we pair it with an optimization algorithm in order to perform several iterations.Vectorized implementations of backpropagation exist, but stressed using a for loop that iterates through each training example for the sake of simplicity. Here is the mechanical approach for performing gradient descent:$\hspace{0.5cm}$ **Note:** These formulas are specific to our neural network with one hidden layer1. Set the input layer's values $(a^{(1)})$ to the $t$-th training example $x^{(t)}$. Perform a feedforward pass to compute the activations $(z^{(2)}, a^{(2)}, z^{(3)}, a^{(3)})$ for layers 2 and 3. Remember to add the $+1$ term to ensure the vectors of activation for layers $a^{(1)}$ and $a^{(2)}$ also include the bias unit.2. For each output unit $k$ in layer 3 (the output layer), set $$\delta_k^{(3)} = (a_k^{(3)} - y_k)$$ where $y_k \in \{0,1\}$ indicates whether the current training example belongs to class $k (y_k = 1)$, or if it belongs to a different class $(y_k = 0)$. This is an "error term" that measures how much that node was "responsible" for any errors in the output.3. For the hidden layer $l=2$, set $$\delta^{(2)} = (\theta^{(2)})^T \delta^{(3)}.*g'(z^{(2)})$$ This is the part that propagates backward, and like step 2, it is an "error" term that measures how much that node was "responsible" for any errors in the output. However, we determine this by a weighted average of the error terms in the output layer $(l+1)$.4. Accumulate the gradient from this training example using the following formula. Note that you should skip or remove $\delta_0^{(2)}$ $$ \Delta^{(l)} = \Delta^{(l)} + \delta^{(l+1)} (a^{(l)})^T$$5. Obtain the (unregularized) gradient for the neural network cost function by multiplying the accumulated gradients by $\frac{1}{m}$: $$\frac{\partial}{\partial \theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}$$5. 
Obtain the regularized gradient for the neural network cost function by multiplying the accumulated gradients by $\frac{1}{m}$ and applying the regularization term: $$\frac{\partial}{\partial \theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \begin{cases}\frac{1}{m}\Delta_{ij}^{(l)} & for\ j=0 \\ \frac{1}{m}\Delta_{ij}^{(l)} + \frac{\lambda}{m}\Theta_{ij}^{(l)} & for\ j\geq 1 \end{cases}$$Using the obtained gradient, we can adjust the values of $\Theta^{(l)}$ and do it all over again.backpropagate <- function(long_theta, thetas, X, y, C=0.0) { # Calculates the gradients for provided thetas through # long_thetas is the vector of all thetas, and is the first parameter # since this is what the optimization changes # Also calculates the cost using nn_cost() m <- length(y) # Reshaping long_theta into original form for forward_propagate() reshaped_thetas <- restructure_thetas(long_theta, thetas) # Gathering info from forward propagation forwardProp <- forward_propagate(reshaped_thetas, X) h <- forwardProp$ClassProbability nodes <- forwardProp$Nodes activations <- forwardProp$Activations cost <- nn_cost(reshaped_thetas, X, y, C) # One hot encoding y yOneHot <- y %>% data.frame(digit=.) %>% # Creating a data frame mutate(id = 1:n(), dummy = 1) %>% # Adding columns before the encoding spread(digit, value = dummy, fill = 0) %>% # One hot encoding select(-id) # Removing ID # Creating initial deltas of 0 to be filled delta1 <- numeric(length(reshaped_thetas[[1]])) delta2 <- numeric(length(reshaped_thetas[[2]])) # Looping through each individual digit in the training set for (digit in 1:m) { # Step 1: Grabbing appropriate items for the backpropagation calculation # Using t(matrix()) to quickly reshape for linear algebra input_nodes <- t(matrix(activations[[1]][digit, ])) # (1, 401) hidden_nodes <- t(matrix(nodes[[1]][digit, ])) # (1, 25) hidden_activation <- t(matrix(activations[[2]][digit, ])) # (1, 26) output_nodes <- h[digit, ] # (NULL) actual <- yOneHot[digit, ] # (1, 10) # Step 2: Calculating delta 3 - the "error term" assigning responsibilitiy # of errors to nodes in the output layer d3 <- matrix(output_nodes - actual) # (10, 1) hidden_nodes <- cbind(1, hidden_nodes) # Adding the bias term # (1, 26) # Step 3: Calculating delta 2 - the weighted average of error terms in # the output layer to assign responsibility to nodes in the hidden layer d2 <- as.numeric((t(as.numeric(d3)) %*% reshaped_thetas[[2]]) %*% t(sigmoid_gradient(hidden_nodes))) * sigmoid_gradient(hidden_nodes) # Step 4: Accumulating the gradients delta1 <- delta1 + as.matrix(d2[, -1]) %*% input_nodes delta2 <- delta2 + as.numeric(d3) %*% hidden_activation } # Step 5: Obtaining the gradient for the cost function delta1 <- (1/m) * delta1 delta2 <- (1/m) * delta2 # Adding the regularization terms delta1[, -1] <- delta1[, -1] + (reshaped_thetas[[1]][, -1] * C) / m delta2[, -1] <- delta2[, -1] + (reshaped_thetas[[2]][, -1] * C) / m # Combining the gradients into one long gradient for the optimization function gradient <- c(as.vector(t(delta1)), as.vector(t(delta2))) return(list("Cost" = cost, "Gradient" = gradient)) } backprop <- backpropagate(initialThetas, list(theta1, theta2), X, y, 1.0) backprop$Cost backprop$Gradient[1:5]Optimizing $\Theta^{(l)}$ and Predictions# Creating helper functions for the optimization function backpropagate.cost <- function(long_theta, thetas, X, y, C=0.0) { backpropagate(long_theta, thetas, X, y, C=0.0)$Cost } backpropagate.gradient <- function(long_theta, thetas, X, y, C=0.0) { backpropagate(long_theta, thetas, 
X, y, C=0.0)$Gradient } startTime <- proc.time() optimalTheta <- optim( # Specifying function parameters par=initialThetas, # Initial guess fn=backpropagate.cost, # Function to minimize gr=backpropagate.gradient, X=X, y=y, thetas=list(theta1, theta2), C=1.0, method="BFGS", # Optimization function to use control=list(maxit = 250) # Maximum number of iterations )$par # Specifying that we only want the obtained thetas optimalTheta[1:5] endTime <- proc.time() endTime - startTime initialThetaList <- restructure_thetas(initialThetas, list(theta1, theta2)) newThetaList <- restructure_thetas(optimalTheta, list(theta1, theta2)) cat('Initial cost:', nn_cost(initialThetaList, X, y), '\n') cat('Optimized theta cost:', nn_cost(newThetaList, X, y)) predictions <- forward_propagate(newThetaList, X)$Prediction results <- data.frame(Prediction = as.numeric(predictions)) %>% mutate(Actual = y) results$Correct <- ifelse(results$Prediction == results$Actual, 1, 0) cat('Our accuracy:', mean(results$Correct))Our accuracy: 0.7304--- Part III: *Visualizing the hidden layer*Here we will visualize the representations captured by the hidden layer. This is done by reshaping the hidden layer $\Theta$ into a 20x20 matrix and plotting it. This will give us an idea of the patterns our network is learning. It's a similar idea of [eigenfaces](https://en.wikipedia.org/wiki/Eigenface) (even though the math behind them is different), and it is really cool!hiddenTheta <- restructure_thetas(optimalTheta, list(theta1, theta2))[[1]][,-1] plot_hidden_layer <- function(theta) { # Visualizes the hidden layer by plotting the values of theta applied # to the input layer. Darker sections correspond to heavier weights. # Calculating parameters for the plot m <- dim(theta)[1] # Number of digits to plot columns <- floor(sqrt(m)) # Number of columns rows <- ceiling(sqrt(m)) # Number of rows # Parameters for the overall plot par(mfrow=c(rows, columns), # Rows, columns mar=c(0.0, 0.0, 0.0, 0.0)) # Margins between plots # Looping through the digits and plotting them for (i in 1:m) { image(row_to_matrix(theta[i, ]), # Formats theta xaxt='n', yaxt='n') # Remoxes axis labels and tick marks } } plot_digits(hiddenTheta)Code the logic of RISK strategy board game in case of two players. The attacker rolls 3 reddices and the defender rolls 2 blue dices. In step one, we look for the greatest value in thedices. The looser losts 1 soldier. If the greatest value is the same at both sides, only theattacker losts 1 soldier. In step two, we look for the second greatest value and decide aboutlosting 1 soldier as above. 
The game has 3 possible outcomes: attacker losts 2 soldiers, bothsides lost 1 soldier each, the defender losts 2 soldiers.a) Simulate the game 1000 times and calculate the relative frequency of the three possibleoutcomes.b) Simulate the game 1000000 times and calculate the relative frequency of the threepossible outcomes.c) Calculate the exact probability of the three outcomes by analyzing all possible cases(positive cases / all cases).Print out the results of tasks a, b and c in a tabular form.#Import needed libraries import random #variables Declaration attacker_dices = 3 defender_dices = 2 number_of_turns = int(input("please enter the number of game turn:")) outcome_dict = {'attacker':0, 'both':0, 'defender':0} # a functing to calcualte the results def risk_game (attacker_dices,defender_dices): #start throwing the dices #generating sets of dices probability for attacker and defender #attacker set attacker_score =[] for i in range(attacker_dices): score = random.randint(1,6) attacker_score.append(score) print(attacker_score) # defender set defender_score =[] for i in range(defender_dices): score=random.randint(1,6) defender_score.append(score) print(defender_score) # find the highest value in each set and compaier it to get the results: attacker_highest_dice = max(attacker_score) defender_highest_dice = max(defender_score) # print(attacker_highest_dice,defender_highest_dice) if attacker_highest_dice > defender_highest_dice: defender_dices -=1 else: attacker_dices -=1 return attacker_dices,defender_dices #define the number of games that we want to play for game in range(number_of_turns): #in each game we have just 2 rounds for round in range(2): attacker_dices,defender_dices = risk_game(attacker_dices,defender_dices) #calculating the outcome if attacker_dices == 3: # the defender lost 2 solders outcome_dict['defender'] +=1 elif attacker_dices == 2: # both lost 1 solder outcome_dict['both'] +=1 else: # the attacker losts 2 solders outcome_dict['attacker'] +=1 # reset the dices for a new game attacker_dices = 3 defender_dices = 2 # print(outcome_dict) # results probabilties = {} probabilties['attacker'] = outcome_dict['attacker']/number_of_turns probabilties['both'] = outcome_dict['both']/number_of_turns probabilties['defender'] = outcome_dict['defender']/number_of_turns print(outcome_dict) print(probabilties){'attacker': 2, 'both': 2, 'defender': 1} {'attacker': 0.4, 'both': 0.4, 'defender': 0.2}Chapter 5 Simplificationsfrom sympy import * x, y, z = symbols('x, y, z') init_printing(use_unicode=True)5.1 単純化 `sympy`のどんな数式も`simplify()`で簡単な形にできる!:simplify(sin(x)**2 + cos(x)**2) simplify((x**3 + x**2 - x - 1) / (x**2 + 2*x + 1)) simplify(gamma(x) / gamma(x-2)) #ガンマ関数(特殊関数)注意点:その1simplify(x**2 + 2*x + 1)---> **因数分解できない!!!** 因数分解は`factor()`関数を使う:factor(x**2 + 2*x + 1)注意点:その2 `simplify()`は遅い! 解決策- `simplify()`は「ある程度」簡単な形にまでしか変形できないので、確実に式を簡単にしたいなら、その用途に応じた適切な関数を使うべき! - インタラクティブシェルで`simplify`の挙動を見てから**個別の関数**(以下) を使って簡単にしよう. 5.2 多項式 / 有理式 5.2.1 `expand`関数 多項式を展開し、必要ならば項をキャンセルする.expand((x + 1)**2) expand((x + 2)*(x - 3))「式を展開する」ことで「式が簡単になる」ことがある。expand((x + 1)*(x - 2) - (x - 1)*x) #式がキャンセルし合う5.2.2 `factor`関数 数式を可能な限り因数分解するfactor(x**3 - x**2 + x - 1) factor(x**2*z + 4*x*y*z + 4*y**2*z) factor_list(x**2*z + 4*x*y*z + 4*y**2*z) #(変数or定数, べき)三角関数程度の式なら、関数`factor`, `expand`で対応可能expand((cos(x) + sin(x))**2) factor(cos(x)**2 + 2*cos(x)*sin(x) + sin(x)**2)5.2.3 `collect`関数 特定の変数でまとめたり、特定次の係数を取り出す.expr = x*y + x -3 + 2*x**2 - z*x**2 + x**3 expr collected_expr = collect(expr, x) #xでまとめる. 
collected_exprさらに以下のようにcoeffメソッドで特定次を取り出せる.collected_expr.coeff(x, 2) #xの2次だけ取り出す.5.2.4 `cancel`関数 有理式を簡単にするcancel((x**2 + 2*x + 1) / (x**2 + x)) expr = 1/x + (2*x/2 - 2) /(x - 4) expr cancel(expr) #分母を通分する factor(expr) #factorも同じような操作をする. expr = (x*y**2 - 2*x*y*z + x*z**2 + y**2 - 2*y*z + z**2) / (x**2 - 1) expr cancel(expr) factor(expr) #factorも同じような変形をする.**コメント**式を単にキャンセルさせてシンプルにさせたいときは、`factor()`より`cancel()`のほうが効率的 5.2.5 `apart`関数 有理式(分数)を部分分数分解するx = symbols('x') expr = (4*x**3 + 21*x**2 + 10*x + 12) / (x**4 + 5*x**3 + 5*x**2 + 4*x) expr apart(expr)5.3 三角関数 **コメント**: 逆三角関数は頭に"a"を付ける: acos, asin, atan, etc...acos(x) cos(acos(x)) asin(1)5.3.1 `trigsimp`関数 三角関数の表式を、公式を用いて可能な限りシンプルな形にする.trigsimp(sin(x)**2 + cos(x)**2) trigsimp(sin(x)**4 - 2*cos(x)**2*sin(x)**2 + cos(x)**4) trigsimp(sin(x)*tan(x)/sec(x)) trigsimp(cosh(x)**2-sinh(x)**2)5.3.2 `expand_trig`関数 三角関数の式を展開する。 `trigsimp`と`expand_trig`は完全に逆の操作をするexpand_trig(sin(x + y)) expand_trig(tan(2*x))5.4 べき乗x, y = symbols('x y', positive=True) #変数が正であると仮定 a, b = symbols('a, b', real = True) #変数が実数であると仮定 z, t, c = symbols('z t c')**コメント**: `sqrt(x)`と`x**Rational(1,2)`, `x**0.5`, `x**(1/2)`は同じsqrt(x) x**Rational(1,2) x**(0.5) x**(1/2)5.4.1 `powsimp` 関数 冪が変数(`Sympy`シンボル)のときに限り、シンプルな形にするpowsimp(x**a*x**b) #これ以上簡単にできない. powsimp(x**a*y**a)変数の仮定にかかわらず実行させたいとき:powsimp(t**c*z**c)powsimp(t**c*z**c, force=True)とする. `t` もしくは `z` が負になっても強制的にこの変形は行われる.(z*t)**2 #冪が整数、有理数, 2のとき. sqrt(x*y) #同じ**注意** このような式に対しては`powsimp`は使えない:powsimp(z**2*t**2) #指数が整数 sqrt(x*y)--->冪が変数のときに`powsimp`で簡単にできる. 5.4.2 `expand_power_expr`関数, `expand_power_base`関数 べき乗を展開する. `powsimp`関数と逆の操作expand_power_exp(x**(a + b)) expand_power_base((x*y)**a)**注意** これも`powsimp()`と同様で、変形できないときは元の式を返す:expand_power_base((z*t)**c)`t*z`が正という条件を`symbols`でつけていれば展開できるが、今回のようにそうと限らないときは展開してくれない. 強制的に行うにはexpand_power_base((z*t)**c, force=True)とする. また冪が数のときはx**2*x**3 expand_power_exp(x**5)のように変形できない。 5.4.3 `powdenest`関数 べき乗のべき乗を展開(x**a)**b #カッコを外して展開 powdenest((x**a)**b) powdenest((z**a)**b) powdenest((z**a)**b, force=True)5.5 指数関数、対数関数ln(x) #ln(x)とlog(x)は同じ. log(x) x, y = symbols('x y', positive=True) n = symbols('n', real=True)5.5.1 `expand_log`関数 対数関数を展開するexpand_log(log(x*y)) expand_log(log(x/y)) expand_log(log(x**2)) expand_log(log(x**n)) expand_log(log(z*t))**注意** これまでと同様にして、正でない変数は展開できないので、そのときは`Force=True`オプションを付ける。expand_log(log(z**2)) expand_log(log(z**2), force=True)5.5.2 `logcombine`関数 対数関数をシンプルにする.logcombine(log(x) + log(y)) #対数関数を簡単にする logcombine(n*log(x)) logcombine(n*log(z)) logcombine(n*log(z), force=True)5.6 特殊関数x, y, z = symbols('x y z') k, m, n = symbols('k m n')5.6.1 階乗factorial(n) factorial(10)5.6.2 組み合わせ (Combination)binomial(n, k) #nCk combsimp(factorial(n) / factorial(n - 3)) #シンプルにする combsimp(binomial(n + 1, k + 1) / binomial(n, k))5.6.3 ガンマ関数gamma(z) combsimp(gamma(x)*gamma(1 - x)) #ガンマ関数にも使える5.6.4 一般化された超幾何関数hyper([1, 2], [3], z)5.6.5 関数を別の関数で書き換えるtan(x).rewrite(sin) #tanをsinで書き換える factorial(x).rewrite(gamma) #階乗をガンマ関数で書き換える5.6.6 特殊関数をいくつかの恒等式で書き換えるexpand_func(gamma(x + 3))New Algorithms for Simulating Dynamical Friction , , — *RadiaSoft, LLC*This notebook describes—and documents in code—algorithms for simulating the dynamical friction experienced by ions in the presence of magnetized electrons. 
The $\LaTeX$ preamble is *here*.$$%% math text\newcommand{\hmhsp}{\mspace{1mu}}% math hair space\newcommand{\mhsp}{\mspace{2mu}}% math hair space\newcommand{\ud}{\mathop{}\!\mathrm{d}}% upright d for differential\newcommand{\ui}{\mathrm{i}}% upright i for imaginary unit\newcommand{\ue}{\mathrm{e}}% upright e for Euler number%%\newcommand{\Mion}{m_\text{ion}}\newcommand{\Me}{m_\text{e}}%%\newcommand{\vQion}{\vec{q}_\text{ion}}\newcommand{\vPion}{\vec{p}_\text{ion}}\newcommand{\Qion}[1]{1_\text{ion}}\newcommand{\Pion}[1]{p_{\text{ion},\hmhsp1}}%%\newcommand{\vQe}{\vec{q}_\text{e}}\newcommand{\vPe}{\vec{p}_\text{e}}\newcommand{\Qe}[1]{1_\text{e}}\newcommand{\Pe}[1]{p_{\text{e},\hmhsp1}}%%\newcommand{\Map}[2][]{\mathcal{2}^{1}}%%\newcommand{\pgc}{p_\text{gc}}\newcommand{\xgc}{x_\text{gc}}\newcommand{\ygc}{y_\text{gc}}$$""" Python preamble """ %matplotlib inline print mp """ Python preamble (cont.) """ from __future__ import division import numpy as np import math import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import LogNorm import matplotlib as mpl from scipy.constants import pi from scipy.constants import speed_of_light as clight from scipy.constants import epsilon_0 as eps0 from scipy.constants import mu_0 as mu0 from scipy.constants import elementary_charge as qe from scipy.constants import electron_mass as me from scipy.constants import proton_mass as mp from scipy.constants import Boltzmann as kB fourPiEps0 = 4 * pi * eps0 invFourPiEps0 = 1 / fourPiEps0 """ reset some default options """ np.set_printoptions(linewidth=96) """ indexing """ (Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6) """ prefixes """ (femto, pico, nano, micro, milli, one, kilo, mega, giga, tera, peta) = \ 10. ** np.asarray(range(-15, 15+1, 3))We define the ion charge and mass here as global parameters. We do the same for the magnetic field strength $B$ and the thermal velocity $v_\text{th}$. Then we compute various related derived quantities.""" angular frequency of Larmor rotations NB: This is a *signed* quantity, which means that for electrons, say, you must set Z = -1. """ def omega_Larmor(mass, B, Z = 1): return Z * qe * B / mass Z_ion = 1 M_ion = mp B_mag = 1. # Tesla e_temp = 300. 
# Kelvin N_gyro = 100 # a somewhat arbitrary choice, range [100, 160] """ derived quantities """ V_th = math.sqrt(2 * kB * e_temp / me) rho_gc = me * V_th / (qe * B_mag) Omega_e = omega_Larmor(me, B_mag, Z = -1) T_e = (2 * pi) / abs(Omega_e) T_intxn = N_gyro * T_e print "V_th = ", V_th print "rho_gc / µm = ", rho_gc / micro print "Omega_e / s^(-1) = ", Omega_e print "frequency / GHz = ", Omega_e / (2 * pi) / giga print "T_e / ns = ", T_e / nano print "T_intxn / ns = ", T_intxn / nanoV_th = 95361.4171888 rho_gc / µm = 0.542189740332 Omega_e / s^(-1) = -1.7588200236e+11 frequency / GHz = -27.9924900765 T_e / ns = 0.0357238672682 T_intxn / ns = 3.57238672682Two-body Magnetized CollisionsThe Hamiltonian for a two-body interaction between an ion and a magnetized electron is$$\vphantom{\Big]} H(\vQion, \vPion, \vQe, \vPe) = H_0(\vPion, \Qe{y}, \vPe) + H_\text{C}(\vQion, \vQe)$$where$$\begin{align} H_0(\vPion, \Qe{y}, \vPe) &= \frac{1}{2\Mion}\bigl(\Pion{x}^2 + \Pion{y}^2 + \Pion{z}^2\bigr) + \frac{1}{2\Me}\bigl((\Pe{x} + e B \Qe{y})^2 + \Pe{y}^2 + \Pe{z}^2\bigr),\\[1ex] H_\text{C}(\vQion, \vQe) &= -\frac{Ze^2}{4\pi\varepsilon_0} \big/ {\sqrt{(\Qion{x}-\Qe{x})^2 + (\Qion{y}-\Qe{y})^2 + (\Qion{z}-\Qe{z})^2}},\end{align}\\[1ex]$$and $e$ denotes the elementary quantum of charge.The simplest second-order scheme for integrating this system uses a *split-operator* approach: We approximate the total map $\Map{M}$ for a time step of size $h$ by the symmetric form$$\vphantom{\Big]} \Map{M}(h) \approx \Map{M}_0(h/2) \Map{M}_C(h) \Map{M}_0(h/2)$$where $\Map{M}_0$ and $\Map{M}_C$ are the exact maps for the Hamiltonians $H_0$ and $H_C$ respectively. The map $\Map{M}_0$ is a simple linear map. The map $\Map{M}_C$ generates a nonlinear kick of both ion and electron momenta. Hamiltonians for Two-body Magnetized Collisions""" Hamiltonian for free ion and electron in a magnetic field, under the assuption that the ion is unaffected by that magnetic field. Arguments: z_i (ndArray): 6 x N array of canonical coördinates and conjugate momenta for the ions z_e (ndArray): 6 x N array of canonical coördinates and conjugate momenta for the electrons In both of the above arrays, the six phase-space variables are given in the order(x, px, y, py, z, pz) Return: the total 'free' energy of each ion-electron pair """ def H_twobody_0(z_i, z_e): ham_i = ((z_i[Ipx,:] ** 2 + z_i[Ipy,:] ** 2 + z_i[Ipz,:] ** 2) / (2 * M_ion)) ham_e = ((z_e[Ipx,:] + (-qe) * B_mag * z_e[Iy,:]) ** 2 + z_e[Ipy,:] ** 2 + z_e[Ipz,:] ** 2) / (2 * me) return ham_i + ham_e """ Hamiltonian for the interaction of each ion-electron pair. """ def H_twobody_C(z_i, z_e): g_ie = -(Z_ion * qe ** 2) / (4 * pi * eps0) intxn = g_ie / np.sqrt( + (z_i[Ix,:] - z_e[Ix,:]) ** 2 + (z_i[Iy,:] - z_e[Iy,:]) ** 2 + (z_i[Iz,:] - z_e[Iz,:]) ** 2) return intxn """ Total Hamiltonian for each ion-electron pair. 
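Return: the total energy H_0 + H_C of each ion-electron pair, computed columnwise over the N pairs.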
""" def H_twobody(z_i, z_e): ham_0 = H_twobody_0(z_i, z_e) ham_C = H_twobody_C(z_i, z_e) return ham_0 + ham_CMaps for Two-body Magnetized Collisions""" define transfer maps for ions and electrons There are three maps to define here: one each for ions and electrons under H_0, and another """ """ matrix for a linear drift """ def MatD(mass, h): Mdrift = np.identity(6) for i in (Ix, Iy, Iz): Mdrift[i, i + 1] = h / mass return Mdrift """ matrix for linear electron dynamics in a solenoidal field """ def MatK0_e(h): mw = me * Omega_e wh = Omega_e * h cwh = math.cos(wh) swh = math.sin(wh) cwh1m = 2 * math.sin(wh / 2) ** 2 # 1 - cos(a) = 2 sin^2(a / 2) MK0 = np.identity(6) MK0[Iy, Iy ] = cwh MK0[Ipy, Ipy] = cwh MK0[Iy, Ipy] = swh / mw MK0[Ipy, Iy ] = -mw * swh MK0[Iz, Ipz] = h / me MK0[Ix, Ipx] = swh / mw MK0[Ix, Iy ] = swh MK0[Ix, Ipy] = cwh1m / mw MK0[Iy, Ipx] = -cwh1m / mw MK0[Ipy, Ipx] = -swh return MK0 """ map phase-space coördinates forward in time by amount h based on the Hamiltonian H_0, which describes the free motion of ions and the motion of electrons in a solenoidal magnetic field """ def MapZ_0(h, z_i, z_e): mat = MatD(M_ion, h) zf_i = mat.dot(z_i) mat = MatK0_e(h) zf_e = mat.dot(z_e) return zf_i, zf_e """ map phase-space coördinates forward in time by amount h based on the Hamiltonian H_C, which describes the collision between a single ion-electron pair """ def MapZ_C(h, z_i, z_e): g = h * Z_ion * qe ** 2 / (4 * pi * eps0) dz = z_i - z_e denom = (dz[Ix,:] ** 2 + dz[Iy,:] ** 2 + dz[Iz,:] ** 2) ** (3/2) zf_i = z_i.copy() zf_e = z_e.copy() for ip in (Ipx, Ipy, Ipz): zf_i[ip,:] = z_i[ip,:] - g * dz[ip - 1] / denom zf_e[ip,:] = z_e[ip,:] + g * dz[ip - 1] / denom return zf_i, zf_e def apply_MapZ_0(h, n, z_i, z_e): mat_i = MatD(M_ion, h) mat_e = MatK0_e(h) zf_i = [z_i] zf_e = [z_e] for i in range(n): z_i = mat_i.dot(z_i) z_e = mat_e.dot(z_e) zf_i.append(z_i) zf_e.append(z_e) return np.asarray(zf_i), np.asarray(zf_e) """ second-order split-operator integration for the total Hamiltonian """ def apply_MapZ(h, n, z_i, z_e): hh = 0.5 * h mat_i = MatD(M_ion, hh) mat_e = MatK0_e(hh) zf_i = [z_i] zf_e = [z_e] for i in range(n): z_i = mat_i.dot(z_i) z_e = mat_e.dot(z_e) z_i, z_e = MapZ_C(h, z_i, z_e) z_e = mat_e.dot(z_e) z_i = mat_i.dot(z_i) zf_i.append(z_i) zf_e.append(z_e) return np.asarray(zf_i), np.asarray(zf_e)Guiding-center Coördinates and $\Theta$-J Coördinates Transformations To and From Guiding-center Coördinates and $\Theta$-J CoördinatesWe transform the electron's transverse phase-space coördinates using the type-1 generating function$$ F_1(x,y;\, \phi,\ygc) = m\Omega\Bigl[\frac{1}{2}(y - \ygc)^2\cot\phi - y \ygc\Bigr].$$This yields the following transformation rules: *to* guiding-center coördinates$$\begin{align} m\Omega &= qB_0, \quad\text{(this is a signed quantity)}\\[1ex] \phi &= \arctan\Bigl(\frac{p_x + e B y}{p_y}\Bigr),\\[1ex] p_\phi &= \frac{1}{2m\Omega}\bigl[(p_x + m\Omega y)^2 + p_y^2\bigr],\\[1ex] \ygc &= -\frac{p_x}{m\Omega},\\[1ex] \pgc &= p_y + m\Omega x.\end{align}$$*from* guiding-center coördinates$$\begin{align} r_L &= \frac{1}{m\Omega}\sqrt{2m\Omega\,p_\phi}, \quad\text{(this is a signed quantity)}\\[1ex] x &= \frac{\pgc}{m\Omega} - r_L\cos\phi,\\[1ex] p_x &= -m\Omega\,\ygc,\\[1ex] y &= \ygc + r_L\sin\phi,\\[1ex] p_y &= m\Omega\,r_L\cos\phi.\end{align}$$We also require the transformation to and from the coördinates $\Theta$-J:$$\begin{align} \Theta &= \dotsb, \\ J &= p_\phi + \frac{Ze^2}{4\pi\varepsilon_0} \frac{r_L}{\Omega} \frac{(\Qion{x}-\xgc)\cos\phi - 
(\Qion{y}-\ygc)\sin\phi}{% \bigl[(\Qion{x}-\Qe{x})^2 + (\Qion{y}-\Qe{y})^2 + (\Qion{z}-\Qe{z})^2 + r_L^2\bigr]^{3/2}}.\end{align}$$$$\begin{align} \phi &= \dotsb, \\ p_\phi &= \dotsb.\end{align}$$""" convert to guiding-center coordinates """ def toGuidingCenter(z_e): mOmega = me * Omega_e zgc = z_e.copy() zgc[Ix,:] = np.arctan2(z_e[Ipx,:] + mOmega * z_e[Iy,:], z_e[Ipy,:]) zgc[Ipx,:] = (((z_e[Ipx,:] + mOmega * z_e[Iy,:]) ** 2 + z_e[Ipy,:] ** 2) / (2 * mOmega)) zgc[Iy,:] = - z_e[Ipx,:] / mOmega zgc[Ipy,:] = z_e[Ipy,:] + mOmega * z_e[Ix,:] return zgc """ convert from guiding-center coordinates """ def fromGuidingCenter(zgc): mOmega = me * Omega_e rhoL = np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega z_e = zgc.copy() z_e[Ix,:] = zgc[Ipy,:] / mOmega - rhoL * np.cos(zgc[Ix,:]) z_e[Ipx,:] = - mOmega * zgc[Iy,:] z_e[Iy,:] = zgc[Iy,:] + rhoL * np.sin(zgc[Ix,:]) z_e[Ipy,:] = mOmega * rhoL * np.cos(zgc[Ix,:]) return z_e """ return J(z_gc) coordinates using the (probably correct) minus sign """ def actionJ(z_i, zgc): g = Z_ion * qe ** 2 / (4 * pi * eps0) mOmega = me * Omega_e rhoL = np.sqrt(2 * zgc[Ipx,:] / mOmega) num = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) * np.cos(zgc[Ix,:]) - (z_i[Iy,:] - zgc[Iy,:]) * np.sin(zgc[Ix,:])) den = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2 + (z_i[Iz,:] - zgc[Iz,:]) ** 2 + rhoL ** 2) ** (3/2) return zgc[Ipx,:] + g * (rhoL / Omega_e) * num / den """ return the Larmor radius """ def rLarmor(z_e): mOmega = me * Omega_e return np.sqrt((z_e[Ipx,:] + mOmega * z_e[Iy,:]) ** 2 + z_e[Ipy,:] ** 2) / mOmega """ return the Larmor radius """ def rLarmor_gc(zgc): mOmega = me * Omega_e return np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega """ return the perturbation ratio (uses the minus sign) """ def pertubationRatio(z_i, zgc): mOmega = me * Omega_e rhoL = np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega num = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) * np.cos(zgc[Ix,:]) - (z_i[Iy,:] - zgc[Iy,:]) * np.sin(zgc[Ix,:])) den = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2 + (z_i[Iz,:] - zgc[Iz,:]) ** 2 + rhoL ** 2) return 2 * rhoL * num / den """ return the ratio (impact parameter) / (Larmor radius) """ def impactParamOverRL(z_i, zgc): mOmega = me * Omega_e rhoL = np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega b = np.sqrt((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2) return b / rhoLHamiltonians using Guiding-center Coördinates""" Hamiltonian for free ion and electron in a magnetic field, under the assuption that the ion is unaffected by that magnetic field. """ def H_gc_0(z_i, zgc): ham_i = ((z_i[Ipx,:] ** 2 + z_i[Ipy,:] ** 2 + z_i[Ipz,:] ** 2) / (2 * M_ion)) ham_e = Omega_e * actionJ(z_i, zgc) + zgc[Ipz,:] ** 2 / (2 * me) return ham_i + ham_e """ Hamiltonian for the ion-electron interaction in guiding-center (Θ,J) coördinates. """ def H_gc_C(z_i, zgc): g_ie = (Z_ion * qe ** 2) / (4 * pi * eps0) mOmega = me * Omega_e intxn = -g_ie / np.sqrt( + (z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2 + (z_i[Iz,:] - zgc[Iz,:]) ** 2 + 2 * actionJ(z_i, zgc) / mOmega) return intxn """ total Hamiltonian for the ion-electron system in GC coördinates """ def H_gc(z_i, zgc): ham_0 = H_gc_0(z_i, zgc) ham_C = H_gc_C(z_i, zgc) return ham_0 + ham_C """ Hamiltonian for the ion-electron interaction in guiding-center (Θ,J) coördinates. 
""" def H_gc_Cp(z_i, zgc): g_ie = (Z_ion * qe ** 2) / (4 * pi * eps0) mOmega = me * Omega_e intxn = -g_ie / np.sqrt( + (z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2 + (z_i[Iz,:] - zgc[Iz,:]) ** 2 + 2 * actionJp(z_i, zgc) / mOmega) return intxn """ total Hamiltonian for the ion-electron system in GC coördinates """ def H_gc_m(z_i, zgc): ham_0 = H_gc_0(z_i, zgc) ham_C = H_gc_Cm(z_i, zgc) return ham_0 + ham_CMaps using Guiding-center Coördinates""" define transfer maps for ions and electrons There are three maps to define here: one each for ions and electrons under H_0, and another for the interaction under H_c """ """ Map phase-space coördinates forward in time by amount h. This map is based on the Hamiltonian H_gc_0, which describes the free motion of ions and the averaged motion of electrons in a solenoidal magnetic field. NB: We do not update the \Theta coördinate, as it does not contribute to the dynamics of any other variables. """ def MapZgc_0(h, z_i, zgc): matD = MatD(M_ion, h) zf_i = matD.dot(z_i) zf_e = zgc.copy() zf_e[Iz,:] += (h / me) * zgc[Ipz,:] return zf_i, zf_e """ Map phase-space coördinates forward in time by amount h. This map is based on the Hamiltonian H_gc_C, which describes the collision between a single ion-electron pair in guiding-center coördinates. NB: We do not update the \Theta coördinate, as it does not contribute to the dynamics of any other variables. """ def MapZgc_C(h, z_i, zgc): g_ie = Z_ion * qe ** 2 / (4 * pi * eps0) mOmega = me * Omega_e dr3 = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2 + (z_i[Iz,:] - zgc[Iz,:]) ** 2 + (2 / mOmega) * actionJ(z_i, zgc)) ** (3/2) Omega_gc = (g_ie / mOmega) / dr3 S = np.sin(Omega_gc * h) C1 = 2 * np.sin(Omega_gc * (h / 2)) ** 2 zf_i = z_i.copy() zf_e = zgc.copy() Dxgc = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) * C1 + (z_i[Iy,:] - zgc[Iy,:]) * S) Dygc = ((z_i[Iy,:] - zgc[Iy,:]) * C1 - (z_i[Ix,:]- zgc[Ipy,:] / mOmega) * S) Dpz = (Omega_gc * h) * mOmega * (z_i[Iz,:] - zgc[Iz,:]) zf_i[Ipx,:] += mOmega * Dygc zf_i[Ipy,:] -= mOmega * Dxgc zf_i[Ipz,:] -= Dpz zf_e[Iy,:] += Dygc zf_e[Ipy,:] += mOmega * Dxgc zf_e[Ipz,:] += Dpz return zf_i, zf_e def apply_MapZgc_0(h, n, z_i, zgc): mat_i = MatD(M_ion, h) mat_e = np.identity(6) mat_e[Iz, Ipz] = h / me zf_i = [z_i] zf_e = [zgc] for i in range(n): z_i = mat_i.dot(z_i) zgc = mat_e.dot(zgc) zf_i.append(z_i) zf_e.append(zgc) return np.asarray(zf_i), np.asarray(zf_e) def apply_MapZgc(h, n, z_i, zgc): hh = 0.5 * h mat_i = MatD(M_ion, hh) mat_e = np.identity(6) mat_e[Iz, Ipz] = hh / me zf_i = [z_i] zf_e = [zgc] for i in range(n): z_i = mat_i.dot(z_i) zgc = mat_e.dot(zgc) z_i, zgc = MapZgc_C(h, z_i, zgc) zgc = mat_e.dot(zgc) z_i = mat_i.dot(z_i) zf_i.append(z_i) zf_e.append(zgc) return np.asarray(zf_i), np.asarray(zf_e)Maps using the Magnus Expansion""" compute $\Delta P_\text{ion}$ using the Magnus expansion """ def magnus_deltaPIon(h, z_i, zgc): g_ie = (Z_ion * qe ** 2) / (4 * pi * eps0) mOmega = me * Omega_e xgc = zgc[Ipy,:] / mOmega C1 = ((z_i[Ix,:] - xgc) ** 2 + (z_i[Iy,:] - zgc[Iy,:]) ** 2 + (z_i[Iz,:] - zgc[Iz,:]) ** 2 + (2 / mOmega) * actionJ(z_i, zgc)) C2 = (2 * ((z_i[Ix,:] - xgc) * z_i[Ipx,:] / M_ion + (z_i[Iy,:] - zgc[Iy,:]) * z_i[Ipy,:] / M_ion + (z_i[Iz,:] - zgc[Iz,:]) * (z_i[Ipz,:] / M_ion - zgc[Ipz,:] / me))) C3 = ((z_i[Ipx,:] / M_ion) ** 2 + (z_i[Ipy,:] / M_ion) ** 2 + (z_i[Ipz,:] / M_ion - zgc[Ipz,:] / me) ** 2) B = np.sqrt(C1 + (C2 + C3 * h) * h) Delta = 4 * C1 * C3 - C2 ** 2 D1 = (2 * C3 * h + C2) / B - C2 / np.sqrt(C1) D2 = (C2 * h 
+ 2 * C1) / B - 2 * np.sqrt(C1) dPx = - ((2 * g_ie / Delta) * ((z_i[Ix,:] - xgc) * D1 - (z_i[Ipx,:] / M_ion) * D2)) dPy = - ((2 * g_ie / Delta) * ((z_i[Iy,:] - zgc[Iy,:]) * D1 - (z_i[Ipy,:] / M_ion) * D2)) dPz = - ((2 * g_ie / Delta) * ((z_i[Iz,:] - zgc[Iz,:]) * D1 - (z_i[Ipz,:] / M_ion - zgc[Ipz,:] / me) * D2)) return np.asarray(( dPx, dPy, dPz)).T """ For the Magnus computation to work well, the interaction must be perturbative. This function return a value for the minimum impact parameter, above which the interaction becomes perturbative. """ def minImpactParam(magB, EkOverEV, bOverRL): numer = 2 * EkOverEV * (invFourPiEps0 * Z_ion * me) denom = ((1 / bOverRL) ** 2 + (tan_alpha / (N_gyro * pi)) ** 2) * magB ** 2 return (numer / denom) ** (1/3)Conversion from (*Q*,*V*) to (*Q*,*P*)""" define matrix that transforms ion coördinate-velocity data to canonically conjugate phase-space variables """ QVtoZion = np.identity(6) for i in (Ipx, Ipy, Ipz): QVtoZion[i, i] = M_ion """ define matrix that transforms electron coördinate-velocity data to canonically conjugate phase-space variables NB: This depends on the local magnetic vector potential, hence on the local magnetic field. """ QVtoZe = np.identity(6) for i in (Ipx, Ipy, Ipz): QVtoZe[i, i] = me QVtoZe[Ipx, Iy] = (-qe) * (-B_mag)Explore a range of values for the perturbation parametersZ_ion = 1 M_ion = mp e_temp = 300. # Kelvin B_mag = 1. # Tesla N_gyro = 100 # enforce adequate averaging tan_alpha = 5.0 # and an adequate opening angle """ derived quantities """ Omega_e = omega_Larmor(me, B_mag, Z = -1) T_e = (2 * pi) / abs(Omega_e) bOverLarmorR = 20.0 # 20 ==> max perturbation ratio of about 0.1 EkinOverVmax = 10.0 # 10 ==> eV_closest_approach / E_kinetic = 0.1 print minImpactParam(B_mag, EkinOverVmax, bOverLarmorR) / micro decades = 2 pts_per_decade = 3 logBs = np.linspace(0., 1. * float(decades), num = 1 + pts_per_decade * decades, endpoint = True) bvals = np.power(10, logBs) impactParameterB = micro * bvals print "b / μm = ", impactParameterB / micro print "b_min / μm =", minImpactParam(B_mag, EkinOverVmax, bOverLarmorR) / micro """ (the following depend on the impact parameter) """ LarmorR = impactParameterB / bOverLarmorR # (this version is defined positive) V_perp = - LarmorR * Omega_e L_intxn = tan_alpha * impactParameterB #[-1] * np.ones_like(impactParameterB) V_z = 2 * L_intxn / (N_gyro * T_e) T_intxn = 2 * L_intxn / V_z num_steps_per_gyro = 40 delta_Te = T_e / num_steps_per_gyro print T_intxn / delta_Te num_steps = int(np.floor(T_intxn[0] / delta_Te)) print "num_steps =", num_steps """ initial condition for the ion --- just one, for now """ QVion = np.array([ ( 0.0 * rho_gc, 0.000 * V_th, 0.0 * rho_gc, 0.000 * V_th, 0.0, 0.000 * V_th) ]).transpose() Zion = QVtoZion.dot(QVion) """ initial conditions for the electrons """ QVelec = np.asarray([ np.zeros(impactParameterB.shape), V_perp, impactParameterB - LarmorR, np.zeros(impactParameterB.shape), - L_intxn, L_intxn * abs(Omega_e) / (pi * N_gyro) ]) Zelec = QVtoZe.dot(QVelec) num_elec0 = Zelec.shape[1] num_ion0 = Zion.shape[1] """ === create arrays of electrons and ions === Here we arrange them so that we can pair each ion with each electron, and compute the \Delta{p} for each interaction. 
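Concretely, the electron columns are tiled once per ion with np.hstack, and each ion column is repeated num_elec0 times via integer division of the pair index, so column k of ZI_ion is paired with column k of ZI_elec.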
""" ZI_elec = np.hstack([Zelec for e in range(num_ion0)]) ZI_ion = Zion[:, np.arange(num_ion0 * num_elec0) // num_elec0] num_elec = ZI_elec.shape[1] num_ion = ZI_ion.shape[1] ZF_i, ZF_e = apply_MapZ(delta_Te, num_steps, ZI_ion, ZI_elec) navg = 1 # number of gyrotron oscillations over which to average; set to 1, 2, 4, 5, or 10 ZI_elec_gc = toGuidingCenter(ZI_elec) ZFgc_i, ZFgc_e = apply_MapZgc(navg * 40*delta_Te, num_steps//40 // navg, ZI_ion, ZI_elec_gc) deltaP_exp = np.array(ZF_i[-1, [Ipx, Ipy, Ipz], :] - ZF_i[0, [Ipx, Ipy, Ipz], :]).T deltaP_avg = np.array(ZFgc_i[-1, [Ipx, Ipy, Ipz], :] - ZFgc_i[0, [Ipx, Ipy, Ipz], :]).T deltaP_mgn = magnus_deltaPIon(T_intxn, ZI_ion, ZI_elec_gc) print deltaP_exp print print deltaP_avg print print deltaP_mgn dDeltaP_ax = deltaP_avg - deltaP_exp dDeltaP_mx = deltaP_mgn - deltaP_exp relErr_avg = (np.linalg.norm(dDeltaP_ax, axis = 1) / np.linalg.norm(deltaP_exp, axis = 1)) relErr_mgn = (np.linalg.norm(dDeltaP_mx, axis = 1) / np.linalg.norm(deltaP_exp, axis = 1)) eV_closest_approach = (invFourPiEps0 * Z_ion * qe ** 2 / impactParameterB) E_kinetic_e = (me / 2) * (V_perp ** 2 + V_z ** 2) eVcaOverEkin = eV_closest_approach / E_kinetic_e llres = plt.figure() plt.loglog(eVcaOverEkin, relErr_avg, 'bo') plt.loglog(eVcaOverEkin, relErr_mgn, 'rs') plt.annotate(s="Averging",xy=(1.e-1,1.e-3)) plt.annotate(s="Magnus",xy=(1.e-2,5.e-1)) plt.xlabel("$eV_{\mathrm{ca}} / E_{\mathrm{kin}}$") plt.ylabel("relative error in $\Delta{P}_{\mathrm{ion}}$") plt.show() llres.savefig("/Users/dabell/Desktop/foo.pdf") mae_compare.savefig("/Users/dabell/RadiaSoft/MCool/MgnAvgExpCompare.pdf")Coadd script# imports import glob import pdb import matplotlib.pyplot as plt from scipy.interpolate import interp1d from astropy.table import Table, vstack from xastropy.xutils import xdebug as xdbTestingx1d_path = os.getenv('DROPBOX_DIR')+'/COS-LRG/tmp/' x1d_files = glob.glob(x1d_path+'*x1d.fits')Check an x1d filetbl = Table.read(x1d_files[0]) tbl[0:1]Segment A# Load sega_tbls = [] for x1d_file in x1d_files: tbl = Table.read(x1d_file) sega_tbls.append(tbl[0:1]) # Grab one wavelength array wave = sega_tbls[0]['WAVELENGTH'][0,:].data wave[0:5] # Sum exposure time total_time = np.zeros_like(wave) for sega_tbl in sega_tbls: total_time += sega_tbl['DQ_WGT'][0,:]*sega_tbl['EXPTIME'] #xdb.xhist(total_time) # Find DQmin for all exposures -- Why are we doing this step?? dqmin = np.ones_like(wave).astype(int) * 99999 for sega_tbl in sega_tbls: # Reset DQ dq = sega_tbl['DQ'][0,:].data reset_1024 = dq == 1024 dq[reset_1024] = 2 dqmin = np.minimum(dq, dqmin) # Find DQ_WGT max for all exposures DQWmax = np.zeros_like(wave) for sega_tbl in sega_tbls: # Reset DQ dqw = sega_tbl['DQ_WGT'][0,:].data DQWmax = np.maximum(dqw, DQWmax) #xdb.xhist(dqwmax) # Generate calib values total_counts = np.zeros_like(wave) for sega_tbl in sega_tbls: # total_counts += DQWmax * sega_tbl['GCOUNTS'][0,:] xdb.xplot(wave, total_counts)Calibration# Calibration wave_calib, calib = [], [] for sega_tbl in sega_tbls: # gddq = (sega_tbl['DQ'] > 0) & (sega_tbl['FLUX'] > 0) # Append wave_calib.append(sega_tbl['WAVELENGTH'][gddq].data.flatten()) calib.append( (sega_tbl['NET'][gddq] / sega_tbl['FLUX'][gddq]).data) #xdb.xhist(total_counts) wave_calib = np.concatenate(wave_calib) calib = np.concatenate(calib) # sort srt = np.argsort(wave_calib) wave_calib = wave_calib[srt] calib = calib[srt] xdb.xplot(wave_calib, calib) # Cut down gdwv = wave_calib < 2100. 
# Anything above that is junk # Spline f = interp1d(wave_calib[gdwv], calib[gdwv], bounds_error=False, fill_value=0.) # cubic behaves badly plt.clf() ax = plt.gca() ax.scatter(wave_calib[gdwv], calib[gdwv]) ax.plot(wave, f(wave)) plt.show()モデルの確認- ランダムカットフォレストのモデルでバッチ変換- 推論結果を可視化 パラメータtraining_job_name = 'machine-temperature-iot-training-job' labeled_test_data_s3_path = 's3://bucket-name/machine_temperature_iot/test.csv' output_data_s3_path = 's3://bucket-name/machine_temperature_iot/transform' shingle_size = 288バッチ変換用にデータ形式を変更テスト用データのラベル部分を削除して、バッチ変換に使える形にした上で、S3にアップロードするimport pandas as pd transform_input_data_local_path = '/tmp/test_transform.csv' pd.read_csv(labeled_test_data_s3_path, header=None)\ .drop(columns=0)\ .to_csv(transform_input_data_local_path, header=None, index=None) from os import path transform_input_data_s3_path = path.join(path.dirname(labeled_test_data_s3_path), 'test_transform.csv') !aws s3 cp $transform_input_data_local_path $transform_input_data_s3_pathバッチ変換from sagemaker.estimator import Estimator model = Estimator.attach(training_job_name=training_job_name) transformer = model.transformer( instance_count=1, instance_type='ml.m4.xlarge', strategy='MultiRecord', assemble_with='Line', output_path=output_data_s3_path ) transformer.transform( transform_input_data_s3_path, content_type='text/csv', split_type='Line' ) transformer.wait()推論結果を取得temp_path = '/tmp/transform_output/' !aws s3 sync $transformer.output_path $temp_path from os import path # バッチ変換の結果ファイルは入力ファイル名に.outが付いている output_path = path.join(temp_path, path.basename(transform_input_data_s3_path))+'.out' !head $output_path import json with open(output_path) as f: lines = f.readlines() scores = list(map(lambda l : json.loads(l)['score'], lines)) import pandas as pd df = pd.read_csv(labeled_test_data_s3_path, header=None) # 異常度スコアを入れる df_format = pd.DataFrame(data={ 'is_anomaly': df.iloc[:, 0], # 異常値かどうか 'value': df.iloc[:, shingle_size], # 温度の値 'score': scores, # 異常度スコア 'anomaly_threshold': [0]*len(df) # 異常度の閾値(この後計算する) })異常度スコアの閾値を計算score_mean = df_format.score.mean() score_std = df_format.score.std() score_cutoff = score_mean + 1 * score_std df_format['anomaly_threshold'] = pd.Series([score_cutoff]*len(df_format), df_format.index)結果を可視化import numpy as np import matplotlib.pyplot as plt import seaborn as sns # pyplotで描画する図を綺麗にする sns.set() # 図がインラインで描画されるようにする(JupyterLabでは不要) %matplotlib inline fig, ax1 = plt.subplots() ax2 = ax1.twinx() ax1.plot(df_format.value, color='C0', alpha=0.7) ax2.plot(df_format.score, color='C1', alpha=0.7) # 異常値のラベルデータ anomalies_true = df_format[df_format.is_anomaly == 1] ax1.plot(anomalies_true.value, 'ko' ) # 推論による異常値 anomalies_infer = df_format.score[df_format.score >= score_cutoff] ax2.plot(anomalies_infer, 'ro' ) ax2.plot(df_format.anomaly_threshold, 'r', alpha=0.5) ax1.grid(which='major', axis='both') ax1.set_ylabel('Machine Temperature', color='C0') ax2.set_ylabel('Anomaly Score', color='C1') ax1.tick_params('y', colors='C0') ax2.tick_params('y', colors='C1') ax1.set_ylim(0, max(df_format.value)) ax2.set_ylim(min(df_format.score), 1.5*max(max(df_format.score), score_cutoff)+1) fig.set_figwidth(12) plt.show()Minimization functionfrom biorefineries import lipidcane2g as lc import biosteam as bst import numpy as np import pandas as pd import matplotlib.pyplot as plt from biosteam.utils import colors from math import floor, ceil from biosteam import plots from biosteam.utils import CABBI_colors from thermosteam.units_of_measure import format_units from biosteam.plots.utils 
import style_axis, style_plot_limits, fill_plot, set_axes_labels shadecolor = (*colors.neutral.RGBn, 0.30) linecolor = (*colors.neutral_shade.RGBn, 0.85) markercolor = (*colors.CABBI_blue_light.RGBn, 1) edgecolor = (*colors.CABBI_black.RGBn, 1) def tickmarks(data, accuracy=50, N_points=5): dmin = data.min() dmax = data.max() dmin = floor(dmin/accuracy) * accuracy dmax = ceil(dmax/accuracy) * accuracy step = (dmax - dmin) / (N_points - 1) if step == 0: return [0, 1] else: return [dmin + step * i for i in range(N_points)] def create_inflection_plot(ax, name='1g', load=False, save=True): lipid_retention = np.linspace(0.5, 1.0, 10) if load: try: efficiency_inflection = np.load(f'lipid_extraction_efficiency_inflection{name}.npy') except: return create_inflection_plot(ax, name, False, save) else: lc.load(name) efficiency_inflection = np.array([ lc.lipid_extraction_specification.solve_MFPP_inflection(i) for i in lipid_retention ]) save and np.save(f'lipid_extraction_efficiency_inflection{name}', efficiency_inflection) mask = ~np.isnan(efficiency_inflection) ax.plot(100 * lipid_retention[mask], 100 * efficiency_inflection[mask], label=str(name)) fig, ax = plt.subplots() create_inflection_plot(ax, 0, load=False) create_inflection_plot(ax, 1, load=False) create_inflection_plot(ax, 2, load=False) create_inflection_plot(ax, 3, load=False) plt.xlabel('Lipid retention [%]') plt.ylabel('MFPP-Extraction efficiency inflection') plt.legend() plt.show() lc.load(0) lipid_retention = np.linspace(0.5, 1.0, 10) efficiency_inflection = np.array([ lc.lipid_extraction_specification.solve_MFPP_inflection(i) for i in lipid_retention ]) maskTensorFlow Convolutional Neural Network for Classification of MNIST Handwritten Digit Dataset by [](https://sites.google.com/view/bvsk35/home?authuser=0) Part 1 IntroductionHere I have created a basic code to show the how to use TensorFlow and buidl CNN for classification of MNIST Handwritten digit dataset. We can achieve accuracy of 97% using fully connected nerual network. But using simple CNN we can achieve accuracy over 99%.Convolutional Networks work by moving small filters across the input image. This means the filters are re-used for recognizing patterns throughout the entire input image. This makes the Convolutional Networks much more powerful than Fully-Connected networks with the same number of variables. This in turn makes the Convolutional Networks faster to train. CNNsThese are inspired from the Wiesel and Hubel research on analysis of visual cortex, development of the visual system and description of ocular dominance columns for which they have won Nobel Prize.Below image is representation of convolutional neural networks in LeNet style (The LeNet architecture was first introduced by LeCun et al. in their 1998 paper, Gradient-Based Learning Applied to Document Recognition). If input image is Black & weight i.e. binary then it has only single channel i.e. only set of pixel values will be present. If input image is a Color image then we have three channels i.e. one set of pixels for primary colors RGB. If we apply `n` different set of filters on any kind of image (B&W or Color) then we end up with `n` different outputs i.e. each filter gives one output. Each of the `n` filters can have `m` subset of filters based on no. of channels for example if image is B&W then we have `1` channel hence `m = 1`, if color then we have `3` channels hence `m = 3`. Also, after applying first convolution layer on given image (B&W or Color) we can have output with `p` no. 
of channels, then in that case `m` for next layer will be equal to `p`.In below picture we can see that we start with 3 channel image and 128 x 128 pixels. Then we apply filter of some size (it can be anything) since we have 3 channels in the image each filter we apply will have 3 channels and we have 16 of those. Size of the output remains same because we can pad the input to maintain size of input and ouput to be same. In each convolutional layer some kind of activation function is applied. Calculation of the output of convolutional is simple, it is nothing but dot product of the filter applied on the image. After this people do max-pooling (sometimes location of the feature is not important) to decrease the resolution of the image. This is done by taking max value of the pixel in that image. We can other variants like L2 pooling etc. There is no activation function after pooling operation. This process is repeated until we extracted the important information/features from the images. Finally we have a fully-connected layer for doing the classification. This process reduces no. of trainable parameters required drastically [link](http://cs231n.github.io/convolutional-networks/). ![alt text](Images/4.png "Fig.1") Convolutional LayerThe following chart shows the basic idea of processing an image in the first convolutional layer. The input image depicts the number 7 and four copies of the image are shown here, so we can see more clearly how the filter is being moved to different positions of the image. For each position of the filter, the dot-product is being calculated between the filter and the image pixels under the filter, which results in a single pixel in the output image. So moving the filter across the entire input image results in a new image being generated.The red filter-weights means that the filter has a positive reaction to black pixels in the input image, while blue pixels means the filter has a negative reaction to black pixels.In this case it appears that the filter recognizes the horizontal line of the 7-digit, as can be seen from its stronger reaction to that line in the output image.![alt text](Images/1.png "Fig.2")The step-size for moving the filter across the input is called the stride. There is a stride for moving the filter horizontally (x-axis) and another stride for moving vertically (y-axis).In the code below, the stride is set to 1 in both directions, which means the filter starts in the upper left corner of the input image and is being moved 1 pixel to the right in each step. When the filter reaches the end of the image to the right, then the filter is moved back to the left side and 1 pixel down the image. This continues until the filter has reached the lower right corner of the input image and the entire output image has been generated.When the filter reaches the end of the right-side as well as the bottom of the input image, then it can be padded with zeroes (white pixels). This causes the output image to be of the exact same dimension as the input image.Furthermore, the output of the convolution may be passed through a so-called Rectified Linear Unit (ReLU), which merely ensures that the output is positive because negative values are set to zero. The output may also be down-sampled by so-called max-pooling, which considers small windows of 2x2 pixels and only keeps the largest of those pixels. This halves the resolution of the input image e.g. 
from 28x28 to 14x14 pixels.

Note that the second convolutional layer is more complicated because it takes 16 input channels. We want a separate filter for each input channel, so we need 16 filters instead of just one. Furthermore, we want 36 output channels from the second convolutional layer, so in total we need 16 x 36 = 576 filters for the second convolutional layer. It can be a bit challenging to understand how this works.

Flowchart

The following chart shows roughly how the data flows in the Convolutional Neural Network implemented below. **Note:** the flow chart itself is correct, but the weights shown in the picture are stacked by channel rather than by filter, i.e. each (6, 6) grid of weight images shown in color corresponds to one channel stream. Taking convolutional layer 2 as an example: the weight image in the first row and first column of the first grid, combined with the corresponding images from all 16 grids, makes up one filter. By combining each (row, col) weight element from all the grids in this way, we end up with 36 filters, each with 16 input channels. See the last section, where the weights are plotted by filter. This image is included here just for reference.

![alt text](Images/2.png "Fig.3")

The flowchart depicts what has been described above. The input image is processed in the first convolutional layer using the filter-weights. This results in 16 new images, one for each filter in the convolutional layer. The images are also down-sampled, so the image resolution is decreased from 28x28 to 14x14.

These 16 smaller images are then processed in the second convolutional layer. We need filter-weights for each of these 16 channels, and we need filter-weights for each output channel of this layer. There are 36 output channels, so there are a total of 16 x 36 = 576 filters in the second convolutional layer. The resulting images are down-sampled again to 7x7 pixels.

The output of the second convolutional layer is 36 images of 7x7 pixels each. These are then flattened to a single vector of length 7 x 7 x 36 = 1764, which is used as the input to a fully-connected layer with 128 neurons (or elements). This feeds into another fully-connected layer with 10 neurons, one for each of the classes, which is used to determine the class of the image, that is, which number is depicted in the image.

The convolutional filters are initially chosen at random, so the classification is done randomly. The error between the predicted and true class of the input image is measured as the so-called cross-entropy. The optimizer then automatically propagates this error back through the Convolutional Network using the chain-rule of differentiation and updates the filter-weights so as to improve the classification error. This is done iteratively thousands of times until the classification error is sufficiently low.

Note that the computation in TensorFlow is actually done on a batch of images instead of a single image, which makes the computation more efficient. This means the flowchart actually has one more data-dimension when implemented in TensorFlow.

Practical implementation

In theory the CNN architecture is `Convolutional Layer - Non-linear Activation - Pooling Layer`, but in practice we do `Convolutional Layer - Pooling Layer - Non-linear Activation`. There is a nice explanation of the reasoning in the following [link](https://stackoverflow.com/questions/35543428/activation-function-after-pooling-layer-or-convolutional-layer); a quick numerical check of the claim is sketched right after this paragraph.
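As that check, here is a small standalone NumPy sketch (not part of this notebook's pipeline; the helper names are invented for the illustration) showing that 2x2 max-pooling followed by ReLU gives the same result as ReLU followed by 2x2 max-pooling:

import numpy as np

def relu(x):
    return np.maximum(x, 0)                  # elementwise max(x, 0)

def max_pool_2x2(x):
    h, w = x.shape                           # assumes h and w are even
    return x.reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))

x = np.random.randn(28, 28)                  # a random single-channel "image"
print(np.allclose(relu(max_pool_2x2(x)), max_pool_2x2(relu(x))))   # prints True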
It says that if we have non-decreasing non-linear activation functions then order doesn't matter becasue they satisfy commutative property. This can be verified by doing experiments and observing that we end up with same amount performance. Also, applying activation function helps to decrease the computation time (it may not matter sometimes because max time spent while doing convolution). **Supress warnings**: When calling *tensorflow* module in Juptyer Notebook it raises warning of *h5py* compliation against wrong version of *hdf5* in Windows OS when using Anaconda. Even though internally all versions are correct this might be due to wrong version written recipe file. **TLDR;** Warnings can be ignored (since everything works properly) and to suppress the warning I am calling this function. But it is not necessary. If you want to see warnings once (which happens by default in Juptyer notebook) uncomment the third line below and comment the second line.import warnings warnings.filterwarnings('ignore') # warnings.filterwarnings(action='once')ImportsImport all the required modules and *Load_MNIST_Data* has a class *MNIST* written by me which has all basic helper functions required.import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix import tensorflow as tf from Load_MNIST_Data import MNISTPart 2 Load DataLoad the MNIST data using *prep_data* which outputs training set, validation set and test set scaled based on the scaling defined by the user. Default is 'MMS': MinMaxScaler and other option is 'SS': StandardScaler. Use *one_hot_encoded* to generate the one hot encoded vectors for training purposes.data = MNIST(scaling='MMS') train_img, train_label, val_img, val_label, test_img, test_label = data.prep_data('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz') hot_label_train, hot_label_val, hot_label_test = data.one_hot_encoded(train_label, val_label, test_label)The MNIST data-set has now been loaded and consists of 70000 images and labels for the images. The dataset is split into 3 mutually exclusive subsets.print("Size of:") print("- Training-set:\t\t{}".format(data.num_train)) print("- Validation-set:\t{}".format(data.num_val)) print("- Test-set:\t\t{}".format(data.num_test))Size of: - Training-set: 55000 - Validation-set: 5000 - Test-set: 10000Configuration of Neural NetworkThe configuration of the Convolutional Neural Network is defined here for convenience. Copy some of the data-dimensions for convenience.# Convolutional Layer 1. filter_size_1 = 5 # Convolution filters are 5 x 5 pixels. num_filters_1 = 16 # There are 16 of these filters. # Convolutional Layer 2. filter_size_2 = 5 # Convolution filters are 5 x 5 pixels. num_filters_2 = 36 # There are 36 of these filters. # Fully-connected layer. fc_size = 128 # Number of neurons in fully-connected layer. # The number of pixels in each dimension of an image. img_size = data.img_size # The images are stored in one-dimensional arrays of this length. img_size_flat = data.img_size_flat # Tuple with height and width of images used to reshape arrays. img_shape = data.img_shape # Number of classes, one class for each of 10 digits. num_classes = data.num_classes # Number of colour channels for the images: 1 channel for gray-scale. num_channels = data.num_channelsOne-Hot EncodingThe output data is loaded as both integer labels and One-Hot encoded arrays. 
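One common way to produce such an encoding (a minimal NumPy sketch, independent of the MNIST helper class used in this notebook) is to index into an identity matrix:

import numpy as np

labels = np.array([7, 2, 1, 0, 4])       # integer class labels
num_classes = 10
one_hot = np.eye(num_classes)[labels]    # row i is all zeros except a 1 at position labels[i]
print(one_hot[0])                        # [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]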
This means the labels have been converted from a single integer to a vector whose length equals the number of possible classes. All elements of the vector are zero except for the $i^{th}$ element which is 1 and means the class is $i$. For example, the One-Hot encoded labels for the first 5 images in the test-set are:print('One-Hot Encoded vectors: \n', hot_label_test[:5, :])One-Hot Encoded vectors: [[0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]]We also need the classes as integers for various comparisons and performance measures. These can be found from the One-Hot encoded arrays by taking the index of the highest element using the `np.argmax()` function. But this has already been done when the dataset was loaded, so we can see the labels for the first five images in the test set. Compare these to the One-Hot encoded arrays above.print('Labels: \n', test_label[:5])Labels: [7 2 1 0 4]Helper function for PlottingThis function is used to plot images in a nxn grid.def plot_images(images, cls_true, cls_pred=None, color_map='binary'): check = int(np.sqrt(images.shape[0])) try: assert check * check == images.shape[0] except AssertionError: raise AssertionError("Input array first dimension must be square") if check != 1: # Create figure with nxn sub-plots. fig, axes = plt.subplots(check, check) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape(img_shape), cmap=color_map) # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true[i]) else: xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) ax.set_xlabel(xlabel) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) else: fig, axes = plt.subplots() axes.imshow(images.reshape(img_shape), cmap=color_map) # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true) else: xlabel = "True: {0}, Pred: {1}".format(cls_true, cls_pred) axes.set_xlabel(xlabel) # Remove ticks from the plot. axes.set_xticks([]) axes.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show()Plot a few images to see how does dataset looksThis images depends on which kind of scaling you are using. If you use 'SS' scaling then images look as if it has been smudged and if you use 'MMS' scaling then images look as if more crisp **People mostly use 'MMS' scaling** .# Get the first images from the test-set. images = test_img[0:9] # Get the true classes for those images. cls_true = test_label[0:9] # Plot the images and labels using our helper-function above. plot_images(images=images, cls_true=cls_true)Part 3 TensorFlow GraphThe entire purpose of TensorFlow is to have a computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.TensorFlow can also efficiently load tons of real world data present in storage drives (in the format of byte) and perform computations faster than NumPy. TensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. 
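For instance, here is a minimal sketch (using the same TensorFlow 1.x graph-and-session API as this notebook; the toy model is invented purely for illustration) of TensorFlow deriving a gradient automatically from a graph:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])   # placeholder: input fed in at run time
w = tf.Variable(2.0)                           # model variable to be optimized
y = w * x + 1.0                                # model: a simple expression added to the graph
cost = tf.reduce_mean(tf.square(y))            # cost that guides the optimization
grad_w = tf.gradients(cost, [w])[0]            # gradient of the cost w.r.t. w, built by TensorFlow

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([cost, grad_w], feed_dict={x: [1.0, 2.0, 3.0]}))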
This is because the graph is a combination of simple arithmetic expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.TensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) that are even faster than GPUs.A TensorFlow graph consists of the following parts (and many more):* Placeholder: variables used to feed input into the graph.* Model variables: that are going to be optimized so as to make the model perform better.* Model: which is essentially just a mathematical function that calculates some output given the input in the placeholder variables and the model variables.* Cost: measure that can be used to guide the optimization of the variables.* Optimization method: which updates the variables of the model.In addition, the TensorFlow graph may also contain various debugging statements e.g. for logging data to be displayed using TensorBoard. Helper-functions for creating new variablesFunctions for creating new TensorFlow variables in the given shape and initializing them with random values. Note that the initialization is not actually done at this point, it is merely being defined in the TensorFlow graph.`tf.truncated_normal()`: Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.def new_weights(shape): return tf.Variable(tf.truncated_normal(shape, stddev=0.05)) def new_biases(length): return tf.Variable(tf.constant(0.05, shape=[length]))Helper-function for creating a new Convolutional LayerThis function creates a new convolutional layer in the computational graph for TensorFlow. Nothing is actually calculated here, we are just adding the mathematical formulas to the TensorFlow graph.It is assumed that the argument `input_img` passed to the function is a 4-dim tensor with the following dimensions:- Total no. of images.- Y-axis of each image.- X-axis of each image.- Channels of each image.Note that the input channels may either be **colour-channels**, or it may be **filter-channels** if the input is produced from a previous convolutional layer.The `output image` of the convolution layer will be another 4-dim tensor with the following dimensions:- Total no. of images, same as input.- Y-axis of each image. If 2x2 pooling is used, then the height and width of the input images is divided by 2.- X-axis of each image. If 2x2 pooling is used, then the height and width of the input images is divided by 2.- Channels produced by the convolutional filters.`tf.nn.conv2d`: Computes a 2-D convolution given 4-D `input` and `filter` tensors. Please refer to this [link](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d) for detailed explanation for each args that this function takes. It is very important. `tf.nn.max_pool`: Performs the max pooling on the input. Please refer to this [link](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool) and this [link](https://stackoverflow.com/questions/38601452/the-usages-of-ksize-in-tf-nn-max-pool) for detailed explanation for each args that this function takes.**Note:** All our images and filters are square matrices.def new_conv_layer(input_img, # input images from previous layer. num_input_channels, # no. of channels in prev. layer. 
filter_size, # width and height of each filter. num_filters, # no. of filters. use_pooling=True): # Use 2x2 max-pooling. # Shape of the filter-weights for the convolution. # This format is determined by the TensorFlow API. shape = [filter_size, filter_size, num_input_channels, num_filters] # Create new weights aka. filters with the given shape. weights = new_weights(shape=shape) # Create new biases, one for each filter. biases = new_biases(length=num_filters) # Create the TensorFlow operation for convolution. # Note the strides are set to 1 in all dimensions. # The first and last stride must always be 1, # because the first is for the image-number and # the last is for the input-channel. # For ex: strides=[1, 2, 2, 1] would mean that the filter # is moved 2 pixels across the x- and y-axis of the image. # The padding is set to 'SAME' which means the input image # is padded with zeroes so the size of the output is the same. layer = tf.nn.conv2d(input=input_img, filter=weights, strides=[1, 1, 1, 1], padding='SAME') # Add the biases to the results of the convolution. # A bias-value is added after applying one of the `n` filters (which contains `p`-channel filters). layer += biases # Use pooling to down-sample the image resolution. if use_pooling: # ksize: talks about size of the pooling kernel. # stride: talks about no. of steps taking by kernel. # This is 2x2 max-pooling, which means that we # consider 2x2 windows and select the largest value # in each window. Then we move 2 pixels to the next window. layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Rectified Linear Unit (ReLU). # It calculates max(x, 0) for each input pixel x. # This adds some non-linearity to the formula and allows us # to learn more complicated functions. layer = tf.nn.relu(layer) # Note that ReLU is normally executed before the pooling, # but since relu(max_pool(x)) == max_pool(relu(x)) we can # save 75% of the relu-operations by max-pooling first. # We return both the resulting layer and the filter-weights # because we will plot the weights later. return layer, weightsHelper-function for flattening a layerA convolutional layer produces an output tensor with 4 dimensions. We will add fully-connected layers after the convolution layers, so we need to reduce the 4-dim tensor to 2-dim which can be used as input to the fully-connected layer.def flatten_layer(layer): # Get the shape of the input layer. layer_shape = layer.get_shape() # The shape of the input layer is assumed to be: # layer_shape == [num_images, img_height, img_width, num_channels] # The number of features is: img_height * img_width * num_channels # We can use a function from TensorFlow to calculate this. # num_elements(): Returns the total number of elements, or none for incomplete shapes. num_features = layer_shape[1:4].num_elements() # Reshape the layer to [num_images, num_features]. # Note that we just set the size of the second dimension # to num_features and the size of the first dimension to -1 # which means the size in that dimension is calculated # so the total size of the tensor is unchanged from the reshaping. layer_flat = tf.reshape(layer, [-1, num_features]) # The shape of the flattened layer is now: # [num_images, img_height * img_width * num_channels] # Return both the flattened layer and the number of features. 
return layer_flat, num_featuresHelper-function for creating a new Fully-Connected Layer or Dense LayerThis function creates a new fully-connected layer in the computational graph for TensorFlow. It is assumed that the input is a 2-dim tensor of shape `[num_images, num_inputs]`. The output is a 2-dim tensor of shape `[num_images, num_outputs]`.def new_fc_layer(input_img, # The previous layer. num_inputs, # Num. inputs from prev. layer. num_outputs, # Num. outputs. use_ReLU=True): # Use ReLU in layers before final layer # Create new weights (using Xavier initialisation) and biases. weights = tf.Variable(tf.contrib.layers.xavier_initializer()([num_inputs, num_outputs])) biases = new_biases(length=num_outputs) # Calculate the layer as the matrix multiplication of # the input and weights, and then add the bias-values. layer = tf.matmul(input_img, weights) + biases # Use ReLU activation function if use_ReLU: layer = tf.nn.relu(layer) return layerPlaceholder variablesPlaceholder variables serve as the input to the graph that we may change each time we execute the graph. This is known as feeding the placeholder variables.First we define the placeholder variable for the *input* *images*. This allows us to change the images that are input to the TensorFlow graph. This is a tensor, which just means that it is a multi-dimensional vector or matrix. The data-type is set to `float64` and the shape is set to `[None, img_size_flat]`, where None means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.X = tf.placeholder(name='Input', dtype=tf.float32, shape=[None, img_size_flat])The convolutional layers expect x to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead `[num_images, img_height, img_width, num_channels]`. Note that `img_height == img_width == img_size` and `num_images` can be inferred automatically by using `-1` for the size of the first dimension. So the reshape operation is:X_image = tf.reshape(X, [-1, img_size, img_size, num_channels])Finally, we have the placeholder variable for the **desired/predicted** one-hot encoded labels associated with the images that were input in the placeholder variable `X`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of them and each one-hot encoded labels is a vector of length num_classes which is 10 in this case.y_true_hot = tf.placeholder(name='Outputs', dtype=tf.float32, shape=[None, num_classes])Next we have the placeholder variable for the **desired/predicted** *labels* of each image in the placeholder variable `X`. These are integers and the dimensionality of this placeholder variable is set to `[None]` which means the placeholder variable is a one-dimensional vector of arbitrary length.y_true = tf.argmax(y_true_hot, axis=1)Convolutional Layer 1Create the first convolutional layer. It takes `X_image` as input and creates num_filters1 different filters, each having width and height equal to `filter_size1`. Finally we wish to down-sample the image so it is half the size by using 2x2 max-pooling.layer_conv_1, weights_conv_1 = \ new_conv_layer(input_img=X_image, num_input_channels=num_channels, filter_size=filter_size_1, num_filters=num_filters_1, use_pooling=True)WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. 
Instructions for updating: Colocations handled automatically by placer.Check the shape of the tensor that will be output by the convolutional layer. It should be `(?, 14, 14, 16)` which means that there is an arbitrary number of images (indicated by `?`), each image is `14` pixels wide and `14` pixels high, and there are `16` different channels, `1` channel for each of the `5 x 5 x 1` filters and we have `16` of them.layer_conv_1Convolutional Layer 2Create the second convolutional layer, which takes as input the output from the first convolutional layer. The number of input channels corresponds to the number of filters in the first convolutional layer.layer_conv_2, weights_conv_2 = \ new_conv_layer(input_img=layer_conv_1, num_input_channels=num_filters_1, filter_size=filter_size_2, num_filters=num_filters_2, use_pooling=True)Check the shape of the tensor that will be output from this convolutional layer. The shape is `(?, 7, 7, 36)` where `?` means that there is an arbitrary number of images, with each image having width and height of `7` pixels, and there are `36` different channels, `1` channel for each `5 x 5 x 16` filter and we have `36` of them.layer_conv_2Flatten LayerThe convolutional layers output 4-dim tensors. We now wish to use these as input in a fully-connected network, which requires for the tensors to be reshaped or flattened to 2-dim tensors.layer_flat, num_features = flatten_layer(layer_conv_2)Check that the tensors now have shape `(?, 1764)` which means there's an arbitrary number of images which have been flattened to vectors of length `1764` each. Note that `1764 = 7 x 7 x 36`.layer_flat num_featuresFully-Connected Layer 1Add a fully-connected layer to the network. The input is the flattened layer from the previous convolution. The number of neurons or nodes in the fully-connected layer is `fc_size`. `ReLU` is used the activation function.layer_fc_1 = new_fc_layer(input_img=layer_flat, num_inputs=num_features, num_outputs=fc_size)WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0. For more information, please see: * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md * https://github.com/tensorflow/addons If you depend on functionality not listed there, please file an issue.Check that the output of the fully-connected layer is a tensor with shape `(?, 128)` where there are an arbitrary number of images and `fc_size == 128`.layer_fc_1Fully-Connected Layer 2Add another fully-connected layer that outputs vectors of length `10` for determining which of the `10` classes the input image belongs to. We will use `SoftMax` as acitvation activation.layer_fc_2 = new_fc_layer(input_img=layer_fc_1, num_inputs=fc_size, num_outputs=num_classes, use_ReLU=False) # Check the output shape layer_fc_2Predicted ClassThe second fully-connected layer estimates how likely it is that the input image belongs to each of the 10 classes. However, these estimates are a bit rough and difficult to interpret because the numbers may be very small or large, so we want to normalize them so that each element is limited between zero and one and the 10 elements sum to one. This is calculated using the so-called softmax function and the result is stored in `y_pred`.y_pred_raw_prob = tf.nn.softmax(layer_fc_2)The class-number is the index of the largest element.y_pred = tf.argmax(y_pred_raw_prob, axis=1)Cost-function to be optimizedTo make the model better at classifying the input images, we must somehow change the variables for `weights` and `biases`. 
To do this we first need to know how well the model currently performs by comparing the predicted output of the model `y_pred` to the desired output `y_true`.The **cross-entropy** is a performance measure used in classification. The cross-entropy is a continuous function that is always positive and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the weights and biases of the model.TensorFlow has a built-in function for calculating the cross-entropy. Note that it uses the values of the **logits** i.e., locally induced fields because it also calculates the softmax internally. TensorFlow class *locally induced fields* as *logits*. For more refer [TensorFlow-cross-entropy](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits_v2).cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=layer_fc_2, labels=y_true_hot)We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications.cost = tf.reduce_mean(cross_entropy)Optimization methodNow that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the `AdamOptimizer` which is an advanced form of `Gradient Descent`.optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)Performance measuresWe need a few more performance measures to display the progress. Below: this is a vector of booleans whether the predicted class equals the true class of each image.correct_prediction = tf.equal(y_pred, y_true)This calculates the classification accuracy by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then calculating the average of these numbers.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))Part 4 TensorFlow Run Create TensorFlow sessionOnce the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.def tf_reset(): try: sess.close() except: pass return tf.Session() sess = tf_reset()Initialize variablesThe variables for `weights` and `biases` must be initialized before we start optimizing them.# Initialize the variables sess.run(tf.global_variables_initializer())Helper-function to perform optimization iterationsThere are 55000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore use *Batch Stochastic Gradient Descent with Adam Optimizer* which only uses a small batch of images in each iteration of the optimizer.batch_size = 64Function for performing a number of optimization iterations so as to gradually improve the `weights` and `biases` of the model. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples.# Run training # Counter for total number of iterations performed so far. total_iterations = 0 def optimize(iterations, verbose_freq=100): # Ensure we update the global variable rather than a local copy. 
global total_iterations for i in range(total_iterations, total_iterations+iterations): # get a random subset/batch of the training data # x_batch now holds a batch of images and # y_true_batch are the true labels for those images. X_batch, y_true_batch, y_true_hot_batch = data.random_batch(train_img, train_label, hot_label_train, batch_size=batch_size) # run the optimizer and get the mse _, cost_run = sess.run([optimizer, cost], feed_dict={X: X_batch, y_true_hot: y_true_hot_batch}) # print the mse every so often if i % verbose_freq == 0: acc = sess.run(accuracy, feed_dict={X: X_batch, y_true_hot: y_true_hot_batch}) print('Epoch: {0:d} Training Accuracy: {1:0.3f} Training Cost: {2:0.3f}'.format(i, acc, cost_run))Helper-functions to show performanceFunction for printing the classification accuracy on the test-set.def print_accuracy(): # Use TensorFlow to compute the accuracy. acc = sess.run(accuracy, feed_dict={X: test_img, y_true: test_label, y_true_hot: hot_label_test}) # Print the accuracy. print("Accuracy on test-set: {0:.1%}".format(acc))Helper-function to plot confusion matrixFunction for printing and plotting the confusion matrix using scikit-learn.def plot_confusion_matrix(cls_pred, color_map='viridis'): # This is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # Get the true classifications for the test-set. cls_true = test_label # Get the confusion matrix using sklearn. cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred) # Print the confusion matrix as text. print(cm) # Plot the confusion matrix as an image. plt.figure(figsize=(8, 6)) plt.matshow(cm, cmap=color_map) # Make various adjustments to the plot. plt.colorbar() tick_marks = np.arange(num_classes) plt.xticks(tick_marks, range(num_classes)) plt.yticks(tick_marks, range(num_classes)) plt.xlabel('Predicted') plt.ylabel('True') # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show()Helper-function to plot example errorsFunction for plotting examples of images from the test-set that have been mis-classified.def plot_example_errors(cls_pred, correct): # This function is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # correct is a boolean array whether the predicted class # is equal to the true class for each image in the test-set. # Negate the boolean array. incorrect = (correct == False) # Get the images from the test-set that have been # incorrectly classified. images = test_img[incorrect] # Get the predicted classes for those images. cls_pred = cls_pred[incorrect] # Get the true classes for those images. cls_true = test_label[incorrect] # Plot the first 9 images. plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9])Helper-function for showing the performanceFunction for printing the classification accuracy on the test-set.It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function.# Split the test-set into smaller batches of this size. test_batch_size = 256 def print_test_accuracy(show_example_errors=False, show_confusion_matrix=False): # Number of images in the test-set. num_test = data.num_test # Allocate an array for the predicted classes which # will be calculated in batches and filled into this array. 
cls_pred = np.zeros(shape=num_test, dtype=np.int) # Now calculate the predicted classes for the batches. # We will just iterate through all the batches. # There might be a more clever and Pythonic way of doing this. # The starting index for the next batch is denoted i. i = 0 while i < num_test: # The ending index for the next batch is denoted j. j = min(i + test_batch_size, num_test) # Get the images from the test-set between index i and j. images = test_img[i:j, :] # Get the associated labels. labels = hot_label_test[i:j, :] # Calculate the predicted class using TensorFlow. cls_pred[i:j] = sess.run(y_pred, feed_dict={X: images, y_true_hot: labels}) # Set the start-index for the next batch to the # end-index of the current batch. i = j # Convenience variable for the true class-numbers of the test-set. cls_true = test_label # Create a boolean array whether each image is correctly classified. correct = (cls_true == cls_pred) # Calculate the number of correctly classified images. # When summing a boolean array, False means 0 and True means 1. correct_sum = correct.sum() # Classification accuracy is the number of correctly classified # images divided by the total number of images in the test-set. acc = float(correct_sum) / num_test # Print the accuracy. msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})" print(msg.format(acc, correct_sum, num_test)) # Plot some examples of mis-classifications, if desired. if show_example_errors: print("Example errors:") plot_example_errors(cls_pred=cls_pred, correct=correct) # Plot the confusion matrix, if desired. if show_confusion_matrix: print("Confusion Matrix:") plot_confusion_matrix(cls_pred=cls_pred)Part 5 Performance before any optimizationThe accuracy on the test-set will be vary low around 10%. This is because the model has only been initialized and not optimized at all, so it always predicts that the image shows a zero digit.print_test_accuracy()Accuracy on Test-Set: 10.1% (1010 / 10000)Performance after 10 optimization iterationoptimize(iterations=10) print_test_accuracy()Accuracy on Test-Set: 24.3% (2433 / 10000)Performance after 100 optimization iterationsoptimize(iterations=90) # We already performed 10 iterations print_test_accuracy(show_example_errors=True)Accuracy on Test-Set: 70.4% (7036 / 10000) Example errors:Performance after 1000 optimization iterationsoptimize(iterations=900) print_test_accuracy(show_example_errors=True)Accuracy on Test-Set: 93.9% (9389 / 10000) Example errors:Performance after 10000 optimization iterationsoptimize(iterations=9000, verbose_freq=1000) print_test_accuracy(show_example_errors=True, show_confusion_matrix=True)Accuracy on Test-Set: 98.8% (9876 / 10000) Example errors:Part 6 Visualization of Weights and LayersIn trying to understand why the convolutional neural network can recognize handwritten digits, we can visualize the weights of the convolutional filters and the resulting output images.**Remember:** how tensorflow sees each image and filter of a convolution layer- Image: `[batch, in_height, in_width, in_channels]`- Filter: `[filter_height, filter_width, in_channels, out_channels]` Helper-function for plotting convolutional weightsdef plot_conv_weights(weights, filter_no=0, color_map='seismic'): # Assume weights are TensorFlow ops for 4-dim variables # e.g. weights_conv1 or weights_conv2. # Retrieve the values of the weight-variables from TensorFlow. # A feed-dict is not necessary because nothing is calculated. w = sess.run(weights) # Get the lowest and highest values for the weights. 
# This is used to correct the colour intensity across # the images so they can be compared with each other. w_min = np.min(w) w_max = np.max(w) # Number of filters used in the conv. layer. num_filters = w.shape[3] # Number of channels used in the conv. layer. num_channels = w.shape[2] # If we want to plot one filter then it would be of size (filter_height x filter_width x num_channels) # In total we will have num_filters no. of filters # Number of grids to plot. # Rounded-up, square-root of the number of channels num_grids = int(np.ceil(np.sqrt(num_channels))) if num_grids == 1: print('Filter Shape:', w.shape[0], 'x', w.shape[0]) print('Filter Channels:', num_channels) print('Hence each filter shape:', w.shape[0], 'x', w.shape[0], 'x', num_channels) print('Total no. of filters:', num_filters) print('Plotting ALL the Filters') new_num_grids = int(np.ceil(np.sqrt(num_filters))) fig, axes = plt.subplots(new_num_grids, new_num_grids) for i, ax in enumerate(axes.flat): # Plot the image img = w[:, :, 0, i] ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='nearest', cmap=color_map) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) plt.show() else: # Check whether the number of input channels is a perfect square or an odd power of 2. # If neither condition is satisfied then it will not plot properly. if num_grids**2 == num_channels: # Create figure with a grid of sub-plots. fig, axes = plt.subplots(num_grids, num_grids) else: a = np.log(num_channels)/np.log(2) num_grids_1 = np.power(2, int((a+1)/2)) num_grids_2 = np.power(2, int((a-1)/2)) # Create figure with a grid of sub-plots. fig, axes = plt.subplots(num_grids_1, num_grids_2) print('Filter Shape:', w.shape[0], 'x', w.shape[0]) print('Filter Channels:', num_channels) print('Hence each filter shape:', w.shape[0], 'x', w.shape[0], 'x', num_channels) print('Total no. of filters:', num_filters) print('Plotting filter number:', filter_no) # Plot all the filter-weights on the figure created above. for i, ax in enumerate(axes.flat): # Get the weights for the i'th filter of the input channel. # See new_conv_layer() for details on the format # of this 4-dim tensor. img = w[:, :, i, filter_no] # Plot image. ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='nearest', cmap=color_map) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show()Helper-function for plotting the output of a convolutional layerdef plot_conv_layer(layer, image, color_map='binary'): # Assume layer is a TensorFlow op that outputs a 4-dim tensor # which is the output of a convolutional layer, # e.g. layer_conv1 or layer_conv2. # Create a feed-dict containing just one image. # Note that we don't need to feed y_true because it is # not used in this calculation. feed_dict = {X: [image]} # Calculate and retrieve the output values of the layer # when inputting that image. values = sess.run(layer, feed_dict=feed_dict) # Number of filters used in the conv. layer. num_filters = values.shape[3] # Number of grids to plot. # Rounded-up, square-root of the number of filters. num_grids = int(np.ceil(np.sqrt(num_filters))) # Create figure with a grid of sub-plots. fig, axes = plt.subplots(num_grids, num_grids) # Plot the output images of all the filters. for i, ax in enumerate(axes.flat): # Only plot the images for valid filters.
if iInput ImagesHelper-function for plotting an image.def plot_image(image): plt.imshow(image.reshape(img_shape), interpolation='nearest', cmap='binary') plt.show() # Example: image_1 = test_img[0] plot_image(image_1) # Example: image_2 = test_img[4] plot_image(image_2)Convolution Layer 1Now plot the one or all (depending on no. of input channels) filter weights for the first convolutional layer. **Note** that positive weights are red and negative weights are blue.plot_conv_weights(weights=weights_conv_1)Filter Shape: 5 x 5 Filter Channels: 1 Hence each filter shape: 5 x 5 x 1 Total no. of filters: 16 Plotting ALL the FiltersApplying each of these convolutional filters to the first input image gives the following output images, which are then used as input to the second convolutional layer. **Note** that these images are down-sampled to `14 x 14` pixels which is half the resolution of the original input image.plot_conv_layer(layer=layer_conv_1, image=image_1)The following images are the results of applying the convolutional filters to the second image.plot_conv_layer(layer=layer_conv_1, image=image_2)Convolution Layer 2Now plotting the one or all (depending on no. of input channels) filter weights for the second convolutional layer. There are `16` output channels from the first conv-layer, which means there are `16` input channels to the second conv-layer.plot_conv_weights(weights=weights_conv_2, filter_no=0)Filter Shape: 5 x 5 Filter Channels: 16 Hence each filter shape: 5 x 5 x 16 Total no. of filters: 36 Plotting filter number: 0There are `36` output channels to the second convolutional layer, so we can make another `35` plots of filter weights like this. For example for the second filter.plot_conv_weights(weights=weights_conv_2, filter_no=1)Filter Shape: 5 x 5 Filter Channels: 16 Hence each filter shape: 5 x 5 x 16 Total no. of filters: 36 Plotting filter number: 1Close TensorFlow SessionAfter done using TensorFlow, close the session to release its resources.# sess = tf_reset()Table of Contents1  Objectives2  Solving the Line of Best Fit by Guessing3  The Loss Function4  The Cost Function5  Better Way of Guessing: Gradient Descent5.1  Gradient Descent in Words5.2  Stepping Down a Hill: Step Size5.3  Putting It All Together6  Level Up: Gradient Descent Walk Throughimport numpy as np import matplotlib.pyplot as plt %matplotlib inlineObjectives - Explain and use the concept of a gradient- Explain the algorithm of gradient descent- Describe the effect of the "learning rate" in the algorithm Solving the Line of Best Fit by Guessing Let's say we have some data below:# Randomly created data in x & y np.random.seed(27) x = np.random.rand(30,1).reshape(30) y_randterm = np.random.normal(0,3,30) y = 3 + 50 * x + y_randtermHere's the data plotted out:f, ax = plt.subplots(figsize=(8,6)) ax.scatter(x, y) ax.set_title('Data Points to Model') ax.set_xlabel('x', fontsize=14) ax.set_ylabel('y', fontsize=14) ax.set_xlim(0,1) ax.set_ylim(0,60) plt.tight_layout()If we wanted to make a best-fit line, what would you say it's about? Let's create a couple functions to make this easier to make a guess# Plotting a guess of a regression line def regression_formula(x, a, b): return a*x + b def plot_data_and_guess(slope, intercept, ax, x1=x, x2=y, **kwargs): ''' Plot our data and regression line on the given axis. Arguments: slope : float Value for the slope the regression line. intercept : float Value for the intercept the regression line. 
ax : Axes Axis to plot data and regression line x1 : array-like Values along the x-axis x2 : array-like Values along the y-axis Returns: fig : Figure ax : Axes ''' # Plot data and regression line ax.scatter(x1, x2) yhat = regression_formula(x1, slope ,intercept) ax.plot(x1, yhat, 'r-', **kwargs) # Embelishments ax.set_title('Data Points to Model') ax.set_xlabel('x', fontsize=14) ax.set_ylabel('y', fontsize=14) ax.set_xlim(0,1) ax.set_ylim(0,60) return axSo what do you think the regression parameters are?# Our guess guess = { 'slope': 30, 'intercept': 0, 'color':'orange' } f, ax = plt.subplots(figsize=(8,6)) plot_data_and_guess(**guess, ax=ax)What would be your next guess be? - How can we tell when our guess is "better"?- Could we formalize this? The Loss Function One way we can know how well our guess or _model_ did is to compare the predicted values with the actual values. These are the _residuals_. So this would give us the error for each data point:$$ r_i = \hat{y}_i - y_i $$def calculate_residuals(x_values, y_values, slope, intercept): '''Find the residulas for each data point''' yhat = intercept + slope*x_values errors = y_values - yhat return errorsThis is great but we can go further by having just one number to represent how "bad" or "good" our model was to the real points. This leads us to the **mean squared error** or **MSE**. This is all the residuals squared and then averaged:$$ MSE = \frac{1}{n} \sum_{i}^{n} (\hat{y}_i - y_i)^2 $$def mse(x_values, y_values, slope, intercept): resid_sq = calculate_residuals(x_values, y_values, slope, intercept)**2 return sum(resid_sq)/len(x_values) # Use our guess from earlier slope = guess.get('slope', 30) intercept = guess.get('intercept', 0) mse(x,y,slope,intercept)> The function we use to find how bad our model did in prediction is typically called the **loss function** What we found here is great! We can now compare different models with one another.If we made a few different guesses, we could make our predictions and then calculate from the _loss function_ how good or bad our model did! We will want to find the _smallest loss_. The Cost Function Now our model changes based on the different model _parameters_ (the coefficients $\beta_i$ for linear regression). If we imagine all the different ways we can adjust these parameters $\vec{\theta}$ and measure how well the model with the loss or **cost function** $J(\vec{\theta})$, we can plot this as a surface in this multidimensional plane. See the image below: ![](img/gradientdescent.png) > Note that the terms **loss function** and **cost function** are frequently used interchangeably. Sometimes they are the same function, but sometimes they differ by making changes in the cost to improve _training_ or _learning_. Let's try creating the cost function's curve/surface for just one parameter (slope) using our earlier data example:table = np.zeros((20,2)) # Find the MSE for different slope values for idx, val in enumerate(range(40, 60)): table[idx,0] = val table[idx,1] = mse(x, y, slope=val, intercept=0) plt.figure(figsize=(10,7)) plt.plot(table[:,0], table[:,1], '-') plt.xlabel("Slope Values", fontsize=14) plt.ylabel("MSE", fontsize=14) plt.title("MSE with changes to slope", fontsize=16);Based on this graph, what is the optimal slope value?How could we extend this to find the best slope _and_ intercept combination? Better Way of Guessing: Gradient Descent So this probably all sounds great! 
We just need to find the minimum of the cost function!But there's some bad news; we don't usually know what the cost function (which can be complicated!) "looks" like without trying a whole lot of different parameters $\vec{\theta}$. We'd need an _infinite_ number of parameter combinations to know $J(\vec{\theta})$ completely. So what can we do?Well, we can take one "guess" (set of parameters) and then measure $J(\vec{\theta})$. Then we can adjust our guess/parameters in a "good" direction, "down the hill". This is the basic idea of gradient descent. > **Gradient descent** is an optimization procedure that uses the _gradient_ (a generalized notion of a derivative) of the cost function. So how do find this "better" guess? Well, we need to find the best direction to move "downhill" the fastest. We can do this with a generalization of the derivative called the **gradient**:$$\begin{align}\\ \large -\nabla J &= \sum_i \dfrac{\partial J}{\partial \theta_i} \\ &= \frac{\partial J}{\partial \theta_1} + \frac{\partial J}{\partial \theta_2} + \dots + \frac{\partial J}{\partial \theta_n}\end{align}$$ In the multivariate case, the gradient tells us how the function is changing **in each dimension**. A large value of the derivative with respect to a particular variable means that the gradient will have a large component in the corresponding direction. Therefore, **the gradient will point in the direction of steepest increase**. ![](img/gradientdescent.png) Gradient Descent in Words - Make a guess at where the function attains its minimum value- Calculate the gradient/derivative at that point- Use that value to decide how to make your next guess!Repeat until we get the derivative as close as we like to 0.If we want to improve our guess at the minimum of our loss function, we'll move in the **opposite direction** of the gradient away from our last guess. Hence we are using the *gradient* of our loss function to *descend* to the minimum value of the relevant loss function. Stepping Down a Hill: Step Size So we now have the basic idea of gradient descent of "going down a hill" and hopefully it's obvious that the steeper the hill, the more we can adjust our parameters to get to "bottom" (optimal parameters) faster.But a big question is how big of a step do we take? > The amount we adjust our parameter is determined by out **step size** or **learning rate** $\alpha$ If our steps are _too big_, we risk skipping over the minimum value (optimal parameters).If our steps are _too small_, it might take us too long to reach the minimum value. ![learning_rate](https://www.jeremyjordan.me/content/images/2018/02/Screen-Shot-2018-02-24-at-11.47.09-AM.png) Here's an elegant solution: Make the size of your step **proportional to the value of the derivative at the point where you currently are in parameter space**! If we're very far from the minimum, then our values will be large, and so we therefore can safely take a large step; if we're close to the minimum, then our values will be small, and so we should therefore take a smaller step.I said the size of the step is proportional to the value of the derivative. The constant of proportionality is often called the **"learning rate"**. This page helps to explain the dangers of learning rates that are too large and too small: https://www.jeremyjordan.me/nn-learning-rate/. > Note there are other optimizations we can do for gradient descent and rely on adjusting our cost function or how we take steps or both. 
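Before putting it all together, here is a small self-contained sketch (not part of the original lesson, names and values are purely illustrative) that applies the one-dimensional update rule to f(x) = x^2, whose derivative is 2x, with three different learning rates:
import numpy as np

def gradient_descent_1d(df, x0, alpha, steps=25):
    """Repeatedly apply x_new = x_old - alpha * df(x_old) and return the path of guesses."""
    path = [x0]
    for _ in range(steps):
        path.append(path[-1] - alpha * df(path[-1]))
    return np.array(path)

df = lambda x: 2 * x  # derivative of f(x) = x**2, whose minimum is at x = 0

print(gradient_descent_1d(df, x0=5.0, alpha=0.01)[-1])  # too small: after 25 steps still far from 0
print(gradient_descent_1d(df, x0=5.0, alpha=0.30)[-1])  # reasonable: converges close to 0
print(gradient_descent_1d(df, x0=5.0, alpha=1.10)[-1])  # too large: overshoots and diverges
Because the step is proportional to the derivative, the iterates with a sensible learning rate automatically slow down as they approach the minimum, which is exactly the behaviour described above.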
Putting It All Together The general algorithm looks like this:We'll make a guess, $\vec{s}$, at where our loss function attains a minimum. If we're not happy with how close the value of the gradient there is to 0, then we'll make a new guess, and the new guess will be constructed as follows:$\large\vec{s}_{new} = \vec{s}_{old} - \alpha\nabla f(\vec{s}_{old})$,where $\alpha$ is the learning rate.In the one-dimensional case, we'll have:$\large x_{new} = x_{old} - \alpha\frac{df}{dx}|_{x_{old}}$. Level Up: Gradient Descent Walk Through Let's go back to our original example and implement gradient descent to find the optimal parameters (slope and intercept)f, ax = plt.subplots(figsize=(8,6)) ax.scatter(x, y) ax.set_title('Data Points to Model') ax.set_xlabel('x', fontsize=14) ax.set_ylabel('y', fontsize=14) plt.tight_layout()First we need to find the gradient for each cost function (2-dimensions: a & b; slope & intercept):$$ \frac{\partial}{\partial b} (y_i - (b + ax_i))^2 = -2 \cdot (y_i-ax_i - b)$$$$ \frac{\partial}{\partial a} (y_i - (b + ax_i))^2 = -2 \cdot x_i \cdot (y_i-ax_i - b)$$ Let's formalize this into a function:def partial_deriv(a, b, x_i, y_i, respect_to): ''' Get the partial derivative for cost function with respect to slope (a) or intercept (b). ''' if respect_to == 'b': # intercept return (y_i - (a * x_i + b)) elif respect_to == 'a': # slope return (x_i * (y_i - (a * x_i + b))) else: print('Choose either respect_to: a or b ') return 0Next let's define the step we take (amount we adjust the parameters by) using the gradient and learning rate:def step_gradient(a, b, x, y, learning_rate): db = 0 da = 0 # For each data point, update the derivative for the slope & intercept N = len(x) for i in range(len(x)): # Derivatives already pre-done # Partial derivatives of loss/cost function with respect to b & a db += -(2/N) * partial_deriv(a,b,x[i],y[i],respect_to='b') da += -(2/N) * partial_deriv(a,b,x[i],y[i],respect_to='a') # Adjust the slope & intercept by the gradient new_b = b - (learning_rate * db) new_a = a - (learning_rate * da) return (new_a, new_b)Let's try it out and keep track of our guessesguesses = [] alpha = 0.05 # Our guess guess = { 'slope': 60, 'intercept': 10 } guesses.append(guess) f, ax = plt.subplots(figsize=(8,6)) plot_data_and_guess(**guess, ax=ax); step = step_gradient(guess['slope'], guess['intercept'], x, y, learning_rate=alpha) step mse(x, y, guess['slope'], guess['intercept'])Let's update our guess and try again:# Our guess using the new step new_slope, new_intercept = step guess = { 'slope': new_slope, 'intercept': new_intercept } guesses.append(guess) # Getting adjusted parameters step = step_gradient(guess['slope'], guess['intercept'], x, y, learning_rate=alpha) display(step) display(mse(x, y, guess['slope'], guess['intercept'])) # Plotting out our new parameters f, ax = plt.subplots(figsize=(8,6)) plot_data_and_guess(**guess, ax=ax);Let's repeat this another 200 times:for i in range(200): # Our guess using the new step new_slope, new_intercept = step guess = { 'slope': new_slope, 'intercept': new_intercept } guesses.append(guess) # Getting adjusted parameters step = step_gradient(guess['slope'], guess['intercept'], x, y, learning_rate=alpha) # Only display every 10 if (i % 10) == 0: print(f'Step # {i}:') display(step) display(mse(x, y, guess['slope'], guess['intercept'])) print('-'*30)What does our final result look like?# Plotting out our new parameters f, ax = plt.subplots(figsize=(8,6)) plot_data_and_guess(**guesses[-1], ax=ax);Let's take a look at the 
MSE over the guesses:mses = [ mse(x,y,d['slope'],d['intercept']) for d in guesses ] plt.plot(range(len(mses)),mses)Functions_Authors: Mahesh--- Lesson objectivesBy the end of this lesson, you should be able to:1. Successfully **create** and **invoke** a function2. Understand how to use parameters in a function3. Understand how to return a value from a function4. Know what a lambda function is and how to create one Intro---From the pre-work (and perhaps what you've done before DSI), remember that functions:- start with `def`, followed by the name of the function.- take inputs (or arguments).- return outputs.- use `return` instead of `print`.- are used frequently to make coding more efficient.One of my favorite novels - 's *Fahrenheit 451* - has a quote that is appropriate here: > "We begin by beginning, I guess." Activity: Basic function---Create a function in cell below called `greeting` that prints `"Howdy"`.# Declare the function # Invoke the functionActivity: Function parameters---In the [Kaggle Titanic competition](https://www.kaggle.com/c/titanic/data), the names of everyone in the manifest look like this:> Last, Title. FirstCreate a function called `titanic_name` that accepts 3 parameters:- `first_name`- `last_name`- `title` And prints the full name in the format above. Named parameters vs Ordered parameters---In the above example, you must order in which you add your arguments coincides with the order of the parameters in the function declaration. If you called them out of order like so:```pythontitanic_name('Doe', 'Captain', 'John')```Then the following would happen:1. `'Doe'` would be assigned to `first_name`2. `'Captain'` would be assigned to `last_name`3. `'John'` would be assigned to `title`.As a result, the function would print `'Captain, '`. No bueno.# greet_by_name # titanic_nameИмпорт пакетов- **opencv** - пакет для Computer Vision и дата препроцессинага для него.- **sys** - пакет для работы с выводом на консоль.- **numpy** - пакет для работы с массивами данных.- **os** - пакет для работы с операционной системой, используется в связке с sys.- **inspect** - пакет помогает извлекать информацию из лайвв объектов, таких как классы, функции и т.т.п.- **skimage** - пакет алгоритмов, предназначенных для работы с изображениями, в особенности с их препроцессингом. Дополнение к opencv. От сюда извлекаем модуль ***io*** (Утилиты для чтения и записи изображений в различных форматаx) и функцию ***resize*** (Изменяет размер изображения до указанного).- **matplotlib.pyplot** - модуль для графического отображения данных.- **IPython** - от сюда извлекается функция, которая помогает очищать вывод в jupyter.- **pandas** - пакет для обработки и анализа данных.%matplotlib inline import cv2 import sys import numpy as np import os import inspect from skimage import io import matplotlib.pyplot as plt from skimage.transform import resize from IPython.display import clear_output import pandas as pdГлобальные переменные1. **img_size** - размер изображения, на выходе. Данный размер должен совпадать с размером изображений на котых обучается наша CNN.2. **faces_in_image_limit** - количество лиц, на одной фотографии.img_size = 256 faces_in_image_limit = 1Основной алгоритм. SSD: Single Shot MultiBox DetectorВыбор пал на SSD, т.к. по сравнению с Haar Cascade данный алгоритм работает в разы лучше. В дополнение он быстрее Faster R-CNN, а перформансы у них очень близки друг к другу. Сравнение алгоритмов можно найти [тут](https://towardsdatascience.com/face-detection-models-which-to-use-and-why-d263e82c302c). 
Полное описание SSD от их авторов можно найти на [arxiv'e](https://arxiv.org/pdf/1512.02325.pdf) наглядное описание можно посмотреть [тут](https://pythonawesome.com/ssd-single-shot-multibox-detector-a-pytorch-tutorial-to-object-detection/). Основная логикаНа вход поступает N 3-х канальных RGB картинок $X^T \in \mathbb{R}^N$, где $x^T = (w, h, 3)$. В зависимости от типа SSD нужно масшатабировать картинку, чтобы она была пригодна для input-layer. В нашем случае $x^T = (300, 300, 3)$. Далее каждая картинка прогоняется через свёрточные слои. Авторы использовали немного модифицированную VGG16.$$\begin{aligned}&\tilde{x}_1 \sim MaxPool_1(Conv_{1_2}(Conv_{1_1}(x)))\\&\tilde{x}_2 \sim MaxPool_2(Conv_{2_2}(Conv_{2_1}(\tilde{x}_1)))\\&\tilde{x}_3 \sim MaxPool_3(Conv_{3_3}(Conv_{3_2}(Conv_{3_1}(\tilde{x}_2))))\\&\tilde{x}_4 \sim MaxPool_4(Conv_{4_3}(Conv_{4_2}(Conv_{4_1}(\tilde{x}_3))))\\\end{aligned}$$Здесь, приметим, что $\tilde{x}_4^T \in \mathbb{R}^{38 \times 38 \times 512}$.$$\begin{aligned}&\tilde{x}_5 \sim MaxPool_5(Conv_{5_3}(Conv_{5_2}(Conv_{5_1}(\tilde{x}_4))))\\&\tilde{x}_6 \sim Conv_{6}(\tilde{x}_5)\\&\tilde{x}_7 \sim Conv_{7}(\tilde{x}_6)\end{aligned}$$Таким образом мы сможем извлечь даже мелкие детали. На данный момент $\tilde{x}_7^T \in \mathbb{R}^{19 \times 19 \times 1024}$.Далее на топ архитектуры добавляются вспомогательные слои:$$\tilde{x}_7^T \in \mathbb{R}^{19 \times 19 \times 1024} \rightarrow \underbrace{Conv_{8_2}(Conv_{8_1}(\tilde{x}_7))}_{\tilde{x}_8^T \in \mathbb{R}^{10 \times 10 \times 512}} \rightarrow \underbrace{Conv_{9_2}(Conv_{9_1}(\tilde{x}_8))}_{\tilde{x}_9^T \in \mathbb{R}^{5 \times 5 \times 256}} \rightarrow \underbrace{Conv_{10_2}(Conv_{10_1}(\tilde{x}_9))}_{\tilde{x}_{10}^T \in \mathbb{R}^{3 \times 3 \times 256}} \rightarrow \underbrace{Conv_{11_2}(Conv_{11_1}(\tilde{x}_{10}))}_{\tilde{x}_{11}^T \in \mathbb{R}^{1 \times 1 \times 256}}$$У нас есть 6 помеченных слоёв, на которых мы будем проводить детекцию с разным уровнем детализации. Теперь на свёрнутую картинку накладываются дефолтные [AnchorBoxes](https://www.mathworks.com/help/vision/ug/anchor-boxes-for-object-detection.html) (Приоры):* **Приоры накладываются на помеченные карты характеристик $x$**.* **Каждый приор имеет масштаб $s$, тогда площадь данного приора равна площади квадрата со стороной $\sqrt{s}$**. Например для самой большой карты характеристик (КХ) $\tilde{x}_4$ масштаб приора будет 10\% от размерности изображения. Для следующей КХ ($\tilde{x}_7$) 20\%, для следующей 30\% и так до 90\%.* **В каждой ячейке карты характеристик, находятся несколько приор с разным соотношением сторон**. Все КХ будут иметь приоры с соотношением сторон $\frac{1}{1},\ \frac{1}{2},\ \frac{2}{1}$. Промежуточные КХ ($\tilde{x}_7,\ \tilde{x}_8,\ \tilde{x}_9$) в дополнение имеют $\frac{1}{3},\ \frac{3}{2}$ приоры. И напоследок, каждая КХ имеет дополнительный приор $\frac{1}{1}$.| Карта характеристик | Размерность | Масштаб приора | Соотношения сторон | Количество приор на одну ячейку | Общее количество приор на КХ ||:-------------------:|:--------------:|:--------------:|:-----------------------------------------------------------------------------------:|:-------------------------------:|:----------------------------:|| $\tilde{x}_4$ | $38 \times 38$ | 0.1 | $\frac{1}{1};\ \frac{1}{2};\ \frac{2}{1};$ + доп. приор | 4 | 5766 || $\tilde{x}_7$ | $19 \times 19$ | 0.2 | $\frac{1}{1};\ \frac{1}{2};\ \frac{2}{1};$ + доп. 
приор | 6 | 2166 || $\tilde{x}_8$ | $10 \times 10$ | 0.375 | $\frac{1}{1};\ \frac{1}{2};\ \frac{2}{1};\ \frac{1}{3};\ \frac{3}{2};$ + доп. приор | 6 | 600 || $\tilde{x}_9$ | $5 \times 5$ | 0.55 | $\frac{1}{1};\ \frac{1}{2};\ \frac{2}{1};\ \frac{1}{3};\ \frac{3}{2};$ + доп. приор | 6 | 150 || $\tilde{x}_{10}$ | $3 \times 3$ | 0.725 | $\frac{1}{1};\ \frac{1}{2};\ \frac{2}{1};$ + доп. приор | 4 | 36 || $\tilde{x}_{11}$ | $1 \times 1$ | 0.9 | $\frac{1}{1};\ \frac{1}{2};\ \frac{2}{1};$ + доп. приор | 4 | 4 || **Общее количество** | - | - | - | - | **8732 приора** |Приоры объявлены с помощью масштабирующего фактора и соотношения сторон1. Со следующими, масштобирующем фактором и соотношением сторон:$$\begin{aligned}&w \cdot h = s^2\\&\frac{w}{h} = a\end{aligned}$$2. От сюда получаем:$$\begin{aligned}&w = s \cdot \sqrt{a}\\&h = \frac{s}{\sqrt{a}}\end{aligned}$$Для каждого приора расчитывается вероятность (confidence) для каждого объекта. В нашем случае только лицо, т.е. какова вероятность, что в пределах приора находится лицо. Далее мы пытаемся отрегулировать данный приор таким образом, чтобы данная вероятность была максимальной. Тем самым мы получаем следующие сдвиги.$$\begin{aligned}&g_{{c}_{x}} = \frac{c_x - \hat{с}_x}{\hat{w}}\\&g_{{c}_{y}} = \frac{c_y - \hat{с}_y}{\hat{h}}\\&g_w = \log{\left(\frac{w}{\hat{w}}\right)}\\&g_h = \log{\left(\frac{h}{\hat{h}}\right)}\end{aligned}$$где $(c_x, c_y, w, h)$ данные приора с наибольшей вероятностю содержания объекта.Как видите, каждое смещение нормализовано на соответствующий размер приора. Что логично, потому что определенное смещение будет менее значительным для более крупного приора, чем для меньшего.Чтобы определить саму область используется IoU (Intersection over Unioin) $\frac{A \cap B}{A \cup B}$. Если IoU приора и реального AnchorBox'a больше 0.5 это значит совпадение, иначе несовпадение.Для обучения используются следующие лоссы:1. Позиция:$$L_{loc}(x, l, g) = \sum_{i \in Pos}^N{\sum_{m \in \{c_x, c_y, w, h\}}{\mathbb{1}\{IoU(x_{ij}^k) > 0.5\}\text{L1}(l_i^m - \hat{g}_j^m})}$$2. Вероятности:$$L_{conf}(x,c) = - \sum_{i \in Pos}^N{\mathbb{1}\{IoU(x_{ij}^p) > 0.5\}\log{(\hat{c}_i^p)}} - \sum_{i \in N \in g}{\log{(\hat{c}_i^0)}}, \qquad \text{где }\hat{c}_i^p = \frac{e^{c_j^p}}{\sum_p{c_j^p}} $$3. Общая:$$L(x,c,l,g) = \frac{1}{N}(L_{conf}(x,c) + \alpha L_{loc}(x,l,g))$$def extract_faces(img): """This function extracts a face from a photo. :param img: the image from which we wanna to derive a face. :return: np.array of an extracted face and confidence that it is a human face. """ model_file = "utils/opencv_face_detector_uint8.pb" config_file = "utils/opencv_face_detector.pbtxt" # This network has been created for the Caffe and Tensorflow, I used the second one net = cv2.dnn.readNetFromTensorflow(model_file, config_file) # Returning results image_data_fin = [] confidence_res = None h, w = img.shape[:2] # https://www.pyimagesearch.com/2017/11/06/deep-learning-opencvs-blobfromimage-works/ blob description # First we resize the image to 300x300 according to the pretrained weights # Second, the scale factor (standard deviation in the z-scoring), I do not use the scale therefore set it as 1.0 # Third, mean-tupple of RGB [mu-Red, mu-Green, mu-Blue] # Forth, indicates that swap first and last channels in 3-channel image is necessary. 
# Fifth, indicates whether image will be cropped after resize or not blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), [104, 117, 123], False, False) # pass the blob through the network and obtain the detections and predictions net.setInput(blob) detections = net.forward() # loop over the detections for i in range(detections.shape[2]): # extract the confidence (i.e., probability) associated with the prediction # https://docs.opencv.org/trunk/d3/d63/classcv_1_1Mat.html confidence = detections[0, 0, i, 2] # If confidence is higher than 50% than if confidence > 0.5: # compute the (x, y)-coordinates of the bounding box for the object box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) (x, y, x1, y1) = box.astype("int") # create a new image (augmented image, in the way to cut off everything except a face) roi_color = img[y:y1, x:x1] im = resize(roi_color, (img_size, img_size)) image_data_fin.append(im) confidence_res = confidence # If the only one face on a photo then return it (as np.array) and confidence that it is a human face. if len(image_data_fin) != faces_in_image_limit: return [], None else: return image_data_fin, confidence_resДополнительные функции- **print_progress** - рисует прогресбар вида [---> ] 50%.- **count_files** - считает количество файлов в директории.def print_progress(total, current, image, like_type, missing_imgs): """This function print progress whereas files are handling. :param total: total number of files :param current: current number of handled files :param image: an image's name :param like_type: the folder from where we are handling files :param missing_imgs: number of files which were missed. It's required in purpose to reflect a percentage properly. """ def progressBar(current, total, missing_imgs, barLength = 20): """Represent a progress bar, like that [---> ] 50% :param total: total number of files :param current: current number of handled files :param missing_imgs: number of files which were missed. It's required in purpose to reflect a percentage properly. :param barLength: required in purpose to show the bar of the same length (default 20 symbols) """ percent = float(current) * 100 / (total - missing_imgs) arrow = '-' * int(percent/100 * barLength) + '>' spaces = ' ' * (barLength - len(arrow)) sys.stdout.write('\rProgress: [%s%s] %d %%\n' % (arrow, spaces, percent + 1)) sys.stdout.write('\r%d of %d %s files have been handling\n' % (current, total, like_type)) sys.stdout.write('\rImage: %s\n' % image) progressBar(current, total, missing_imgs) sys.stdout.flush() def count_files(path): """Count number of files in a folder (missin invisible files, like '.filename') :param path: path to folder. :return: Evaluated number of files """ return len([name for name in path if not name[0] =="."])Обрабатывающая функцияОсновная функция, которая обрбатывает фотографии, извлекая из них лица. 
Итогом она возвращает небольшую статистику по обработанным фотографиям:- **toatal amount** - сколько фотографий обработанно всего.- **missed amount** - количество пропущенных фотографий (из которых не получилось извлечь лица)- **handled ratio** - процент успешно обработанных фотографий.- **handled likes** - количество успешно обработанных фотографий из понравившихся.- **handled dislikes** - количество успешно обработанных фотографий из непонравившихся.# For each image, we want to know if each picture is attractive or unattractive # list of images translated into np-array images = [] # labels to each image labels = [] def handle_images(name=''): """The function process all photos and prepares them for training. :param name: the name of an user of a folder (name1_like) """ # The directory where this file is placed currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # Path to the folder with all samples folder data_path = os.path.join(os.path.dirname(currentdir), 'samples') name = name + '_' if name != '' else '' # List of files in like/dislike directory dislikes_images_stack = os.listdir(os.path.join(data_path, name + 'dislike')) likes_images_stack = os.listdir(os.path.join(data_path, name + 'like')) def process_folder(images_stack, like_type, name=''): """The function which processes a folder, by handling images an labeling them. :param images_stack: a list of images :param like_type: the type of folder which is processing. :param name: the name beside the like-type in folder name. :return: confidence-list (confidence that each passed image is a human face) , number of missed images, files processed, total number of images """ number_of_images = count_files(images_stack) files_processed = 0 confidence_list = [] number_of_missing_images = 0 for img in images_stack: if not img.startswith('.'): # Print progress clear_output(wait=True) print_progress(number_of_images, files_processed, img, like_type, number_of_missing_images) try: # obtain a face faces, confidence = extract_faces(cv2.imread(os.path.join(data_path, os.path.join(name + like_type, img)))) except Exception as e: raise e # Check if the only one face has been retrieved if len(faces) > 0 and len(faces) < 2: confidence_list.append(confidence) elif len(faces) == 0: number_of_missing_images += 1 # Labeling for face in faces: images.append(face) if like_type == 'like': labels.append(1) else: labels.append(0) files_processed += 1 return confidence_list, number_of_missing_images, files_processed, number_of_images # Gather infromation regard the processed files (along with processing) conf_list, NoMI, proc_files, NoI = process_folder(dislikes_images_stack, 'dislike', name) conf_list2, NoMI2, proc_files2, NoI2 = process_folder(likes_images_stack, 'like', name) conf_list.extend(conf_list2) conf_list = np.array(conf_list) NoMI += NoMI2 NoI += NoI2 return {'face_convincing': pd.DataFrame([['{:.2f} %'.format(np.mean(conf_list) * 100)], ['{:.2f} %'.format(np.amax(conf_list) * 100)], ['{:.2f} %'.format(np.amin(conf_list) * 100)], ['{:.2f} %'.format(np.std(conf_list) * 100)]], index=['mean', 'max', 'min', 'std'], columns=['percents']), 'images': pd.DataFrame([[NoI], [NoMI], ['{:.2f} %'.format((NoI - NoMI2)/NoI * 100)], [proc_files2], [proc_files]], index=['toatal amount', 'missed amount', 'handled ratio', 'handled likes', 'handled dislikes'], columns=['data'])} recap = handle_images() images = np.array(images) labels = np.array(labels) # images -- shows the information about handled photos # face_convincing -- 
shows statistics about face retrieving recap['images'] print(images.shape) print(labels.shape)(8239, 256, 256, 3) (8239,)Сохранение фотографий на жёстком дискеdef save_file(data, file_path_name): """Takes all our data here, images and labels. Compresses images in a numpy file. :param data: the data we wanna to save :param file_path_name: path to file where we wanna to store the data """ print("Saving {}.npy".format(file_path_name)) np.save(file_path_name, data) save_file(images, "processed_val_images") save_file(labels, "processed_val_labels")Saving processed_val_images.npy Saving processed_val_labels.npyRead GeoTIFF data through a BMI This notebook describes how to open and read data from GeoTIFF filesusing a [Basic Model Interface](https://bmi.readthedocs.io/) (BMI) through the `BmiGeoTiff` class included in the `bmi-geotiff` package. Setup To ensure all dependencies are met, set up a conda environment using the environment file found in the root directory of this repository:```conda env create --file=environment.yml```Then install the `bmi-geotiff` package:```make install``` Import a set of libraries for later use:import numpy as np from rasterio.plot import showOpen a file Import the `BmiGeoTiff` class from the `bmi-geotiff` package:from bmi_geotiff import BmiGeoTiffCreate an instance of this class.m = BmiGeoTiff()Calling `help` on the instance displays all the BMI methods that are available.help(m)Help on BmiGeoTiff in module bmi_geotiff.bmi object: class BmiGeoTiff(bmipy.bmi.Bmi) | BmiGeoTiff() -> None | | BMI-mediated access to data and metadata in a GeoTIFF file. | | Method resolution order: | BmiGeoTiff | bmipy.bmi.Bmi | abc.ABC | builtins.object | | Methods defined here: | | __init__(self) -> None | Initialize self. See help(type(self)) for accurate signature. | | finalize(self) -> None | Perform tear-down tasks for the model. | | Perform all tasks that take place after exiting the model's time | loop. This typically includes deallocating memory, closing files and | printing reports. | | get_component_name(self) -> str | Name of the component. | | Returns | ------- | str | The name of the component. | | get_current_time(self) -> float | Current time of the model. | | Returns | ------- | float | [...]The first step in using a BMI is calling the `initialize` method.This method requires a configuration file that provides initial values for the `GeoTiff` library wrapped by the BMI.A sample configuration file is provided in the current directory.ls cat config.yamlbmi-geotiff: filename: RGB.byte.tifIn this case, the configuration file simply lists the path to a GeoTIFF file(here, the test file RGB.byte.tif from the [rasterio](https://rasterio.readthedocs.io/) project).Call `initialize` with the sample configuration file:m.initialize("config.yaml")The GeoTIFF file listed in the configuration file has now been opened,and the information is contains can be accessed through BMI methods. Access data through the BMI Now that we've opened the GeoTIFF file, let's access the data and metadata it contains through the BMI.This will take a few steps.It may seem cumbersome at first, but there's payoff at the end.Start by displying the names of the variables exposed through the BMI.m.get_output_var_names()The (long) names used for these variables are instances of [CSDMS Standard Names](https://csdms.colorado.edu/wiki/CSDMS_Standard_Names).Standard Names are intended to be unambiguous; the tradeoff is that they tend to be long.Here, the first variable is for the raster data stored in the file. 
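The next few cells retrieve the variable's type, grid index, and grid size, allocate a flat array, and then fill it with `get_value`. As a preview of where this is going, here is a minimal helper sketch that bundles those same BMI calls into one function. The helper name `bmi_get_value` is ours and is not part of `bmi-geotiff`; it assumes the model instance has already been initialized.

```python
import numpy as np

def bmi_get_value(model, var_name):
    """Allocate a flat array of the right size and dtype, then fill it via the BMI."""
    dtype = model.get_var_type(var_name)   # e.g. the dtype string for the raster data
    grid = model.get_var_grid(var_name)    # index of the grid the variable is defined on
    size = model.get_grid_size(grid)       # total number of elements in the (flattened) grid
    values = np.ndarray(size, dtype)       # memory is allocated by the caller, not the BMI
    model.get_value(var_name, values)      # values come back flattened (1D)
    return values

# For example (the same calls the cells below make one at a time):
# raster = bmi_get_value(m, "gis__raster_data")
```

The cells below make exactly these calls one at a time so that each intermediate result can be inspected.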
Find the data type of the raster.dtype = m.get_var_type("gis__raster_data") dtypeWithin the BMI, functions that describe the grids that variables are defined on take an index instead of a variable name.Get the grid index for the raster data variable.grid = m.get_var_grid("gis__raster_data") gridThen find the total size of the raster data.size = m.get_grid_size(grid) sizeNext, get the raster data values.Two notes, however:* As a rule, memory should not be allocated within a BMI. This leads to the un-Pythonic way that we get the data--first creating an empty array, then passing it to a BMI function to receive values.* BMI arrays are flattened. This obviates array ordering issues between languages, but it does make >1D data harder to work with.Allocate an array for the raster data.raster = np.ndarray(size, dtype) rasterGet the data.m.get_value("gis__raster_data", raster)Note that the array is one-dimensional.raster.shapeReshape the data Like all BMI arrays, the raster values returned from the BMI `get_value` function are flattened.Let's restore their original dimensionality. First, determine the dimensionality of the raster variable.rank = m.get_grid_rank(grid) rankGet the dimensions of the raster data, first creating an array to store their values.shape = np.empty(rank, dtype=int) m.get_grid_shape(grid, shape)Reshape the raster data, creating a new array.rasterRGB = raster.reshape(shape) rasterRGB.shapeGet map projection information The data in the GeoTIFF file are georeferenced.The second and third variables exposed through the BMI,"gis__coordinate_reference_system" and "gis__gdal_geotransform", respectively,contain the proj.4 string and the [GDAL geotransform](https://gdal.org/user/raster_data_model.htmlaffine-geotransform) for the data.Get the geotransform through the BMI, following a process similar to what was used to obtain the raster data:* get variable type* get id of grid on which variable is defined* get size of grid* use the above to allocate an array for the transform* get the transformdtype = m.get_var_type("gis__gdal_geotransform") dtype grid = m.get_var_grid("gis__gdal_geotransform") grid size = m.get_grid_size(grid) size transform = np.ndarray(size, dtype) m.get_value("gis__gdal_geotransform", transform) transformVisualize Let's visualize the raster data as an image, with a little help from rasterio.show(rasterRGB, transform=list(transform))Conclusion Last, call the BMI `finalize` function.m.finalize()Fill in any place that says ` YOUR CODE HERE` or YOUR ANSWER HERE, as well as your name and collaborators below.Grading for pre-lecture assignments is all or nothing. Partial credit is available for in-class assignments and checkpoints, but **only when code is commented**.NAME = "" COLLABORATORS = ""---import grading_helper as _test # Space for imports, utility functions, etc. # YOUR CODE HEREEigenfrequenciesConsider a chain of four identical masses connected by five ideal springs. We will neglect the effects of gravity for simplicity. Your task is to make an animation that shows the chain vibrating horizontally at each of its four *eigenfrequencies*.Let $x_0$, $x_1$, $x_2$, and $x_3$ be the horizontal displacement of each mass from equilibrium, numbered from left to right. (For example, in the figure above, $x_0 = x_1 = x_2 = x_3 = 0$. 
Verify that the potential energy of the chain is$$U = \tfrac12 k x_0^2 + \tfrac12 k(x_1-x_0)^2 + \tfrac12 k(x_2-x_1)^2 + \tfrac12 k(x_3-x_2)^2 + \tfrac12 k x_3^2\,,$$where we have one term for each of the five springs.Using$$F_j=-\frac{\partial U}{\partial x_j} = m\ddot{x_j}\,,$$write down the four equations of motion that govern the four masses. To find the eigenfrequencies $\omega$ of the system, we exploit the fact that the solution to a simple harmonic oscillator takes the form $\ddot{x_i} = -\omega^2 x_i$.Put the equations you wrote above into the form$$\mathbf{Mx}=\frac{\omega^2}{\omega_0^2}\mathbf{x}\,,$$where $\mathbf{M}$ is a matrix with the coefficients from the equation of motion and $\omega_0=\sqrt{k/m}$ is the natural frequency of each spring. We'll choose units where $\omega_0 = 1$. We are thus left with an eigenvalue problem!$$\mathbf{Mx}=\omega^2\mathbf{x}\,$$where $\omega$ are the angular *eigenfrequencies* (be careful, they are the square root of the eigenvalues and they should all be real).But what is the significance of these eigenfrequencies? Recall that the time-dependent solution to a simple harmonic oscillator is the real part of$$x_j(t) = A_j e^{\mathrm{i}\omega t}\,,$$where $A_j$ is the complex amplitude. (The complex nature of $A_j$ allows for different phase—the magnitude of $A_j$ is what we normally call "amplitude".) In other words, when the chain is driven at an arbitrary frequency $\omega$, all four masses move with that same frequency, but different amplitudes and phases. In general, the values of these amplitudes and phases are not obvious.However, if the system is driven at an eigenfrequency, the resulting amplitudes are all real, and the amplitudes of the four masses are exactly the elements of the associated eigenvector.Make an animation that shows the chain moving at each of its four eigenfrequencies (this would be a good place to use `subplot`). Your animation doesn't need to loop smoothly, but should be long enough that the motions of the masses are clear. It should also be clear that the frequencies for the four cases are not all the same.> Not only does the result look cool, but *any* motion of these four masses can be described as a linear combination of the four motions from your animation. In effect, these are the four unique ways the masses can move.# To help you with the visualization, here is some example code that draws # a spring using matplotlib. Feel free to use/modify this code as you see fit. import numpy as np import matplotlib.pyplot as plt def draw_spring(left, right): """ Draws a horizontal spring. Inputs: left = x-coord of left side right = x-coord of right side """ dy = 0.5 x = np.linspace(left, right, 23) y = np.array([0]*4 + [dy, 0, -dy, 0]*4 + [0]*3) # zig-zag shape for spring spring = plt.Line2D(x, y, linestyle="-", color="gray", zorder=-1) ax = plt.gca() # ^ ax.add_line(spring) # draw behind everything else def draw_chain(x_offsets): """ Draw a chain of five springs and four masses. 
Input: x_offsets = list of displacements from equilibrium """ x = np.arange(2, 10, 2) + x_offsets # draw springs draw_spring(0, x[0],) draw_spring(x[0], x[1]) draw_spring(x[1], x[2]) draw_spring(x[2], x[3]) draw_spring(x[3], 10) # draw masses plt.plot(x, [0, 0, 0, 0], "ko", ms=8) # draw vertical bars at edges plt.vlines([0, 10], -1, 1, "k", lw=5) # example of an unstretched chain draw_chain([0, 0, 0, 0]) plt.axis("scaled") plt.axis("off") plt.show() %%graded # 20 points # YOUR CODE HEREA Journey Into Math For Machine Learning机器学习之数学之旅之 逻辑回归(三) 这是**逻辑回归**的第三部分, 今天的主题下面第六部分, 通过梯度下降法找到最佳的模型参数, 如果你没有看上一节课的内容, 请在github项目根目录的readme文件里寻找链接. **课程预览**: (一). 二项逻辑回归的直觉; (二). 逻辑回归的来历: $odds$和$probability$; (三). 逻辑回归的计算: $logit$函数和$sigmoid$函数及它们的特性; (四). 最大似然估计$(maximum \ likelihood \ estimation)$和损失函数; (五). 困惑度$(perplexity)$的定义; (六). 参数的获取: 梯度下降$(gradient \ descent)$法优化参数; **需要的组件**: numpy, scikit-learn(数据集), matplotlib(可视化), plotly(可视化) **(六). 参数的获取: 梯度下降 (𝑔𝑟𝑎𝑑𝑖𝑒𝑛𝑡 𝑑𝑒𝑠𝑐𝑒𝑛𝑡) 法优化参数** 1. 回顾我们在之前"逻辑回归(一,二)"中推导出的损失函数: $$J(\theta)=-\sum_{i}^{m} Y log(\hat{Y}) - (1-Y) log(1-\hat{Y})$$上式中$i$是数据点序号, 一共$m$个数据点.2. 回顾我们在之前"图解极大似然估计与3D可视化"中画出的$log \ likelihood$, 也就是我们现在损失函数的图像, 注意我们的损失函数是对原始的$log \ likelihood$取负号, 使极大似然值的最大化问题变成最小化问题. ![title](./mle_convex.png) 3. 我们看到上图, 损失函数是一个凸函数, 我们今天使用**梯度下降法**来寻找损失函数极小值, 或者说我们要求出, 当$\theta$取什么值时, 损失函数可以到达极小值. 4. 如果你不知道什么是梯度, 我来快速讲解一下, 看下图, 假设下图的函数是一个山坡, 我们现在站在函数表面某个点, 我们从这个点向上面看, 梯度指的是这个坡最陡峭的地方到底有多陡, 梯度下降指的就是沿着最陡的路径, 不断向下进行搜寻, 直到到达函数的最低点. ![title](./gradient.png) 5. 比如说我们现在有一组参数$\theta^T= \left[ \begin{matrix} bias & \theta_1 & \theta_2 \end{matrix} \right]$ 和损失函数$J(\theta)$, 我们想要知道这组参数$\theta$取怎样的值时, 可以让$J(\theta)$达到最小, 我们需要知道往哪个方向调节参数, 才可以让$J(\theta)$变小, 我们这时就要求出梯度来, 我们要知道在当前点, 函数往哪个方向变化最陡峭, 然后往相反的方向, 也就是下山的方向搜寻, 期望最终能够达到最低点.6. 要求出梯度, 我们要求$J(\theta)$对于$\theta_j$的偏导数$\frac{\partial J(\theta)}{\partial \theta_j}$, 偏导数的意义是, 它描述来函数在某一点的变化率, 说白了就是陡峭程度, 在这里可以这样理解, 我们稍微调节一点点$\theta_j$, 会对$J(\theta)$有多么大的影响, 而偏导数就是这个影响的比例, 或者说山越陡峭, 走一步而上升的海拔高度就越高, 而偏导数就是你走一步上升的海拔高度和步长的比例. **求导过程**: 我们有损失函数$J(\theta)=-\sum_{i}^{m} Y log(\hat{Y}) - (1-Y) log(1-\hat{Y})$, 需要求$J(\theta)$对于$\theta_i$的导数$\frac{\partial J(\theta)}{\partial \theta_i}$, 注意$\hat{Y}=\frac{1}{1+e^{-\theta^T X}}$利用$\frac{d}{dx} \log_a(f(x)) = \frac{1}{f(x)\ln a} f^\prime(x)$, 再将$\hat{Y}=\frac{1}{1+e^{-\theta^T X}}$代入$\log (\hat{Y})$:$$\frac{\partial}{\partial \theta_j} \log (\hat{Y}) = \frac{\partial}{\partial \theta_i}\log(\frac{1}{1+e^{-\theta^T x}}) =\frac{\partial}{\partial \theta_j}\log(1)- \log({1+e^{-\theta^T x}})$$$$\frac{\partial}{\partial \theta_j} \log (\hat{Y})=\frac{\partial}{\partial \theta_j} - \log({1+e^{-\theta^T x}})=-\frac{1}{1+e^{-\theta^T x}} \cdot e^{-\theta^T x} \cdot -x_j=(1-\frac{1}{1+e^{-\theta^T x}})x_j \tag{eq.1}$$ 我们再来求$\frac{\partial}{\partial \theta_j} \log (1-\hat{Y})$$$\frac{\partial}{\partial \theta_j} \log (1-\hat{Y})=\frac{\partial}{\partial \theta_j} \log(\frac{e^{-\theta^T x}}{1+e^{-\theta^T x}})=\frac{\partial}{\partial \theta_j} -\theta^T x- \log (1+e^{-\theta^T x})$$将$(eq.1)$代入上式求导:$$\frac{\partial}{\partial \theta_j} \log (1-\hat{Y})=-x_j+x_j(1-\frac{1}{1+e^{-\theta^T x}})= -\frac{1}{1+e^{-\theta^T x}}x_j \tag{eq.2}$$ 我们将$(eq.1)$和$(eq.2)$求得的$\frac{\partial}{\partial \theta_j} \log (\hat{Y}) , \quad\frac{\partial}{\partial \theta_j} \log (1-\hat{Y})$代入$\frac{\partial}{\partial \theta_j}J(\theta)$, 注意$i$是数据点的序号$j$是特征的数量:$$X= \left[ \begin{matrix} x_{i=1,j=1}, \ x_{i=2,j=1}, \ ... \ , x_{m,j=1}\\ x_{i=1,j=2}, \ x_{i=2,j=2}, \ ... \ , x_{m,j=2}\\ x_{i=1,j=3}, \ x_{i=2,j=3}, \ ... 
\ , x_{m,j=3}\\ \end{matrix} \right]$$$$\frac{\partial}{\partial \theta_j}J(\theta)=-\sum_{i}^{m}y_i x_{ij}(1-\frac{1}{1+e^{-\theta^T x_i}})-(1-y_i)x_{ij}\frac{1}{1+e^{-\theta^T x_i}}$$展开, 整理得到, 注意$\hat{y}=\frac{1}{1+e^{-\theta^T x}}$:$$\frac{\partial}{\partial \theta_j}J(\theta)=\sum_{i}^{m}(\frac{1}{1+e^{-\theta^T x_i}}-y_i)x_{ij}=\sum_{i}^{m}(\hat{y}_i-y_i)x_{ij}\tag{eq.3}$$ 我们还记得之前$\theta$和$X$的向量表示形式$\theta^T= \left[ \begin{matrix} bias & \theta_1 & \theta_2 \end{matrix} \right]\quadX= \left[ \begin{matrix} 1 \\ x_1 \\ x_2 \end{matrix} \right]$ 我们发现$\theta$里$bias$的对应着$X$里面的$1$, 由此可得出:$$\frac{\partial}{\partial \ bias}J(\theta)=\sum_{i}^{m}(\hat{y}_i-y_i)\tag{eq.4}$$ **搜寻下山过程** 我们现在已经得到了$\theta_j$和$bias$的梯度, 我们用这个梯度来更新我们的参数, 我们定义一个学习率$\eta$, 防止下山的时候跑的太快而跑过头, 一般学习率的取值都比较小, 在我们下面的实验中, 取学习率为$0.01$, 然后重复下面步骤, 直到收敛:$$\theta_j \gets \theta_j - \eta \frac{\partial}{\partial \theta_j}J(\theta)\tag{eq.5}$$$$bias \gets bias - \eta \frac{\partial}{\partial \ bias}J(\theta)\tag{eq.6}$$# 导入所需组件 import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.datasets import make_classification X, y = make_classification(random_state=2) # 只取两个特征值, 二维特征值方便可视化 X = X.T[:2, :] y = np.expand_dims(y, axis=0) print("X", X.shape) print("y", y.shape) # 形成网格, 我们之后用来画分类边界 interval = 0.2 x_min, x_max = X[0, :].min() - .5, X[0, :].max() + .5 y_min, y_max = X[1, :].min() - .5, X[1, :].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, interval), np.arange(y_min, y_max, interval)) # 首先画一下数据点 cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) plt.scatter(X[0, :], X[1, :], c=y.ravel(), cmap=cm_bright, edgecolors='k') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xlabel("theta_1") plt.ylabel("theta_2") plt.show() def sigmoid(z): return 1 / (1 + np.exp(-z)) # 初始化theta为全0 theta = np.zeros([2, 1]) # 初始化偏置为0 bias = np.zeros([1]) # 进行正向计算并求出损失 def forward(X, theta, bias): z = np.dot(theta.T, X) + bias y_hat = sigmoid(z) return y_hat def compute_loss(y, y_hat): e = 1e-8 return - y * np.log(y_hat + e) - (1 - y) * np.log(1 - y_hat + e) # 梯度下降, 参见(eq.3), (eq.4), (eq.5), (eq.6) def backward(X, y, y_hat, theta): m = X.shape[-1] # 求theta的梯度 delta_theta = np.dot(X, (y_hat-y).T) / m #(eq.3)(eq.5) # 求bias的梯度 delta_bias = np.mean(y_hat-y) #(eq.4)(eq.6) return delta_theta, delta_bias for i in range(1000): # 正向 y_hat = forward(X, theta, bias) # 计算损失 loss = np.mean(compute_loss(y, y_hat)) if i%100 == 0: print("step:",i,"loss:",loss) # 梯度下降 delta_theta, delta_bias = backward(X, y, y_hat, theta) # 更新参数 theta -= 0.1 * delta_theta bias -= 0.1 * delta_bias # 画等高线图 data = np.c_[xx.ravel(), yy.ravel()].T # 计算出区域内每一个点的模型预测值 Z = forward(data, theta, bias) Z = Z.reshape(xx.shape) # 定义画布大小 plt.figure(figsize=(10,8)) # 画等高线 plt.contourf(xx, yy, Z, 10, cmap=plt.cm.RdBu, alpha=.8) # 画轮廓 contour = plt.contour(xx, yy, Z, 10, colors="k", linewidths=.5) plt.scatter(X[0, :], X[1, :], c=y.ravel(), cmap=cm_bright, edgecolors='k') # 标出等高线对应的数值 plt.clabel(contour, inline=True, fontsize=10) plt.show()Prep B - Road Closure MergeIt is well known that conflict leads to road damage and closure - with various factions setting up road blocks, and munitions damaging road surfaces, sometimes in major ways. We received from the UN Logistics Cluster (via Dr. , World Bank) the latest road closure information for Yemen, as well as 4 file versions for previous time periods. 
However, this information is limited to only the largest grade roads in the area (main roads, paved) whereas our own network is derived from Open Street Map which has been purposefully improved over the region. As such, we want to spatially join the information from this road shapefile over to our own network. This is what this script accomplishes. We start by importing the usual suspects. If you have installed GOSTnets via conda, you will not need to append to the system path as we have done below.import pandas as pd import os, sys sys.path.append(r'C:\Users\charl\Documents\GitHub\GOST_PublicGoods\GOSTNets\GOSTNets') sys.path.append(r'C:\Users\charl\Documents\GitHub\GOST') import GOSTnet as gn import importlib import geopandas as gpd from shapely.geometry import Point import rasterio from rasterio import features from shapely.wkt import loads import numpy as np import networkx as nx import timepeartree version: 0.6.1 networkx version: 2.3 matplotlib version: 3.0.3 osmnx version: 0.9Load Road Network We set up net_graph as the pickled network object. We are using the Salted network with time attributes.bp = r'C:\Users\charl\Documents\GOST\Yemen' net_graph = nx.read_gpickle(os.path.join(bp, 'YEM', 'Round 3', 'G_salty_time.pickle'))Here we add a new attribute - ID - for each edge, and then generate a GeoDataFrame from the graph's edges using GOSTnetscounter = 1 for u, v, data in net_graph.edges(data = True): data['ID'] = counter counter+=1 net_graph_edge_gdf = gn.edge_gdf_from_graph(net_graph)Match on Conflict information to Graph In this section, we set up for a large loop, where we iteratively read in and spatially match on the information from each shapefile on to our main graph object.# Create a list of the files and their dates to iterate through settings = [ {'date':'November1st', 'f_name':'Access_20181101.shp'}, {'date':'November8th', 'f_name':'Access_20181108.shp'}, {'date':'November14th', 'f_name':r'Access_20181114.shp'}, {'date':'November25th', 'f_name':'Access_20181125.shp'}, {'date':'December17th', 'f_name':'Access_20181217.shp'}, {'date':'January24th', 'f_name':r'Access_20190124.shp'}]Below is the main loop. Pay attention to in-line commentary to see how it works.# set up a spatial index using the GeoPandas .sindex method smol = net_graph_edge_gdf spatial_index = smol.sindex # iterate through our settings list.... for setting in settings: # Extract from the setting dictionary the filename and the date it relates to f_name = setting['f_name'] date = setting['date'] # read in the UN logistics cluster shapefile for that date using f_name conflict = gpd.read_file(os.path.join(r'C:\Users\charl\Documents\GOST\Yemen\Conflicts\Road Networks (UN Logistics Cluster)',f_name)) # pick out only the roads where their status is either restricted or closed. 
The rest of the roads are in full working operation conflict = conflict.loc[conflict.status.isin(['Restricted','Closed'])] # Reproject the conflict-affected roads to UTM zone conflict = conflict.to_crs({'init':'epsg:32638'}) # Buffer these roads by 50 meters conflict['buffered'] = conflict.buffer(50) # reset the GeoDataFrame's geometry attribute to the buffered road geometry conflict = conflict.set_geometry('buffered') # reset the projection to WGS 84 conflict = conflict.to_crs({'init':'epsg:4326'}) # split out the restricted and the closed roads into their only GeoDataFrames conflict_restricted = conflict.copy() conflict_restricted = conflict_restricted.loc[conflict_restricted.status.isin(['Restricted'])] conflict_closed = conflict.copy() conflict_closed = conflict_closed.loc[conflict_closed.status.isin(['Closed'])] # now, we iterate through each closed road, and use the spatial index to quickly identify # intersecting graph edges with the buffered closed roads. This may capture a few more roads that strictly necessary, but # that is a price we are willing to pay, and probably reflects the 'on the ground' situation (remember no edge is longer than 2km anyway) close = [] for index, row in conflict_closed.iterrows(): polygon = row.buffered possible_matches_index = list(spatial_index.intersection((polygon.bounds))) possible_matches = smol.iloc[possible_matches_index] precise_matches = possible_matches[possible_matches.intersects(polygon)] i = list(precise_matches.ID) if len(i) !=0: close.append(i) close = [item for sublist in close for item in sublist] # we perform the same operation now on restricted roads restrict = [] for index, row in conflict_restricted.iterrows(): polygon = row.buffered possible_matches_index = list(spatial_index.intersection((polygon.bounds))) possible_matches = smol.iloc[possible_matches_index] precise_matches = possible_matches[possible_matches.intersects(polygon)] i = list(precise_matches.ID) if len(i) !=0: restrict.append(i) restrict = [item for sublist in restrict for item in sublist] # Closed roads are a 'higher order' problem that restricted roads. As such, should the buffer nature # of the process have assigned a road both to the closed AND restricted buckets, we remove it from the restricted bucket. # this shouldn't happen too often, but is entirely possible where a closed road meets a restricted road. for i in restrict: if i in close: restrict.remove(i) # now, we iterate through the edges, and check to see whether their IDs crop up in the list items 'close' and 'restrict' - # and if so, we adjust their time accordingly. for u, v, data in net_graph.edges(data = True): # if a road is on the restricted list, we double the travel time. if data['ID'] in restrict: data['time_%s' % date] = data['time'] * 2 data['MOD_%s' % date] = 'restricted' # if a road is closed, we make the travel time very, very large. elif data['ID'] in close: data['time_%s' % date] = 99999999 data['MOD_%s' % date] = 'closed' # otherwise, we pass else: data['time_%s' % date] = data['time'] data['MOD_%s' % date] = 'normal'Save Down We save the resulting graph down as 'conflict adjusted'. 
Note, we did not edit the 'time' property, but set a NEW property for the travel time as of the date of the conflict info - so the original time can still be accessed from the conflcit adjusted Graph object.gn.save(net_graph, 'G_salty_time_conflict_adj', os.path.join(bp, 'YEM', 'Round 3'), nodes = False, edges = False)Snap on At this point, we also snap on our facility file and origin files to the conflcit adjusted graph. Although we did not change any nodes, its always nice to be sure you are working with the latest and greatest file version :-)dfiles = ['HeRAMS 2018 April.csv'] dpath = r'C:\Users\charl\Documents\GOST\Yemen\facility_files' wpath = r'C:\Users\charl\Documents\GOST\Yemen\graphtool' for dfile in dfiles: # Read in dest_df = pd.read_csv(os.path.join(os.path.join(dpath, dfile)), encoding = "ISO-8859-1") # Ensure coordinates are floats dest_df.Longitude = dest_df.Longitude.astype(float) dest_df.Latitude = dest_df.Latitude.astype(float) # Drop entries with no coordinates dest_df2 = dest_df.copy() print(len(dest_df2)) dest_df2 = dest_df2.loc[(dest_df2.Longitude != 0)] dest_df2 = dest_df2.loc[(dest_df2.Longitude != None)] dest_df2 = dest_df2.loc[(dest_df2.Longitude <= 60)] dest_df2 = dest_df2.loc[(dest_df2.Longitude >= 35)] dest_df2 = dest_df2.loc[(dest_df2.Latitude <= 30)] dest_df2 = dest_df2.loc[(dest_df2.Latitude >= 5)] print(len(dest_df2)) dest_df = dest_df2 # Generate Geometries dest_df['geometry'] = list(zip(dest_df.Longitude, dest_df.Latitude)) dest_df['geometry'] = dest_df['geometry'].apply(Point) dest_df = gpd.GeoDataFrame(dest_df, geometry = 'geometry', crs = {'init':'espg:4326'}) # Perform snap time.ctime() start = time.time() df = gn.pandana_snap(net_graph, dest_df, 'epsg:4326','epsg:32638', add_dist_to_node_col = True) # Save to file df.to_csv(os.path.join(wpath, dfile.replace('.csv', '_snapped.csv'))) df.to_csv(os.path.join(dpath, dfile.replace('.csv', '_snapped.csv'))) print('time elapsed: %d seconds' % (time.time() - start)) ofile = r'origins_1km.csv' ofiles = [ofile] opath = r'C:\Users\charl\Documents\GOST\Yemen\origins' for ofile in ofiles: # Read in dest_df = pd.read_csv(os.path.join(os.path.join(opath, ofile)), encoding = "ISO-8859-1") dest_df['geometry'] = dest_df['geometry'].apply(loads) dest_df = gpd.GeoDataFrame(dest_df, geometry = 'geometry', crs = {'init':'espg:4326'}) # Perform snap print('Beginning snap') time.ctime() start = time.time() df = gn.pandana_snap(net_graph, dest_df, 'epsg:4326','epsg:32638', add_dist_to_node_col = True) # Save to file df.to_csv(os.path.join(wpath, ofile.replace('.csv', '_snapped.csv'))) df.to_csv(os.path.join(opath, ofile.replace('.csv', '_snapped.csv'))) print('Time elapsed: %d seconds' % (time.time() - start))Beginning snap Time elapsed: 166 seconds![AIcrowd-Logo](https://raw.githubusercontent.com/AIcrowd/AIcrowd/master/app/assets/images/misc/aicrowd-horizontal.png) This dataset and notebook correspond to the [Food Recognition Challenge](https://www.aicrowd.com/challenges/food-recognition-challenge) being held on [AICrowd](https://www.aicrowd.com/). In this Notebook, we will first do an analysis of the Food Recognition Dataset and then use maskrcnn for training on the dataset. 
The Challenge* Given Images of Food, we are asked to provide Instance Segmentation over the images for the food items.* The Training Data is provided in the COCO format, making it simpler to load with pre-available COCO data processors in popular libraries.* The test set provided in the public dataset is similar to Validation set, but with no annotations.* The test set after submission is much larger and contains private images upon which every submission is evaluated.* Pariticipants have to submit their trained model along with trained weights. Immediately after the submission the AICrowd Grader picks up the submitted model and produces inference on the private test set using Cloud GPUs.* This requires Users to structure their repositories and follow a provided paradigm for submission.* The AICrowd AutoGrader picks up the Dockerfile provided with the repository, builds it and then mounts the tests folder in the container. Once inference is made, the final results are checked with the ground truth.***For more submission related information, please check [the AIcrowd Challenge page](https://www.aicrowd.com/challenges/food-recognition-challenge) and [the starter kit](https://github.com/AIcrowd/food-recognition-challenge-starter-kit/).*** The Notebook> * Installation of MaskRCNN> * Using MatterPort MaskRCNN Library and Making local inference with it> * Local Evaluation Using Matterport MaskRCNN***A bonus section on other resources to read is also added!*** Dataset Download Note: By downloading this data you are argeeing to the competition rules specified [here](https://www.aicrowd.com/challenges/food-recognition-challenge/challenge_rules/150)from google.colab import drive drive.mount('/content/drive/') !cd /content/drive/MyDrive/R-CNN_img/Mask_RCNN # !wget "https://s3.eu-central-1.wasabisys.com/aicrowd-public-datasets/myfoodrepo/round-2/train.tar.gz" -P "/content/drive/MyDrive/R-CNN_img/Mask_RCNN" # !wget "https://s3.eu-central-1.wasabisys.com/aicrowd-public-datasets/myfoodrepo/round-2/val.tar.gz" -P "/content/drive/MyDrive/R-CNN_img/Mask_RCNN" # !wget -q https://s3.eu-central-1.wasabisys.com/aicrowd-public-datasets/myfoodrepo/round-2/train.tar.gz # !wget -q https://s3.eu-central-1.wasabisys.com/aicrowd-public-datasets/myfoodrepo/round-2/val.tar.gz import os # os.makedirs('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data') # os.makedirs('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val') # os.makedirs('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/train') # !mkdir data # !mkdir data/val # !mkdir data/train # !tar -xf train.tar.gz -C data/train # !tar -xf val.tar.gz -C data/val # !tar -xf /content/drive/MyDrive/R-CNN_img/Mask_RCNN/train.tar.gz -C /content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/train # !tar -xf /content/drive/MyDrive/R-CNN_img/Mask_RCNN/val.tar.gz -C /content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/valInstallation#Directories present import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/'): print(dirname) #Directories present import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import os for dirname, _, filenames in os.walk('data/'): print(dirname) import warnings warnings.filterwarnings("ignore") pip install -q -U numpy==1.16.1 import os import sys import random import math import numpy as np import cv2 import matplotlib.pyplot as plt import json from imgaug import augmenters as iaa from tqdm import tqdm import pandas as pd import glob !pip install -q tensorflow-gpu !pip uninstall keras-nightly !pip uninstall -y tensorflow !pip install h5py==2.10.0 !pip install tensorflow==1.15 !pip install tensorflow-gpu==1.14.0 !pip install keras==2.1.6 import tensorflow as tf tf.__version__ DATA_DIR = 'data' # Directory to save logs and trained model ROOT_DIR = 'working' # https://github.com/matterport/Mask_RCNN !cd /content/drive/MyDrive/R-CNN_img/Mask_RCNN ls !git clone https://www.github.com/matterport/Mask_RCNN.git os.chdir('Mask_RCNN') !pip install -q -r requirements.txt !python setup.py -q install # Import Mask RCNN sys.path.append(os.path.join('.', 'Mask_RCNN')) # To find local version of the library from mrcnn.config import Config from mrcnn import utils import mrcnn.model as modellib from mrcnn import visualize from mrcnn.model import log !pip uninstall pycocotools -y # !pip install -q git+https://github.com/waleedka/coco.git#subdirectory=PythonAPI !pip install pycocotools !pip uninstall pycocotools -y !pip install --no-cache-dir pycocotools from mrcnn import utils import numpy as np from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from pycocotools import mask as maskUtilsMaskRCNN To train MaskRCNN, two things we have to define `FoodChallengeDataset` that implements the `Dataset` class of MaskRCNN and `FoodChallengeConfig` that implements the `Config` class.The `FoodChallengeDataset` helps define certain functions that allow us to load the data. 
The `FoodChallengeConfig` gives the information like `NUM_CLASSES`, `BACKBONE`, etc.class FoodChallengeDataset(utils.Dataset): def load_dataset(self, dataset_dir, load_small=False, return_coco=True): """ Loads dataset released for the AICrowd Food Challenge Params: - dataset_dir : root directory of the dataset (can point to the train/val folder) - load_small : Boolean value which signals if the annotations for all the images need to be loaded into the memory, or if only a small subset of the same should be loaded into memory """ self.load_small = load_small if self.load_small: annotation_path = os.path.join(dataset_dir, "annotation-small.json") else: annotation_path = os.path.join(dataset_dir, "annotations.json") image_dir = os.path.join(dataset_dir, "images") print("Annotation Path ", annotation_path) print("Image Dir ", image_dir) assert os.path.exists(annotation_path) and os.path.exists(image_dir) self.coco = COCO(annotation_path) self.image_dir = image_dir # Load all classes (Only Building in this version) classIds = self.coco.getCatIds() # Load all images image_ids = list(self.coco.imgs.keys()) # register classes for _class_id in classIds: self.add_class("crowdai-food-challenge", _class_id, self.coco.loadCats(_class_id)[0]["name"]) # Register Images for _img_id in image_ids: assert(os.path.exists(os.path.join(image_dir, self.coco.imgs[_img_id]['file_name']))) self.add_image( "crowdai-food-challenge", image_id=_img_id, path=os.path.join(image_dir, self.coco.imgs[_img_id]['file_name']), width=self.coco.imgs[_img_id]["width"], height=self.coco.imgs[_img_id]["height"], annotations=self.coco.loadAnns(self.coco.getAnnIds( imgIds=[_img_id], catIds=classIds, iscrowd=None))) if return_coco: return self.coco def load_mask(self, image_id): """ Loads instance mask for a given image This function converts mask from the coco format to a a bitmap [height, width, instance] Params: - image_id : reference id for a given image Returns: masks : A bool array of shape [height, width, instances] with one mask per instance class_ids : a 1D array of classIds of the corresponding instance masks (In this version of the challenge it will be of shape [instances] and always be filled with the class-id of the "Building" class.) """ image_info = self.image_info[image_id] assert image_info["source"] == "crowdai-food-challenge" instance_masks = [] class_ids = [] annotations = self.image_info[image_id]["annotations"] # Build mask of shape [height, width, instance_count] and list # of class IDs that correspond to each channel of the mask. for annotation in annotations: class_id = self.map_source_class_id( "crowdai-food-challenge.{}".format(annotation['category_id'])) if class_id: m = self.annToMask(annotation, image_info["height"], image_info["width"]) # Some objects are so small that they're less than 1 pixel area # and end up rounded out. Skip those objects. 
if m.max() < 1: continue # Ignore the notion of "is_crowd" as specified in the coco format # as we donot have the said annotation in the current version of the dataset instance_masks.append(m) class_ids.append(class_id) # Pack instance masks into an array if class_ids: mask = np.stack(instance_masks, axis=2) class_ids = np.array(class_ids, dtype=np.int32) return mask, class_ids else: # Call super class to return an empty mask return super(FoodChallengeDataset, self).load_mask(image_id) def image_reference(self, image_id): """Return a reference for a particular image Ideally you this function is supposed to return a URL but in this case, we will simply return the image_id """ return "crowdai-food-challenge::{}".format(image_id) # The following two functions are from pycocotools with a few changes. def annToRLE(self, ann, height, width): """ Convert annotation which can be polygons, uncompressed RLE to RLE. :return: binary mask (numpy 2D array) """ segm = ann['segmentation'] if isinstance(segm, list): # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(segm, height, width) rle = maskUtils.merge(rles) elif isinstance(segm['counts'], list): # uncompressed RLE rle = maskUtils.frPyObjects(segm, height, width) else: # rle rle = ann['segmentation'] return rle def annToMask(self, ann, height, width): """ Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask. :return: binary mask (numpy 2D array) """ rle = self.annToRLE(ann, height, width) m = maskUtils.decode(rle) return m class FoodChallengeConfig(Config): """Configuration for training on data in MS COCO format. Derives from the base Config class and overrides values specific to the COCO dataset. """ # Give the configuration a recognizable name NAME = "crowdai-food-challenge" # We use a GPU with 12GB memory, which can fit two images. # Adjust down if you use a smaller GPU. 
IMAGES_PER_GPU = 4 # Uncomment to train on 8 GPUs (default is 1) GPU_COUNT = 1 BACKBONE = 'resnet50' # Number of classes (including background) NUM_CLASSES = 62 # 1 Background + 61 classes STEPS_PER_EPOCH=150 VALIDATION_STEPS=50 LEARNING_RATE=0.001 IMAGE_MAX_DIM=256 IMAGE_MIN_DIM=256 config = FoodChallengeConfig() config.display()Configurations: BACKBONE resnet50 BACKBONE_STRIDES [4, 8, 16, 32, 64] BATCH_SIZE 4 BBOX_STD_DEV [0.1 0.1 0.2 0.2] COMPUTE_BACKBONE_SHAPE None DETECTION_MAX_INSTANCES 100 DETECTION_MIN_CONFIDENCE 0.7 DETECTION_NMS_THRESHOLD 0.3 FPN_CLASSIF_FC_LAYERS_SIZE 1024 GPU_COUNT 1 GRADIENT_CLIP_NORM 5.0 IMAGES_PER_GPU 4 IMAGE_CHANNEL_COUNT 3 IMAGE_MAX_DIM 256 IMAGE_META_SIZE 74 IMAGE_MIN_DIM 256 IMAGE_MIN_SCALE 0 IMAGE_RESIZE_MODE square IMAGE_SHAPE [256 256 3] LEARNING_MOMENTUM 0.9 LEARNING_RATE 0.001 LOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0} MASK_POOL_SIZE 14 MASK_SHAPE [...]You can change other values in the `FoodChallengeConfig` as well and try out different combinations for best results!!mkdir pretrained #os.makedirs('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/Mask_RCNN/pretrained') PRETRAINED_MODEL_PATH = os.path.join("/content/drive/MyDrive/R-CNN_img/Mask_RCNN/Mask_RCNN/pretrained", "mask_rcnn_coco.h5") LOGS_DIRECTORY = os.path.join(ROOT_DIR, "logs") if not os.path.exists(PRETRAINED_MODEL_PATH): utils.download_trained_weights(PRETRAINED_MODEL_PATH) from keras import backend as K K.tensorflow_backend._get_available_gpus() import keras.backend K = keras.backend.backend() if K=='tensorflow': keras.backend.common.image_dim_ordering() model = modellib.MaskRCNN(mode="training", config=config, model_dir=LOGS_DIRECTORY) model_path = PRETRAINED_MODEL_PATH model.load_weights(model_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]) dataset_train = FoodChallengeDataset() dataset_train.load_dataset('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/train', load_small=False) dataset_train.prepare()Annotation Path /content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/train/annotations.json Image Dir /content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/train/images loading annotations into memory... Done (t=3.37s) creating index... index created!!ls !cd .. 
os.chdir('/content/drive/MyDrive/R-CNN_img/Mask_RCNN') !ls len(os.listdir('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val')) dataset_val = FoodChallengeDataset() # val_coco = dataset_val.load_dataset(dataset_dir='/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val', load_small=False, return_coco=True) val_coco = dataset_val.load_dataset(dataset_dir='/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val', load_small=False, return_coco=True) dataset_val.prepare() class_names = dataset_train.class_names # If you don't have the correct classes here, there must be some error in your DatasetConfig assert len(class_names)==62, "Please check DatasetConfig" class_namesLets start training!!print("Training network") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=15, layers='heads') model_path = model.find_last() model_path class InferenceConfig(FoodChallengeConfig): GPU_COUNT = 1 IMAGES_PER_GPU = 1 NUM_CLASSES = 62 # 1 Background + 61 classes IMAGE_MAX_DIM=256 IMAGE_MIN_DIM=256 NAME = "food" DETECTION_MIN_CONFIDENCE=0 inference_config = InferenceConfig() inference_config.display() # Recreate the model in inference mode model = modellib.MaskRCNN(mode='inference', config=inference_config, model_dir=ROOT_DIR) # Load trained weights (fill in path to trained weights here) assert model_path != "", "Provide path to trained weights" print("Loading weights from ", model_path) model.load_weights(model_path, by_name=True) # Show few example of ground truth vs. predictions on the validation dataset dataset = dataset_val fig = plt.figure(figsize=(10, 30)) for i in range(4): image_id = random.choice(dataset.image_ids) original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\ modellib.load_image_gt(dataset_val, inference_config, image_id, use_mini_mask=False) print(original_image.shape) plt.subplot(6, 2, 2*i + 1) visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, dataset.class_names, ax=fig.axes[-1]) plt.subplot(6, 2, 2*i + 2) results = model.detect([original_image]) #, verbose=1) r = results[0] visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], dataset.class_names, r['scores'], ax=fig.axes[-1]) import json with open('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val/annotations.json') as json_file: data = json.load(json_file) d = {} for x in data["categories"]: d[x["name"]]=x["id"] id_category = [0] for x in dataset.class_names[1:]: id_category.append(d[x]) #id_category import tqdm import skimage files = glob.glob(os.path.join('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val/test_images/images', "*.jpg")) _final_object = [] for file in tqdm.tqdm(files): images = [skimage.io.imread(file) ] #if(len(images)!= inference_config.IMAGES_PER_GPU): # images = images + [images[-1]]*(inference_config.BATCH_SIZE - len(images)) predictions = model.detect(images, verbose=0) #print(file) for _idx, r in enumerate(predictions): image_id = int(file.split("/")[-1].replace(".jpg","")) for _idx, class_id in enumerate(r["class_ids"]): if class_id > 0: mask = r["masks"].astype(np.uint8)[:, :, _idx] bbox = np.around(r["rois"][_idx], 1) bbox = [float(x) for x in bbox] _result = {} _result["image_id"] = image_id _result["category_id"] = id_category[class_id] _result["score"] = float(r["scores"][_idx]) _mask = maskUtils.encode(np.asfortranarray(mask)) _mask["counts"] = _mask["counts"].decode("UTF-8") _result["segmentation"] = _mask _result["bbox"] = [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]] _final_object.append(_result) fp 
= open('/content/drive/MyDrive/R-CNN_img/Mask_RCNN/output.json', "w") import json print("Writing JSON...") fp.write(json.dumps(_final_object)) fp.close() submission_file = json.loads(open("/content/drive/MyDrive/R-CNN_img/Mask_RCNN/output.json").read()) len(submission_file) type(submission_file) import random import json import numpy as np import argparse import base64 import glob import os from PIL import Image from pycocotools.coco import COCO GROUND_TRUTH_ANNOTATION_PATH = "/content/drive/MyDrive/R-CNN_img/Mask_RCNN/data/val/annotations.json" ground_truth_annotations = COCO(GROUND_TRUTH_ANNOTATION_PATH) submission_file = json.loads(open("/content/drive/MyDrive/R-CNN_img/Mask_RCNN/output.json").read()) results = ground_truth_annotations.loadRes(submission_file) cocoEval = COCOeval(ground_truth_annotations, results, 'segm') cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize()loading annotations into memory... Done (t=0.03s) creating index... index created! Loading and preparing results... DONE (t=0.00s) creating index... index created! Running per image evaluation... Evaluate annotation type *segm* DONE (t=0.57s). Accumulating evaluation results... DONE (t=0.36s). Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.077 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.115 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.083 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.039 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.078 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.109 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.112 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets[...]**BONUS :** Resources to Read* [An Introduction to Image Segmentation](https://www.analyticsvidhya.com/blog/2019/04/introduction-image-segmentation-techniques-python/)* [Blog introducing Mask RCNN in COCO dataset](https://www.analyticsvidhya.com/blog/2019/07/computer-vision-implementing-mask-r-cnn-image-segmentation/)* [A good blog by matterport on Mask RCNN and it's implementation](https://engineering.matterport.com/splash-of-color-instance-segmentation-with-mask-r-cnn-and-tensorflow-7c761e238b46)* [Using mmdetection library in Pytorch](https://github.com/open-mmlab/mmdetection/blob/master/docs/GETTING_STARTED.md)我們使用最基礎的 Gradient Descent 方法來找到y=f(x)=Wx+b 當中的 W 和 b , 用來預測 PokenMon 升級後的 CP 值* 主要的學習教材為台大電機系李弘毅教授的教材 : http://speech.ee.ntu.edu.tw/~tlkagk/courses/ML_2017/Lecture/Linear%20Regression.mp4* 這問題可以視為 Regression 問題 * Gradient Descent 的推導會使用到基礎的微分觀念,不了解的話可以參考 : http://www.amath.nchu.edu.tw/~tdoc/4_2.htm* 實驗資料可以從這裡下載 : https://www.openintro.org/stat/data/?data=pokemon 直觀推導方式如下:* $\hat y$ 為 Traing Data 當的答案,也就是範例中的 $newcp $* $x$ 為 $cp$ 值,而我們要找到一個 $f(x)$ 能準確預測 $newcp$,故令 $y=f(x)=w*x+b $* 先定義 Lose Function : $L= (\hat y-f(x))^2 => L(w,b) = (\hat y - (b+w*x))^2 $ * 我們想要將 Lose 值降到最低,直觀的想法是找出 w,b 對於 Lose Function 的變化影響,如果值變化是往變大的方向我們就減上變化量,更數學的說就是看 w,b 的微小的變化對於 Lose 值的變化方向,然後進行反方向的更新。* 使用泰勒展開式來進行 Gradient Descent 的推導 : http://speech.ee.ntu.edu.tw/~tlkagk/courses/ML_2017/Lecture/Gradient%20Descent.pdf* 基於上述直觀的想法,我們對 Lose Function 分別對 w,b 作偏微分,再將此偏微分的結果用來減掉原來的 w,b 最終推導的結果如下圖 我們先取前 50 筆資料,作為實驗素材import numpy as np import pandas as pd pd = pd.read_csv("./pokemon.csv") newcp = np.array(pd['cp_new']) cp = np.array(pd['cp']) cp = cp[:50] newcp = newcp[:50] import matplotlib.pyplot 
as plt # plt.plot(newcp,cp) plt.scatter(cp, newcp) # plt.ylabel('some numbers') plt.show()實作 Gradient Descent 的公式* 這裡有作一點變形,使用的方法為 Stochastic Gradient Descent* 用比較白話的說就是,並不是拿所有的 Training Data 的 Lose Value 和來算 Gradient ,而是針對每一個 X 就算一次並進行 Gradient Descent 的 update* 除了使用 Stochastic Gradient Descent 方法之中,還使用了 Regularization 的技巧,避免 w 值過大,造成 f(x) 不夠平滑。w = 1 b = 1 n = len(cp) r= 0.000001 for i in range(100000): dw = 0 db = 0 error = 0 for x , y in zip(cp,newcp): # 加上 2*w 是考量到 Regularization dw = -2*x*(y- (w*x+b)) + 2*w db = -2*(y- (w*x+b)) # print dw,db y_head = w*x + b w = w - dw*r b = b - db*r error +=(y_head-y)*(y_head-y) if i % 10000 ==0 : print("w={:0.3f}, b={:0.3f}, error={:0.3f} ".format(w,b,error/n)) y_head = [] for x in cp : predict = w*x +b y_head.append(predict) plt.scatter(y_head, newcp) plt.show()Load the datatrain = pd.read_csv('./train.csv') train.head()Build a quick baselinefrom sklearn.ensemble import RandomForestClassifier # Create a copy to work with X = train.copy() # Save and drop labels y = train.y X = X.drop('y', axis=1) # fill NANs X = X.fillna(-999) # Label encoder for c in train.columns[train.dtypes == 'object']: X[c] = X[c].factorize()[0] rf = RandomForestClassifier() rf.fit(X,y) plt.plot(rf.feature_importances_) plt.xticks(np.arange(X.shape[1]), X.columns.tolist(), rotation=90);There is something interesting about `x8`.# we see it was standard scaled, most likely, if we concat train and test, we will get exact mean=1, and std 1 print 'Mean:', train.x8.mean() print 'std:', train.x8.std() # And we see that it has a lot of repeated values train.x8.value_counts().head(15) # It's very hard to work with scaled feature, so let's try to scale them back # Let's first take a look at difference between neighbouring values in x8 x8_unique = train.x8.unique() x8_unique_sorted = np.sort(x8_unique) np.diff(x8_unique_sorted) # The most of the diffs are 0.04332159! # The data is scaled, so we don't know what was the diff value for the original feature # But let's assume it was 1.0 # Let's devide all the numbers by 0.04332159 to get the right scaling # note, that feature will still have zero mean np.diff(x8_unique_sorted/0.04332159) (train.x8/0.04332159).head(10) # Ok, now we see .102468 in every value # this looks like a part of a mean that was subtracted during standard scaling # If we subtract it, the values become almost integers (train.x8/0.04332159 - .102468).head(10) # let's round them x8_int = (train.x8/0.04332159 - .102468).round() x8_int.head(10) # Ok, what's next? In fact it is not obvious how to find shift parameter, # and how to understand what the data this feature actually store # But ... x8_int.value_counts() # do you see this -1968? Doesn't it look like a year? ... So my hypothesis is that this feature is a year of birth! # Maybe it was a textbox where users enter their year of birth, and someone entered 0000 instead # The hypothesis looks plausible, isn't it? (x8_int + 1968.0).value_counts().sort_index() # After the competition ended the organisers told it was really a year of birthモーター制御のシミュレーション 目的賢いモータードライバICを使えば指示だけ出せば、電流などを計測しつつ適切な制御を簡単確実に行ってくれる。ここでわざわざ単純なスイッチのみのドライバICを使ってFPGA側でモーター制御を試みようというのは、ドライバICの知りえない電流値以外の要素を制御に利用したらどうなるか? 
という興味本位の実験に他ならない。拙作のLUT-Netは超高速なニューラルネットであり、他センサーの情報を使って通常ドライバICにお任せしてしまう高周波数領域に適用できる可能性がある。もし電流計測することなく電流を見ているドライバと同等の制御ができたりすると面白い。ダイレクトドライブをする限り、モーターコイルの電流値はそのまま物理現象として現れるはずなので例えば、加速度センサやイメージセンサなどの情報を用いてシンプルな制御で脱調の検知や、マイクロステップ部分の制御など、電流計測が必要な制御が代替できないかという話である。と、いうことで基本的な部分を作り始めてみたのですが、これが意外に難しい。専用ドライバICはすごくよくできている。まずコンセプトの実験ができる程度の簡素なものでいいので頑張りたい。 やりたい制御まずは一定周期毎にPWM幅を変えてモーターの制御状態を変える。その際に、目標位置を指定した場合に指定加速度をなるべく超えないように目的位置に移動させるという事がしたい。その際$$x = vt + \frac{1}{2}at^2$$という連続的な物理モデルではなく$$\begin{eqnarray*}x_t &=& x_{t-1} + v_{t-1} \\v_t &=& v_{t-1} + a_{t-1}\end{eqnarray*}$$という漸化式を元に、加減速の制御変換点を求める 計算移動開始時の最大速度を $v$、加速度を $a$ とする。停止まで $n$ ステップを要するとして、移動する長さ $l$ は、等差数列の和であるから$$l = \frac{n(n+1)a}{2}$$ここで $n$ は$$n = \frac{v}{a}$$であるから、停止するための条件が$$\begin{eqnarray}l = \frac{\frac{v}{a}(\frac{v}{a}+1)a}{2} \\2l = \frac{v^2}{a} + v \\v^2 + av - 2al = 0 \\v = \frac{-a \pm \sqrt{a^2 + 8ab}}{2}\end{eqnarray}$$として得られる 実装計算過程で平方根などがあり量子化誤差が吸収できないので、近傍に移動したら最大加速要件を緩和して止める(インチキ)import math import matplotlib.pyplot as plt def calc_len_sim(v, a): '''繰り返しシミュレーションで停止までの移動量(制動距離)を求める Args: v(int): 速度 a(int): 加速度 Returns: int: 移動距離 ''' l = 0 while v > 0: l += v v -= a return l def calc_limit_v_sim(l, a): '''繰り返しシミュレーションで停止可能な最大速度を求める Args: l(int): 距離 a(int): 加速度 Returns: int: 最大速度 ''' v = 0 while calc_len_sim(v, a) < l: v += 1 return v def calc_limit_v_physics(l, a): '''物理学で停止可能な最大速度を求める Args: l(int): 距離 a(int): 加速度 Returns: int: 最大速度 ''' v = int(math.sqrt(2 * a * l)) return v def calc_limit_v_recurrence(l, a): '''漸化式で停止可能な最大速度を求める Args: l(int): 距離 a(int): 加速度 Returns: int: 最大速度 ''' v = int((-a + int(math.sqrt(a*(a + 8*l))))//2) return v def calc_limit_v(l, a): '''デフォルトで漸化式を採用''' return calc_limit_v_recurrence(l, a) # モデルの差や量子化誤差を俯瞰してみる a = 20 list_sim = [] list_phy = [] list_rec = [] for i in range(200): list_sim.append(calc_limit_v_sim(i, a)) list_phy.append(calc_limit_v_physics(i, a)) list_rec.append(calc_limit_v_recurrence(i, a)) plt.figure(figsize=(8, 6)) plt.plot(list_sim, label='simulation') plt.plot(list_phy, label='physics') plt.plot(list_rec, label='recurrence') plt.xlabel('length') plt.ylabel('v limit') plt.legend() plt.savefig("model_difference.png") class motor_model: '''簡単なモデルを作成''' def __init__(self): self.target_en = False # 目標位置は存在するか self.target_x = 0 # 目標位置 self.cur_x = 0 # 現在の位置 self.cur_v = 0 # 現在の速度 self.cur_a = 0 # 現在の加速度 self.max_v = 20 # 最大速度 self.max_a = 3 # 最大加速度 self.max_a_near = 4 # 近距離での最大加速度(誤差吸収用) self.log_x = [] # 位置のログ self.log_v = [] # 速度のログ self.log_a = [] # 加速度のログ def clear_log(self): self.log_pos = [] self.log_speed = [] self.log_acc = [] def step(self): max_a = self.max_a if self.target_en: # 目標位置が有効なら加速制御 d = self.target_x - self.cur_x lim_v = calc_limit_v(d, self.max_a) self.cur_a = lim_v - self.cur_v print('d:%d lim_v:%d a:%d' %(d, lim_v, self.cur_a)) # 近傍なら閾値を変えて丸める(インチキ) if d <= self.max_a and d >= -self.max_a: self.cur_a = d - self.cur_v max_a = self.max_a_near # 上限でクリップしてステップを進める self.cur_a = min(max(self.cur_a, -max_a), +max_a) self.cur_v += self.cur_a self.cur_v = min(max(self.cur_v, -self.max_v), +self.max_v) self.cur_x += self.cur_v self.log_x.append(self.cur_x) self.log_v.append(self.cur_v) self.log_a.append(self.cur_a) return self.cur_x def set_target_x(self, target_x, target_en=True): self.target_x = target_x self.target_en = target_en def set_max_a(self, max_a, max_a_near=-1): self.max_a = max_a if max_a_near < 0: max_a_near = max_a + ((max_a + 1) // 2) # 暫定で1.5倍 self.max_a_near = max_a_near def set_max_v(self, max_v): self.max_v = max_v def set_cur_v(self, 
a): self.cur_a = a def set_cur_a(self, a): self.cur_a = a def get_cur_x(self): return self.cur_x def get_cur_v(self): return self.cur_v def get_cur_a(self): return self.cur_a def get_log_x(self): return self.log_x def get_log_v(self): return self.log_v def get_log_a(self): return self.log_a def print_status(motor): print('x:%d v:%d a:%d' % (motor.get_cur_x(), motor.get_cur_v(), motor.get_cur_a())) # 適当に移動してみる motor = motor_model() motor.set_max_v(20*65536) motor.set_max_a(1*65536) print('max_v :', motor.max_v) print('max_a :', motor.max_a) pos_list =[] spd_list =[] for i in range(5): pos_list.append(motor.step()) print_status(motor) motor.set_target_x(1000*65536) t = 1 for i in range(5): print('[%d]' % t) pos_list.append(motor.step()) t+=1 print_status(motor) while motor.get_cur_v() != 0 or motor.get_cur_a() != 0: print('[%d]' % t) pos_list.append(motor.step()) t+=1 print_status(motor) for i in range(5): pos_list.append(motor.step()) plt.figure(figsize=(6, 8)) plt.subplot(211) plt.title("position") plt.plot(motor.get_log_x()) plt.subplot(212) plt.title("speed") plt.plot(motor.get_log_v()) plt.tight_layout() plt.savefig('graph0.png') plt.show() # 最大速度に達する前に減速しないといけないパターン motor = motor_model() motor.set_max_v(20) motor.set_max_a(1) pos_list =[] spd_list =[] for i in range(5): pos_list.append(motor.step()) motor.set_target_x(100) for i in range(5): pos_list.append(motor.step()) while motor.get_cur_v() != 0 or motor.get_cur_a() != 0: pos_list.append(motor.step()) for i in range(5): pos_list.append(motor.step()) plt.figure(figsize=(6, 8)) plt.subplot(211) plt.title("position") plt.plot(motor.get_log_x()) plt.subplot(212) plt.title("speed") plt.plot(motor.get_log_v()) plt.tight_layout() plt.savefig('graph1.png') plt.show() calc_limit_v 100 1 13 13 calc_limit_v 99 1 13 13 calc_limit_v 97 1 13 13 calc_limit_v 94 1 13 13 calc_limit_v 90 1 12 12 calc_limit_v 85 1 12 12 calc_limit_v 79 1 12 12 calc_limit_v 72 1 11 11 calc_limit_v 64 1 10 10 calc_limit_v 55 1 10 10 calc_limit_v 45 1 9 9 calc_limit_v 36 1 8 8 <[...]Reading the dataset and preprocessing it.ds = pd.read_csv('car data.csv') ds.head() ds.isnull().sum() ds = ds.drop('Car_Name', axis = 1) ds.head() print(ds['Fuel_Type'].unique()) print(ds['Seller_Type'].unique()) print(ds['Transmission'].unique()) ds['current'] = 2020 ds.head() ds['Nof_Years'] = ds['current'] - ds['Year'] ds.head() ds = ds.drop(['current','Year'], axis = 1) ds.head() ds = pd.get_dummies(ds, drop_first = True) ds.head()Visualizing the data.plt.scatter(ds['Selling_Price'] , ds['Nof_Years']) plt.plot(3 ,6) plt.scatter(ds['Kms_Driven'] , ds['Selling_Price']) plt.xlabel('km driven') plt.ylabel('selling price') plt.legend() sns.distplot(ds) sns.pairplot(ds) plt.figure(figsize=(20,20)) sns.heatmap(ds.corr(), annot = True, cmap = 'RdYlGn')Performing feature selection.x = ds.iloc[:,1:] y = ds.iloc[:,0] x.head() model = ExtraTreesRegressor() model.fit(x,y) imp = pd.Series(model.feature_importances_,index = x.columns) imp.nlargest(5).plot(kind = 'barh') x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)Hyperparameter tuning.rf = RandomForestRegressor() estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num =12)] maxf = ['auto','sqrt'] dept = [int(x) for x in np.linspace(5, 30, num = 6)] mins = [2, 5, 10, 15, 100] minl = [1, 2, 5, 10] param = {'n_estimators': estimators, 'max_features': maxf, 'max_depth': dept, 'min_samples_split': mins, 'min_samples_split' :minl } randcv = RandomizedSearchCV(estimator = rf, param_distributions = param, scoring = 
'neg_mean_squared_error', n_iter = 10, cv = 5, verbose = 2, random_state = 42, n_jobs = 1) randcv.fit(x_train, y_train) y_hat = randcv.predict(x_test)Visualizing the results & saving the model.sns.distplot(y_test-y_hat) plt.scatter(y_test,y_hat) pik = open('car_r.p','wb') pickle.dump(randcv,pik) pik.close()Library ▸ Mathematics ▸ Geometry ▸ 2D ▸ Objects ▸ Polygon Setupimport Library.Mathematics as Mathematics Point = Mathematics.Geometry.D2.Objects.Point PointSet = Mathematics.Geometry.D2.Objects.PointSet Polygon = Mathematics.Geometry.D2.Objects.Polygon Transformation = Mathematics.Geometry.D2.Transformation--- Polygon **Constructors**polygon = Polygon([Point(0.0, 0.0), Point(1.0, 0.0), Point(1.0, 1.0), Point(0.0, 1.0)]) polygon = Polygon([Point(0.0, 0.0), Point(1.0, 0.0), Point(1.0, 1.0), Point(0.0, 1.0)], [[Point(0.5, 0.5), Point(0.5, 0.6), Point(0.6, 0.5)]]) Polygon.Undefined() ;**Operators**polygon == polygon ; polygon != polygon ;**Methods**polygon.isDefined() ; polygon.intersectsPolygon(polygon) ; polygon.containsPoint(Point(0.0, 0.0)) ; polygon.containsPointSet(PointSet([Point(0.0, 0.0)])) ; polygon.getInnerRingCount() ; polygon.getEdgeCount() ; polygon.getVertexCount() ; polygon.getOuterRing() ; polygon.getInnerRingAt(0) ; polygon.getEdgeAt(0) ; polygon.getVertexAt(0) ; # polygon.getEdges() ; polygon.getVertices() ; polygon.toString() ; polygon.applyTransformation(Transformation.Identity()) ;You are given a license key represented as a string S which consists only alphanumeric character and dashes. The string is separated into N+1 groups by N dashes.Given a number K, we would want to reformat the strings such that each group contains exactly K characters, except for the first group which could be shorter than K, but still must contain at least one character. Furthermore, there must be a dash inserted between two groups and all lowercase letters should be converted to uppercase.Given a non-empty string S and a number K, format the string according to the rules described above.Example 1:Input: S = "5F3Z-2e-9-w", K = 4Output: "5F3Z-2E9W" Explanation: The string S has been split into two parts, each part has 4 characters.Note that the two extra dashes are not needed and can be removed.Example 2:Input: S = "2-5g-3-J", K = 2Output: "2-5G-3J"Explanation: The string S has been split into three parts, each part has 2 characters except the first part as it could be shorter as mentioned above.Note:The length of string S will not exceed 12,000, and K is a positive integer.String S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9) and dashes(-).String S is non-empty.#BRUTEFORCE def key_formatting(ls,k): keys=ls.replace("-","") tail =len(keys)-1 op=[] while tail >=0: op.append(keys[tail]) if tail !=0 and (tail % k) ==0: op.append("-") tail -=1 return ''.join(map(str, op[::-1])).upper() print(key_formatting("",2)) #Note the aove solution does not pass one use case # when input is 2-5g-3-J def key_formatting(ls,k): keys=ls.replace("-","")[::-1].upper() tail =len(keys)-1 op=[] while tail >=0: op.append(keys[tail]) if tail !=0 and (tail % k) ==0: op.append("-") tail -=1 return ''.join(map(str, op)) print(key_formatting("2-5g-3-J",2))2-5G-3JTwo tricks and a pitfall Trick 1: The for-else-loopPython allows a for-else-loop. This is used for for-loops with a breaking condition to check whether the breaking condition was met or the loop ran through. 
Example:We have a list of models `models = ['MESSAGE', 'GCAM', 'REMIND']` and want to find out if any model name is shorter than 4 characters. If we find one, we want to print the first one we find. If we do not find one, we want to say that we came up empty-handed. Side Note:Of course the following example can be solved more efficiently using, for example, a list comprehension. It is only meant to illustrate the point of a for-else-loop construct.models = ['MESSAGE', 'GCAM', 'REMIND'] for m in models: if len(m) < 4: print(f"Model '{m}' has length {len(m)}.") break else: print("No model shorter than 4 characters found.")No model shorter than 4 characters found.Trick 2: f-stringsF-strings are Python's latest (added in version 3.6 through [PEP 498](https://www.python.org/dev/peps/pep-0498/)) way of formatting strings. Here are a few short examples of what f-strings can do and why they might be useful for you: Basic use* printing a variable value* in-place evaluation of expressionssome_variable = 'some value' x = 15.3 print(f'{some_variable}') print(f'{17*x - 14}')some value 246.10000000000002Print variable name and value* useful for debugging and error messagesprint(f'{some_variable = }') print(f'{17*x - 14 = }') error_msg = f'There was a problem with {some_variable = }' print(error_msg)some_variable = 'some value' 17*x - 14 = 246.10000000000002 There was a problem with some_variable = 'some value'Padding* useful for creating nicely formatted text output (to the console or to a file)x = 'test' y = 'test 2' # right-aligned print(f'{x:>10}') print(f'{y:>10}') # left-aligned print(f'{x:*<10}') # centered (padded on both sides) print(f'{x:=^10}') # fancy padding depending on a variable n = 25 print(f'{x:~^{n}}')test test 2 test****** ===test=== ~~~~~~~~~~test~~~~~~~~~~~Number formatting and conversion* also useful for creating nicely formatted outputa = 42 b = 42.19648624816541 # Number formatting print(f"{a:04d}") # zero padded integer print(f"{a:06.2f}") # zero padded floating point print(f"{b:.1f}") # floating point, rounds automatically to one decimal (**very handy**) # Number conversion print(f"{a:x}") # hex print(f"{a:X}") # hex (uppercase) print(f"{a:b}") # binary print(f"{a:c}") # ascii print(f"{a:o}") # octal print(f"{a:010b}") # combined with padding, padding with 0 to a total of 10 digits0042 042.00 42.2 2a 2A 101010 * 52 0000101010The Pitfall: Mutable default argumentsThe Python interpreter evaluates default arguments only **once**, when the function is defined.Mutable default arguments are therefore not reset between calls, even though we might expect them to be.Mutable defaults include:* lists* dictionaries* sets* user-defined classes* results from function calls (see the datetime.now() example below)This can lead to unexpected behavior where the defaults are not re-evaluated every time the function is called. For more details refer to [this explanation](https://stackoverflow.com/questions/1132941/least-astonishment-and-the-mutable-default-argument) on Stack Overflow. Side Note: As above, both of the following examples are a bit contrived. However, they are again simply meant to provide a simple illustration of the problem rather than give a realistic use-case in the wild.# really simple function, appends a value x to a list l # the list l is optional; if we don't provide one, the function # takes an empty one. 
def add_to_list(x, l=[]): l.append(x) return l print(add_to_list(1)) # we would expect [1], we get [1] print(add_to_list("hello")) # we would expect ["hello"], we get [1, "hello"] from datetime import datetime from time import sleep # another really simple function which takes a datetime object # and just prints it. Per default we want to print the current # date and time def print_time(dt: datetime = datetime.now()): print(dt) print_time() # we print the time once sleep(5) # here we would expect the print to show a time 5 seconds later, # however, we get the same time as before. This is because datetime.now() # is only evaluated once. print_time()2021-08-12 09:30:33.406968 2021-08-12 09:30:33.406968The solution: the 'if arg is None'-patternIn order to avoid the problem of mutable default arguments not resetting we need to reset them explicitly. For this we use the 'if arg is None'-pattern.```pythondef f(arg=None): if arg is None: arg = whatever you want the default to be```from datetime import datetime from time import sleep # if arg is None pattern def add_to_list(x, l: list = None) -> list: if l is None: l = [] l.append(x) return l def print_time(dt = None): if dt is None: dt = datetime.now() print(dt) print(add_to_list(1)) print(add_to_list(2)) print_time() sleep(5) print_time()[1] [2] 2021-08-12 09:30:38.445944 2021-08-12 09:30:43.451309Bayesian inference of average causal effect with instrumental variablesimport stan import numpy as np from scipy.stats import beta import matplotlib.pyplot as pltWe will implement inference of average causal effect using the finite-response treatment given by Pearl, Causality 2009. Warm-up: using PyStan & beta-binomial model Below is a minimal working example for using PyStan for inference on a coin flip with a beta-distributed prior.x = np.random.choice([0,1], p=[0.2,0.8], size=1000) # dataDeclare the model using the Stan probabilistic programming languagecoin_flip = """ data { int N; int x[N]; } parameters { real theta; } model { theta ~ beta(0.5, 0.5); x ~ bernoulli(theta); } """Compile the modelcoin_data = {"x": x, "N":len(x)} posterior = stan.build(coin_flip, data=coin_data, random_seed=1)Perform inferencefit = posterior.sample(num_chains=4, num_samples=1000)Compare to theorydf = fit.to_frame() a = 0.5 + np.sum(x) b = 0.5 + len(x) - np.sum(x) theta_sp = np.linspace(0.79, 0.86) posterior_theory = beta.pdf(theta_sp, a, b) fig, ax = plt.subplots() df['theta'].hist(ax=ax, bins='auto', label='samples', density=True) ax.plot(theta_sp, posterior_theory, '-k', label='theory') ax.set_xlabel(r'$\theta$') ax.set_ylabel(r'$P(\theta)$') ax.legend()As expected Finite-response treatment of instrumental variable model Imagine we are running a clinical trial, and the variable $Z \in \{0,1\}$ represents the treatment assigned to a patient. Let $X \in \{0,1\}$ denote whether the patient takes the treatment, and $Y \in \{0,1\}$ denote the observed response. There are factors (both observed and unobserved) $U$ that influence the way a subject responds to treatments, which may also affect the patient's choice to take the treatment. When $z \neq x$ we have imperfect compliance. 
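As a quick, self-contained illustration of imperfect compliance (a hedged sketch with made-up probabilities; it is separate from the canonical-partition generator used below), one can simulate an unobserved factor that influences both whether the treatment is taken and the response:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 1000
z = rng.integers(0, 2, size=n)          # randomized treatment assignment
u = rng.integers(0, 2, size=n)          # unobserved factor (confounder)
# whether the treatment is actually taken depends on z and u (illustrative probabilities)
p_take = np.where(z == 1, 0.8, 0.1) * np.where(u == 1, 1.0, 0.9)
x = rng.binomial(1, p_take)
# observed response depends on the treatment actually taken and on u
y = rng.binomial(1, 0.3 + 0.3 * x + 0.2 * u)
print("fraction of non-compliers (z != x):", np.mean(z != x))
```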
Pearl (Causality, 2009) shows that regardless of the domain of $U$, it can always be partitioned into four equivalence classes for the relationship between $X$ and $Y$, and similar for $Z$ and $X$, resulting in a 16 possible subpopulations to describe $U$ completely: Let's generate the data $X$, $Y$, $Z$ according to these canonical partitions, so we have access to the ground truth for inferencefrom scipy.stats import multinomial, dirichlet p_subpop = dirichlet.rvs(0.5*np.ones(16), size=1, random_state=42) p_subpop = p_subpop.squeeze(0) multinomial.rvs(1, p_subpop, size=N, random_state=42) N = 1000 R = multinomial.rvs(1, p_subpop, size=N, random_state=42).reshape((N,4,4)) _, rx, ry = np.where(R) R[0] rx[0], ry[0] def make_response_mapping(u: int, r: int): if r==0: return 0 elif r==1 and u==0: return 0 elif r==1 and u==1: return 1 elif r==2 and u==0: return 1 elif r==2 and u==1: return 0 elif r==3: return 1 else: raise ValueError _make_response_mapping = np.vectorize(make_response_mapping) x = _make_response_mapping(z, rx) y = _make_response_mapping(x, ry) z.shape, x.shape, y.shapeMake PyStan modelcanonical_partition_instrument = """ data { int N; int x[N]; int y[N]; int z[N]; } parameters { vector[16] vr; int r; } model { r ~ categorical(vr); if((r == 1) || (r == 5) || (r==9) || (r==13)) x=0; else if (((r == 2) || (r == 6) || (r==10) || (r==14)) && (z==0)) x=0; else if (((r == 2) || (r == 6) || (r==10) || (r==14)) && (z==1)) x=1; else if (((r == 3) || (r == 7) || (r==11) || (r==15)) && (z==0)) x=1; else if (((r == 3) || (r == 7) || (r==11) || (r==15)) && (z==1)) x=0; else if((r == 4) || (r == 8) || (r==12) || (r==16)) x=1; if((r == 1) || (r == 2) || (r==3) || (r==4)) y=0; else if (((r == 5) || (r == 6) || (r==7) || (r==8)) && (x==0)) y=0; else if (((r == 5) || (r == 6) || (r==7) || (r==8)) && (x==1)) y=1; else if (((r == 9) || (r == 10) || (r==11) || (r==12)) && (x==0)) y=1; else if (((r == 9) || (r == 10) || (r==11) || (r==12)) && (x==1)) y=0; else if (((r == 13) || (r == 14) || (r==15) || (r==16)) && (x==0)) y=1; } """Compile the modelcanonical_partition_instrument_data = {"x": x, "y":y, "z":z, "N":N} posterior = stan.build(canonical_partition_instrument, data=canonical_partition_instrument_data, random_seed=1)Interactive fracture analysisimport pandas as pd %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns from ipywidgets import interact import ipywidgets as widgets import mplstereonet as mpl from mpl_toolkits.axes_grid1.inset_locator import inset_axesImport fracture data using Pandasdf = pd.read_csv('../data/image_data.csv', index_col='Unnamed: 0') df.head() df.shape df.describe()Inspect fracture data with matplotlib/seaborndf.columns sns.pairplot(data=df, vars=['DIP', 'AZIM'], hue='DIPTYPE', palette='colorblind', markers=['x','o','v','^','s','p'], height=3 ) plt.show()Inspect fracture data with interactive mplstereonet@interact(diptype=['FRACTURE', 'HEALEDFRACTURE', 'DRILL.IND.FRAC.', 'BREAKOUT', 'BED', 'BED_LOW_CONF'], alpha=widgets.FloatSlider(min=0, max=1, step=0.05, continuous_update=False), poles=False, show_mean=False, density=False, ) def plot_data(diptype, alpha, poles, show_mean, density): """ plot a stereonet of image data args: diptype, alpha, poles, show_mean, density returns: None """ # set up the plot fig, ax = mpl.subplots(figsize=(6,6), ncols=1, nrows=1) # get data strike, dip = df.AZIM.loc[df['DIPTYPE'] == diptype], df.DIP.loc[df['DIPTYPE'] == diptype] # great circles ax.plane(strike, dip, 'g-', linewidth=1, alpha=alpha) # plot decoration 
ax.grid(color='k', alpha=0.2) data_count = strike.count() mean_azim = strike.mean() mean_dip = dip.mean() ax.text(0, 10, f'N points: {data_count}\nmean Azim: {mean_azim:.1f}\nmean Dip: {mean_dip:.1f}') # options if poles: ax.pole(strike, dip, 'kx', markersize=3, alpha=1) if show_mean: ax.plane(mean_azim, mean_dip, 'r--', lw=3, alpha=0.7) ax.pole(mean_azim, mean_dip, 'rs', markersize=5, alpha=0.7) if density: im = ax.density_contourf(strike, dip, measurement='poles', alpha=0.3) axins = inset_axes(ax, width="5%", height="45%", loc='lower left', bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0, ) cbar = fig.colorbar(im, cax=axins) cbar.ax.get_yaxis().labelpad = 15 cbar.ax.set_ylabel('Orientation density distribution', rotation=90) return NoneMaterials API - Exercise 2: Using the MPRester and Pymatgen to Find Materials With Exotic Mechanical PropertiesThe tetragonal SiO$_2$ polymorph $\alpha$-cristobalite is one of the very few crystalline materials known to have a negative average Poisson's ratio, which means that its cross-section expands under tensile strain rather than contracting. This property can be extremely useful in a variety of applications such as scratch-resistant coatings and high-toughness ceramics. Why does $\alpha$-cristobalite exhibit this property while other materials do not? The prevailing hypothesis is that $\alpha$-cristobalite's negative Poisson's ratio is a result of its crystal structure. If that's the case, then perhaps we can find other materials with this exotic property by looking for materials with similar structures and then calculating their Poisson's ratios. Step 1: Retrieve the structure of $\alpha$-cristobaliteFirst, we should find the ground state structure for $\alpha$-cristobalite, which has the spacegroup $P4_12_12$ [92]. Remember, stable materials have *low* energy above hull (`energy_above_hull`)._Hint: Query using `chemsys_formula` and `spacegroup_number` for the desired compound and then select the one with the lowest `energy_above_hull` if there are multiple results. Consider using the `sort_field` and `ascending` input parameters of the query method._# Your code here from mp_api.matproj import MPRester with MPRester("") as mpr: ac = mpr.query(____) ac_structure = ____ ac_mpid = ____Step 2: Build a structure comparison engine and test it outThe code below creates a structure matcher object that can be used to compare if two structures are simlar (loose tolerances = similar, tight tolerances = identical). Please verify that the structure matcher works for identical structure by comparing the structure of AC with itself. _Hint: Replace with the two structures you want to compare, separated by a comma._##### Don't edit code below ##### from pymatgen.analysis.structure_matcher import StructureMatcher from pymatgen.analysis.structure_matcher import FrameworkComparator # This line initializes our comparator. Please don't play with its tolerances until after you are done comparison_engine = StructureMatcher(ltol=.2, stol=.5, angle_tol=10, primitive_cell=True, scale=True, attempt_supercell=True, comparator=FrameworkComparator()) ##### Don't edit code above ##### print(comparison_engine.fit(ac_structure, ac_structure))We know that the high-temperature phase of cristobalite, $\beta$-cristobalite [(mp-546794)](https://materialsproject.org/materials/mp-546794/), has a very similar structure to $\alpha$-cristobalite. Let's see if the structure matcher agrees. 
Please retreive the structure for `mp-546794` and then compare it with our prototype structure. _Hint: Use the `get_structure_by_material_id` method of the `MPRester`. Also, no need to re-initialize the structure matcher, just call it's `fit` method again._# Your code here with MPRester("") as mpr: bc_structure = ____ print(comparison_engine.fit(bc_structure, ac_structure))Just to make sure we haven't increased the tolerances too much, please try it against a random compound to make sure it's not matching dissimilar structures._Hint: "mp-4991" is a good random MPID you can use_# Your code here with MPRester("") as mpr: bc_structure = ____ print(comparison_engine.fit(bc_structure, ac_structure))Step 3: Get a set of candidate structures to compareNow that we have our comparator, we need some candidates to screen! Imagine that we have an experimental colleague, , who is an expert at synthesizing vanadate compounds. We have a hunch that some of the vanadates coming out of Dr. Tsarpinski's lab might have similar structures to $\alpha$-cristobalite and therefore might have negative Poisson's ratios. Let's see if we're right:For our search, we want to start with a set of structures that are:* Computationally tractable, so not too many sites (i.e `nsites = (0, 50)` is a good range)* Not too unlikely to be synthesizable (energy above hull <=100 meV/atom, i.e. `energy_above_hull = (0.0, 0.1)`)* Have a "vanadate" composition, i.e. `chemsys_formula = "*V3O8"`Construct and execute a query to get the `structure`, `material_id`, and `formula_pretty` for all materials that match these criteria:# Your code here with MPRester("") as mpr: vanadate_ids = [entry.material_id for entry in mpr.query(____)] vanadates = mpr.query(____)Step 4: Screen the vanadates for similar structures to $\alpha$-cristobalite and then check if our hypothesis is valid by querying for their Poisson's ratios. Now that we have a list of vanadates, let's screen it for similar structures. After we have the similar structures, make one final query to the Materials API to retrieve the `formula_pretty` and `homogeneous_possion` property for each one. _Hint: Create an empty list for matches and then iterate through the vanadate entries with a for loop. If its structure is similar to $\alpha$-cristobalite, append its `material_id` to a list of matches. After you have a match list, query the Materials API for entries with a `material_id` that is in your matches._# Your code here matches = [] for entry in vanadates: if comparison_engine.fit(ac_structure, entry.structure): ____ with MPRester("") as mpr: elastic_data = mpr.query(____) for e in elastic_data: print(e.material_id, e.formula_pretty, e.homogeneous_poisson)ArXiv API: parsing and pagingimport urllib.request import feedparser import time # Base api query url base_url = 'http://export.arxiv.org/api/query?' 
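Before paging over every category, a single small request can be issued as a sanity check; this sketch reuses `base_url` and the same query fields as the loop below (the category and `max_results` value here are arbitrary examples):

```python
# one-off request: a few recent math.AG entries (illustrative values only)
query = 'search_query=cat:math.AG&start=0&max_results=3'
response = urllib.request.urlopen(base_url + query)
feed = feedparser.parse(response)
for entry in feed.entries:
    print(entry.title.replace('\n ', ''))
```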
cat_list = ['math.AG', # Mathematics - Algebraic Geometry 'math.AT', # Mathematics - Algebraic Topology 'math.AP', # Mathematics - Analysis of PDEs 'math.CT', # Mathematics - Category Theory 'math.CA', # Mathematics - Classical Analysis and ODEs 'math.CO', # Mathematics - Combinatorics 'math.AC', # Mathematics - Commutative Algebra 'math.CV', # Mathematics - Complex Variables 'math.DG', # Mathematics - Differential Geometry 'math.DS', # Mathematics - Dynamical Systems 'math.FA', # Mathematics - Functional Analysis 'math.GM', # Mathematics - General Mathematics 'math.GN', # Mathematics - General Topology 'math.GT', # Mathematics - Geometric Topology 'math.GR', # Mathematics - Group Theory 'math.HO', # Mathematics - History and Overview 'math.IT', # Mathematics - Information Theory 'math.KT', # Mathematics - K-Theory and Homology 'math.LO', # Mathematics - Logic 'math.MP', # Mathematics - Mathematical Physics 'math.MG', # Mathematics - Metric Geometry 'math.NT', # Mathematics - Number Theory 'math.NA', # Mathematics - Numerical Analysis 'math.OA', # Mathematics - Operator Algebras 'math.OC', # Mathematics - Optimization and Control 'math.PR', # Mathematics - Probability 'math.QA', # Mathematics - Quantum Algebra 'math.RT', # Mathematics - Representation Theory 'math.RA', # Mathematics - Rings and Algebras 'math.SP', # Mathematics - Spectral Theory 'math.ST', # Mathematics - Statistics 'math.SG', # Mathematics - Symplectic Geometry ] titles_list = [] for cat in cat_list: # Search parameters search_query = 'cat:' + cat # search_query = 'cat:math.AG' start = 0 # start at the first result total_results = 4000 # want _ total results results_per_iteration = 500 # results at a time wait_time = 3 # number of seconds to wait between calls print('Searching arXiv for %s' % search_query) for i in range(start,total_results,results_per_iteration): # print("Results %i - %i" % (i,i+results_per_iteration) query = 'search_query=%s&start=%i&max_results=%i' % (search_query,i,results_per_iteration) # perform a GET request using the base_url and query response = urllib.request.urlopen(base_url+query) # parse the response using feedparser feed = feedparser.parse(response) # Run through each entry, and print out title for entry in feed.entries: # print(entry.title) entry_clean_1 = entry.title.replace('\n ','') # entry_clean_2 = entry_clean_1.replace(',','') titles_list.append(entry_clean_1) # Remember to play nice and sleep a bit before you call the api again! # print('Sleeping for %i seconds' % wait_time) time.sleep(wait_time) # print(titles_list) # with open('titles_test.csv','w') as file: # for title in titles_list: # file.write(title) # file.write('\n') import pandas as pd # titles_df = pd.read_csv('titles_test.csv', header = None, names = ['title'], encoding='utf-8') titles_df = pd.DataFrame(titles_list, columns = ['title']) print(titles_df.head()) print(titles_df.tail()) print(titles_df.info()) corpus_raw = u"" for i in range(titles_df.shape[0]): line = titles_df.loc[i, 'title'] corpus_raw += line + '. 
' import nltk # from nltk.tokenize import sent_tokenize, word_tokenize tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') raw_sentences = tokenizer.tokenize(corpus_raw) # same as sent_tokenize # print(raw_sentences) print(len(raw_sentences)) import re, unidecode # Convert sentences to words from nltk.corpus import stopwords # from nltk.stem import WordNetLemmatizer stop_words = stopwords.words('english') # lemmatizer = WordNetLemmatizer() def sentence_to_wordlist(raw): clean_1 = unidecode.unidecode(raw) # to remove diacritical marks clean_2 = re.sub("[^a-zA-Z0-9]", " ", clean_1) words = clean_2.split() words = [w for w in words if w not in stop_words and len(w)>3] # words = [lemmatizer.lemmatize(w) for w in words] words = [w.lower() for w in words] return words sentence_to_wordlist('Higgs line bundles, Green-Lazarsfeld sets,and maps of Kähler manifolds to curves.') sent_in_words = [] for raw_sentence in raw_sentences: s = sentence_to_wordlist(raw_sentence) sent_in_words.append(s) print(len(sent_in_words)) token_count = sum([len(sentence) for sentence in sent_in_words]) print(token_count) # number of tokens; if repeated, token is counted twice from gensim.models import Phrases from gensim.models.phrases import Phraser phrases = Phrases(sent_in_words, min_count = 1, threshold = 2) bigram = Phraser(phrases) import pickle pickle.dump(bigram, open("bigram.pkl", "wb")) sentences_bigram = list(bigram[sent_in_words]) sample = ['higgs', 'line', 'bundles', 'green', 'lazarsfeld', 'sets', 'maps', 'kahler', 'manifolds', 'curves'] print(bigram[sample]) # print(list(bigram[sent_in_words])) print(len(sentences_bigram)) print(sum([len(sentence) for sentence in sentences_bigram])) import gensim.models.word2vec as w2v import multiprocessing # Dimensionality of the word vectors; WHY 25? num_features = 25 # Default is 100 # Minimum word count threshold; ignore all words with total frequency lower than this min_word_count = 2 # Default is 5 # Number of threads to run in parallel num_workers = multiprocessing.cpu_count() print(num_workers) # How many words before and after a given word are included as context words of the given word context_size = 5 # This is the default value # Downsample/subsample setting for frequent words; higher-frequency words are randomly downsampled downsampling = 0.001 # This is the default value # Seed for random number generator seed = 1 # Use gensim's Word2Vec model; topic modeling titles2vec = w2v.Word2Vec( sg=1, # skip-gram, as opposed to continuous BOW seed=seed, workers=num_workers, size=num_features, min_count=min_word_count, window=context_size, sample=downsampling) # Each word/bigram is associated with a vector with 25 components; not each title! titles2vec.build_vocab(sentences_bigram) # initializing model on sentences_bigram corpus len(titles2vec.wv.vocab) # length of vocab inversely related to min_count parameter titles2vec.train(sentences_bigram, total_examples=titles2vec.corpus_count, epochs=titles2vec.iter) # print(titles2vec.corpus_count) # corpus_count is number of titles # print(titles2vec.iter)- What does this number mean? 
How do I interpret this number?# Save the model titles2vec.save("titles2vec_25.w2v") # Load the saved model titles2vec = w2v.Word2Vec.load("titles2vec_25.w2v")Visualizationsimport matplotlib.pyplot as plt import seaborn as sns from sklearn.manifold import TSNE # Create a TSNE instance tsne = TSNE(n_components = 2, learning_rate = 400) # 200 is the default learning rate; may play around with this*** # flatten to two dimensions for plotting purposes # print(type(titles2vec.wv.syn0)) # print(titles2vec.wv.syn0.shape) ### for word in titles2vec.wv.vocab: ### print(titles2vec.wv.vocab[word].index, word, titles2vec.wv.vocab[word].count) # index is based on frequency! all_word_vectors_matrix = titles2vec.wv.syn0 # access output en masse as a 2d numpy array # Train t-SNE all_word_vectors_matrix_2d = tsne.fit_transform(all_word_vectors_matrix) # Select the 0th feature xs = all_word_vectors_matrix_2d[:,0] # Select the 1st feature ys = all_word_vectors_matrix_2d[:,1] #sns.set_context("poster") plt.scatter(xs,ys, s = 1) plt.show() # Can create dataframe from list of tuples, not just from dictionaries points = pd.DataFrame( [ (word, coords[0], coords[1]) for word, coords in [ (word, all_word_vectors_matrix_2d[titles2vec.wv.vocab[word].index]) for word in titles2vec.wv.vocab ] ], columns = ["word", "x", "y"] ) print(points.head()) print(points.tail()) # Save t-SNE coordinates points.to_csv('w2v_points.csv',index=False) sns.set_context("poster") # points.plot.scatter("x", "y", s=10, figsize=(20, 12)) plt.scatter(points['x'], points['y'], s = 1) plt.show() # annotate words in our t-SNE point-cloud plt.figure(figsize=(20, 12)) for x, y, word in zip(points['x'], points['y'], points['word']): plt.annotate(word, (x, y), fontsize=5) plt.scatter(points['x'], points['y'], s = 2) plt.show() # Plot zoomed regions def plot_region(x_bounds, y_bounds): s = points[ (points['x'] >= x_bounds[0]) & (points['x'] <= x_bounds[1])] s = s[(s['y'] >= y_bounds[0]) & (s['y'] <= y_bounds[1])] plt.figure(figsize=(10, 8)) ax = plt.scatter(s['x'],s['y'],s=33) # for i, point in s.iterrows(): # ax.text(point.x + 0.005, point.y + 0.005, point.word, fontsize=10) for x, y, word in zip(s['x'], s['y'], s['word']): plt.annotate(word, (x + 0.005, y + 0.005), fontsize=10) plt.show() # 's' is a "restricted" dataframe, restricted in both x- and y-directions # nditer for iterating through rows of numpy array # plot_region((-10,10),(-10,10)) # Given a query word, plot nearby words def plot_nearby(word,side_len=4): if side_len <= 0: print('---------------------side_len must be greater than zero!---------------------') elif word not in titles2vec.wv.vocab: print('---------------------Cannot find %s in the vocabulary---------------------' % word) else: coordinates = points[points['word']==word].reset_index() # reset index to 0, only one row x, y = coordinates.loc[0,'x'], coordinates.loc[0,'y'] x1, x2, y1, y2 = x-side_len/2, x+side_len/2, y-side_len/2, y+side_len/2 plot_region(x_bounds=(x1,x2), y_bounds=(y1,y2)) # except Exception as e: # don't fully understand this; shouldn't it be a ValueError if word isn't in vocabulary? # print('---------------------Cannot find %s in the vocabulary---------------------' % word) plot_nearby('integrable',1)Building the recommendation system: nearest 5 Already associated a vector (with 25 components) with each word/bigram in the corpus. 
Now SUM these vectors to get vector associated with paper title.import numpy as np # stop_words = set(stopwords.words("english")) stop_words = stopwords.words("english") def title_to_vec(title, model=titles2vec, dim=25): result = np.zeros(dim) # result = [0] * dim clean_1 = unidecode.unidecode(title) # to remove diacritical marks clean_2 = re.sub("[^a-zA-Z0-9]", " ", clean_1) words = bigram[clean_2.split()] words = [w.lower() for w in words if w not in stop_words and len(w)>3] for word in words: if word in model.wv.vocab: w2v = model.wv[word] result += w2v return result # maybe remove .tolist() titles_df['title_vec'] = titles_df['title'].apply(lambda x: title_to_vec(x, model = titles2vec, dim = 25)) # print(titles_df.iloc[:5,1]) # print(titles_df['title_vec'][:5]) # print(titles_df.head()) title_vec_dict = {} for title, vec in zip(titles_df['title'],titles_df['title_vec']): title_vec_dict[title] = vecAny search entry is regarded as a title. Then compare vectors using cosine similarity.def cos_sim(v1, v2): """Returns a cosine similarity of two vectors""" norm1, norm2 = np.linalg.norm(v1), np.linalg.norm(v2) norm_product = norm1 * norm2 # norm_ratio = min(norm1, norm2) / max(norm1, norm2) !!!!!! if norm_product == 0: return 0 else: return np.dot(v1, v2) / norm_product # Just as we cleaned titles and converted them to vectors, we must clean the search entry def clean_title(title): clean_1 = unidecode.unidecode(title) # to remove diacritical marks clean_2 = re.sub("[^a-zA-Z0-9]", " ", clean_1) words = bigram[clean_2.split()] return " ".join([w.lower() for w in words if w not in stop_words and len(w)>3]) def get_most_similar_title(title, dataframe = titles_df, model = titles2vec, dim = 25, k = 5): """Returns the k most similar titles to the new_title""" # new_title = clean_title(title) new_vec = title_to_vec(title, model, dim) sim_list = titles_df[titles_df['title'] != '']['title_vec'].apply(lambda x: cos_sim(new_vec, x)) sim_list_sorted = sim_list.sort_values(ascending = False) # return sim_list indices = list(sim_list_sorted.index)[:k] similar_titles = [titles_df.loc[i, 'title'] for i in indices] # print("Search: " + new_title) # print('Similar Articles:\n') # for i, title in enumerate(similar_titles): # print('('+str(i+1)+') '+title+'\n') return similar_titles print(titles_df.head()) print(titles_df.info()) get_most_similar_title('curves') titles_df.to_csv('titles_df_test.csv', index = False) test_df = pd.read_csv('titles_df_test.csv') print(test_df.info()) pickle.dump(title_vec_dict, open("title_vec_dict", "wb"))Blechpy Poisson HMM TutorialThis tutorial will cover how to setup and fit spike data to a poisson HMM.To use this you must first already have a blechpy.dataset object created with data that is past the spike sorting stage.# Imports import blechpy from blechpy.analysis import poissonHMM as phmm import pandas as pd import numpy as np # First get the path to your recording folder rec_dir = '/data/Katz_Data/Stk11_Project/RN10/RN10_ctaTest_190220_131512'Fitting a single HMMTo fit a single HMM you will using the PoissonHMM object. This object will house all the necessary parameters for an HMM except for the data being fitted. Gathering the dataFirst you will need to collect the data for the HMM to fit. This should be a spike array (numpy array, dtype=int32) with 3-dimensions: Trial, Cell, Time bin with each value being the number of spikes in the time bin. 
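For shape reference only, a dummy array with that layout could be built as follows (the trial, cell, and bin counts are made-up values, not taken from this recording):

```python
import numpy as np

n_trials, n_cells, n_bins = 30, 8, 200   # illustrative sizes only
dummy_spikes = np.random.poisson(0.5, size=(n_trials, n_cells, n_bins)).astype('int32')
print(dummy_spikes.shape)                # (30, 8, 200): trial x cell x time bin, spike counts per bin
```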
If you have sorted your units and then used dat.make_unit_arrays(), then your spike arrays are stored in you h5 file with 1ms time bins from -2000 ms to 5000 ms. You will need to grab this spike array and cut it down to the right time window that you want to model as well as only the units you want to use. Additionally, you may need to rebin this array to have a different time step (especially with sparse firing units). phmm has a useful function for gathering this:din_channel = 0 # Channel of the digital input that the trials you wish to fit are on unit_type = 'single' # This can be single (for all single units), # pyramidal (only single unit regular-spiking cells) # or interneuron (only single unit fast-spiking cells) # The parameters below are optional time_start = 0 # Time start in ms time_end = 2000 # Time end in ms dt = 0.01 # desired bin size for the return spike array in seconds, default is 0.001 seconds spike_array, dt, time = phmm.get_hmm_spike_data(rec_dir, unit_type, din_channel, time_start, time_end, dt)Initializing and fitting the modelNow you can go ahead and initialize and fit your HMM. Something to note: I have not yet figured out the best parameters to use to test convergence. So for now at each iteration the changes in every matrix (transition, emission and initial distribution) are computed and when the total change in every matrix is below the threshold then fitting stops. Alternatively, fitting stops if the maximum number of iterations is reached. For now the default convergence threshold is 1e-4 which works well for the simulated data. I have not yet had actual data meet this criteria. Also the cost of the model is computed on each iteration. The cost is computed by predicting the state at each time point and then using the emission (rate) matrix to predict the firing rate in each bin and them computing the distance of this prediction from the actual firing rate. This is then summed over time bins and averaged over trials to get the final cost of the model. This would probably provide a better measure of convergence, but I have not yet determined the best threshold for change in cost at which to stop fitting. Also this may lead to overfitting. But cost does provide a means of comparing models since BIC is only a good measure to compare models with the same number of states and time bins.n_states = 3 # Number of predicted states in your data # Initializing the model model = phmm.PoissonHMM(n_states) # Notice you're not giving it the data yet # Fitting the model convergence_threshold = 1e-4 # This is optional, the default is 1e-5, for my final models I used 1e-10. # This is the threshold for fitting such that when the change in log_likelihood # betweeen iterations is below this then fitting ends. max_iter = 1000 # This is also optional, the default is 1000 model.fit(spike_array, dt, time, max_iter=max_iter, thresh=convergence_threshold)Understanding your modelNow that the model is fitted there are some useful aspects to know about it. 
Important Attributes- model.transition - transition matrix giving probability of switching from one state to another- model.emission - This is actually a rate matrix expressing the predicted firing of each neuron in each state- model.initial_distribution - This gives the probability of being in each state at the start- model.cost - This has the last computed cost of the model- model.BIC - This has the last computed Bayesian Information Criterion of the model- model.best_sequences - This has the best predicted sequence for each trial- model.max_log_prob - Max log probability of the sequences (not sure this is computed correctly) Useful functions in the model best_sequences, max_log_prob = model.get_best_paths(spike_array, dt) forward_probs = model.get_forward_probabilities(spike_array, dt) backward_probs = model.get_backward_probabilities(spike_array, dt) gamma_probs = model.get_gamma_probabilites(spike_array, dt) Additionally, the model keeps a limited history of previous iterations and can be rolled back in case you pass a minimum. Use: `model.set_to_lowest_cost()` or `model.set_to_lowest_BIC()` Finally, if you would like to re-fit the model, be sure to randomize it again before refitting:- `model.randomize(spike_array, dt)`- `model.fit(spike_array, dt)` Fitting and saving HMMsThat's just a breakdown of a `PoissonHMM` object. A much better way is to use the `HmmHandler`. This interface handles fitting HMMs for all digital inputs (tastes) as well as trying different parameter sets, plotting and saving all data to an hdf5 store. *This store is separate from your data h5 file*The HMM handler also takes care of parallelizing HMM fitting and creating plots for fitted HMMs The ParametersThe HmmHandler is passed parameters as a dict or a list of dicts. You can provide as many or as few of these parameters as you want. The defaults are drawn from `phmm.HMM_PARAMS`. These important parameters are:- hmm_id - This is set automatically by the handler- taste - This will be set automatically as all tastes in the dataset, but you can specify a single one if you'd like. **See Updates below**.- channel - This is always set automatically- unit_type - can be 'single', 'pyramidal', or 'interneurons'- dt - time bin size to use in seconds- threshold - the convergence threshold to use for fitting- max_iter - max number of iterations while fitting- n_cells - Set automatically- n_trials - Set automatically- time_start - Time start to cut data- time_end - Time end to cut data- n_repeats - Number of repeats to fit for this HMM, the best is chosen automatically by lowest BIC- n_states - Number of predicted states to fit- fitted - set automatically when fitting is complete Notice that a lot are set automatically, so your input dict need only contain the parameters you want to deviate from the defaults; the rest will be filled in. 
See the below cell's output to see the default dictphmm.HMM_PARAMS # Example of defining parameter set params = [{'unit_type': 'pyramidal', 'time_end': 2500, 'n_states': x} for x in [2,3]] print(params)[{'unit_type': 'pyramidal', 'time_end': 2500, 'n_states': 2}, {'unit_type': 'pyramidal', 'time_end': 2500, 'n_states': 3}]Now we can initialize and run the handlerKeep in mind that after using the handler you can load it again at any time, add new parameters, and re-run; only the models that haven't already been fitted will be run.# Initializing handler = phmm.HmmHandler(rec_dir) # Save directory is automatically made inside the recording directory, # but you can also specify another place with the save_dir keyword argument. # You can also pass the params directly when initializing the handler, but # I just split it here so you can see how to add new parameters later. handler.add_params(params) # Running the handler handler.run() # to overwrite existing models pass overwrite=True # To plot data handler.plot_saved_models() # Looking at the parameters already in the handler parameter_overview = handler.get_parameter_overview() # this is a pandas DataFrame # Looking at the parameters and fitted model stats data_overview = handler.get_data_overview() # also a pandas DataFrame with extra info such as cost and BIC # The matrices defining each HMM and the best sequences can be accessed from the HDF5 store directly. They can also be accessed programmatically with: hdf5_file = handler.h5_file hmm, time, params = handler.get_hmm(0) # the hmm_id number goes here # The hmm object has an attribute stat_arrays with various information including best_sequences, # gamma_probabilities, max_log_prob (on each iteration), time, and row_id # Now you have the PoissonHMM object with the fitted model parameters and can do anything with it. # The only information lost is the model history; every model is set to the best model in its history before savingchecking dataframesprint('The completed offers dataframe has {} rows and {} columns'.format\ (completed_offers.shape[0],completed_offers.shape[1])) completed_offers.head() print('The offers_received dataframe has {} rows and {} columns'.format\ (offers_received.shape[0],offers_received.shape[1])) offers_received.head() print('The offers_viewed dataframe has {} rows and {} columns'.format\ (offers_viewed.shape[0],offers_viewed.shape[1])) offers_viewed.head() print('The transaction dataframe has {} rows and {} columns'.format\ (transaction.shape[0],transaction.shape[1])) transaction.head()IPython Notebook SetupThese commands are needed for plots to appear in the notebook. Estimating the Parameters of a GJR-GARCH ModelThis example will highlight the steps needed to estimate the parameters of a GJR-GARCH(1,1,1) model with a constant mean. The volatility dynamics in a GJR-GARCH model are given by $$\sigma_{t}^{2}=\omega+\sum_{i=1}^{p}\alpha_{i}\epsilon_{t-i}^{2}+\sum_{j=1}^{o}\gamma_{j}\epsilon_{t-j}^{2}I_{\left[\epsilon_{t-j}<0\right]}+\sum_{k=1}^{q}\beta_{k}\sigma_{t-k}^{2}.$$Returns are assumed to be conditionally normal, $r_{t}|\mathcal{F}_{t-1}\sim N\left(\mu,\sigma_{t}^{2}\right)$, with $\epsilon_{t}=r_{t}-\mu$, and parameters are estimated by maximum likelihood. To estimate the parameters, it is necessary to:1. Produce some starting values2. Estimate the parameters using (quasi-) maximum likelihood3. 
Compute standard errors using a “sandwich” covariance estimator (also known as the [BollerslevWooldridge::1992] covariance estimator)The first task is to write the log-likelihood which can be used in an optimizer. The log-likelihood function will compute the volatility recursion and the log-likelihood. It will also, optionally, return the $T$ by 1 vector of individual log-likelihoods which are useful when approximating the scores.import matplotlib.pyplot as plt import numpy as np import pandas as pd from numpy import size, log, pi, sum, array, zeros, diag, asarray, sqrt, \ copy from numpy.linalg import inv from scipy.optimize import minimizeThe conditional log-likelihood of a normal random variable is$$\ln f\left(r_{t}|\mu,\sigma_{t}^{2}\right)=-\frac{1}{2}\left(\ln2\pi+\ln\sigma_{t}^{2}+\frac{\left(r_{t}-\mu\right)^{2}}{\sigma_{t}^{2}}\right),$$which is negated in the code since the optimizers all minimize.def gjr_garch_likelihood(parameters, data, sigma2, out=None): """Negative log-likelihood for GJR-GARCH(1,1,1) model""" mu = parameters[0] omega = parameters[1] alpha = parameters[2] gamma = parameters[3] beta = parameters[4] T = size(data,0) eps = data - mu # Data and sigma2 are T by 1 vectors for t in range(1,T): sigma2[t] = (omega + alpha * eps[t-1]**2 + gamma * eps[t-1]**2 * (eps[t-1]<0) + beta * sigma2[t-1]) logliks = 0.5*(log(2*pi) + log(sigma2) + eps**2/sigma2) loglik = sum(logliks) if out is None: return loglik else: return loglik, logliks, copy(sigma2)The keyword argument `out` has a default value of `None`, and is used to determine whether to return 1 output or 3. This is common practice since the optimizer requires a single output -- the log-likelihood function value, but it is also useful to be able to output other useful quantities, such as $\left\{ \sigma_{t}^{2}\right\}$.The optimization is constrained so that $\alpha+\gamma/2+\beta\leq 1$, and the constraint is provided in a separate function.def gjr_constraint(parameters): """ Constraint that alpha+gamma/2+beta<=1""" alpha = parameters[2] gamma = parameters[3] beta = parameters[4] return array([1-alpha-gamma/2-beta]) constraint = {"type": "ineq", "fun": gjr_constraint}Note that the constraint function takes the same inputs as the negative of the log-likelihood function, even though only parameters is required to compute the constraint.It is necessary to discuss one other function before proceeding with the main block of code. The asymptotic variance is estimated using the “sandwich” form which is commonly expressed as$$\mathcal{J}^{-1}\mathcal{I}\mathcal{J}^{-1}$$where $\mathcal{J}$ is the expected Hessian and $\mathcal{I}$ is the covariance of the scores. Both are numerically approximated, and the strategy for computing the Hessian is to use the definition that $$\mathcal{J}_{ij}\approx\frac{f\left(\theta+e_{i}h_{i}+e_{j}h_{j}\right)-f\left(\theta+e_{i}h_{i}\right)-f\left(\theta+e_{j}h_{j}\right)+f\left(\theta\right)}{h_{i}h_{j}}$$where $h_{i}$ is a scalar “step size” and $e_{i}$ is a vector of 0s except for element $i$, which is 1. A 2-sided version of this approximation, which takes both forward and backward steps and then averages, is below. 
For more on numerical derivatives, see [FlanneryPressTeukolskyTeukolsky::1992].def hessian_2sided(fun, theta, args): f = fun(theta, *args) h = 1e-5*np.abs(theta) thetah = theta + h h = thetah - theta K = size(theta,0) h = np.diag(h) fp = zeros(K) fm = zeros(K) for i in range(K): fp[i] = fun(theta+h[i], *args) fm[i] = fun(theta-h[i], *args) fpp = zeros((K,K)) fmm = zeros((K,K)) for i in range(K): for j in range(i,K): fpp[i,j] = fun(theta + h[i] + h[j], *args) fpp[j,i] = fpp[i,j] fmm[i,j] = fun(theta - h[i] - h[j], *args) fmm[j,i] = fmm[i,j] hh = (diag(h)) hh = hh.reshape((K,1)) hh = hh @ hh.T H = zeros((K,K)) for i in range(K): for j in range(i,K): H[i,j] = (fpp[i,j] - fp[i] - fp[j] + f + f - fm[i] - fm[j] + fmm[i,j])/hh[i,j]/2 H[j,i] = H[i,j] return HFinally, the code that does the actual work can be written. The first block imports the data, flips it using a slicing operator, and computes 100 times returns. Scaling data can be useful to improve optimizer performance, and ideally estimated parameters should have similar magnitudes (i.e. $\omega\approx.01$ and $\alpha\approx.05$).# Import data ftse = pd.read_csv('FTSE_1984_2012.csv', parse_dates=[0]) # Set index ftse.index = ftse.pop('Date') # Flip upside down ftse = ftse.iloc[::-1] # Compute returns ftse_price = ftse['Adj Close'] ftse_return = 100 * ftse_price.pct_change().dropna()Good starting values are important. These are my guesses based on experience fitting these types of models models. An alternative is to attempt a crude grid search and use the best (smallest) log-likelihood value from the grid search.# Starting values starting_vals = array([ftse_return.mean(), ftse_return.var() * .01, .03, .09, .90])Bounds are used in estimation to ensure that all parameters in the conditional variance are $\geq 0$ and to set sensible upper bounds on the mean and $\omega$. The vector `sigma2` is then initialized, and the arguments are placed in a tuple.# Estimate parameters finfo = np.finfo(np.float64) bounds = [(-10*ftse_return.mean(), 10*ftse_return.mean()), (finfo.eps, 2*ftse_return.var() ), (0.0,1.0), (0.0,1.0), (0.0,1.0)] T = ftse_return.shape[0] sigma2 = np.ones(T) * ftse_return.var() # Pass a NumPy array, not a pandas Series args = (np.asarray(ftse_return), sigma2) opt = minimize(gjr_garch_likelihood, starting_vals, constraints=constraint, bounds = bounds, args = args) estimates = opt.xThe optimized log-likelihood and the time series of variances are computed by calling the objective using the keyword argument `out=True`.loglik, logliks, sigma2final = gjr_garch_likelihood(estimates, ftse_return, sigma2, out=True)Next, the numerical scores and the covariance of the scores are computed. 
These exploit the definition of a derivative, so that for a scalar function, $$\frac{\partial f\left(\theta\right)}{\partial\theta_{i}}\approx\frac{f\left(\theta+e_{i}h_{i}\right)-f\left(\theta\right)}{h_{i}}.$$ The covariance is computed as the outer product of the scores since the scores should have mean 0 when evaluated at the solution to the optimization problem.step = 1e-5 * estimates scores = zeros((T,5)) for i in range(5): h = step[i] delta = np.zeros(5) delta[i] = h loglik, logliksplus, sigma2 = gjr_garch_likelihood(estimates + delta, \ np.asarray(ftse_return), sigma2, out=True) loglik, logliksminus, sigma2 = gjr_garch_likelihood(estimates - delta, \ np.asarray(ftse_return), sigma2, out=True) scores[:,i] = (logliksplus - logliksminus)/(2*h) I = (scores.T @ scores)/TThe next block calls `hessian_2sided` to estimate the Hessian, and then computes the asymptotic covariance.J = hessian_2sided(gjr_garch_likelihood, estimates, args) J = J/T Jinv = inv(J) vcv = Jinv @ I @ Jinv/TThe penultimate step is to pretty print the results and to produce a plot of the conditional variances.output = np.vstack((estimates,sqrt(diag(vcv)),estimates/sqrt(diag(vcv)))).T print('Parameter Estimate Std. Err. T-stat') param = ['mu','omega','alpha','gamma','beta'] for i in range(len(param)): print(f'{param[i]:<11} {output[i,0]:>0.6f} {output[i,1]:0.6f} {output[i,2]: 0.5f}')This final block produces a plot of the annualized conditional standard deviations.# Register date converters from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() # Produce a plot dates = ftse.index[1:] fig = plt.figure() ax = fig.add_subplot(111) volatility = pd.DataFrame(np.sqrt(252 * sigma2), index=dates) ax.plot(volatility) ax.autoscale(tight='x') fig.autofmt_xdate() fig.tight_layout(pad=1.5) ax.set_ylabel('Volatility') ax.set_title('FTSE Annualized Volatility (GJR GARCH(1,1,1))') plt.show()Encoder and decoder classfrom sklearn.cluster import KMeans class ImageCompressionKmeans(object): @staticmethod def encoder(imgPath:str, size:tuple = (256, 256)): """Encoder a image using kmeans algorith.""" img = ImageUtils.preprocessImage(imgPath=imgPath, size=size) #get image dimension rows = img.shape[0] cols = img.shape[1] #reshape the image img = img.reshape(img.shape[0] * img.shape[1], -1) #kmeans instance kmeans = KMeans(n_clusters=128, n_init=10, max_iter=200) #fitting with the image kmeans.fit(img) #getting the custers and labels clusters = np.asarray(kmeans.cluster_centers_, dtype=np.uint8) labels = np.asarray(kmeans.labels_, dtype=np.uint8) labels = labels.reshape(rows, cols) return clusters, labels @staticmethod def decoder(clusters:np.ndarray, labels:np.ndarray): """Reconstruct the image previusly encoder with kmeans algorith. For this, clusters and labels are needed to reconstruct. 
""" #image shape with zeros matrix image = np.zeros((labels.shape[0], labels.shape[1], 3), dtype=np.uint8) #loop to construct the image for i in range(labels.shape[0]): for j in range(labels.shape[1]): image[i, j, :] = clusters[labels[i, j], :] return imageTesting#encoder a input image and saves the outputs in npy format clusters, labels = ImageCompressionKmeans.encoder('./teste.jpg') np.save('clusters.npy', clusters) np.save('labels.npy', labels) #decoder a saved output image from kmeans compression and reconstruct it c = np.load('clusters.npy') l = np.load('labels.npy') imgReconstructed = ImageCompressionKmeans.decoder(clusters=c, labels=l) io.imshow(imgReconstructed) import os print("files size compressed = {} bytes".format(os.path.getsize('./clusters.npy') + os.path.getsize('./labels.npy'))) print("original image file = {} bytes".format(os.path.getsize('./teste.jpg')))files size compressed = 66176 bytes original image file = 158593 bytesMétodo de limpar fraseMétodo resposavel por limpar o texto e tornar mais eficiente, retirando acentos, limpando o texto de e patronizando para tornar as frases mais eficientes.def text_clear(text): pattern = re.compile('[^a-zA-Z0-9 ]') text = normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII') return pattern.sub(' ', text) conteudos = leitura.titulo + " " + leitura.subTitulo + " " + leitura.conteudo conteudos = conteudos .apply(lambda text: "" if isinstance(text, float) else text_clear(text).lower()) ids = leitura.idNoticiaTokenização do conteudo.noticias = conteudos.apply(nltk.word_tokenize) freq_term = noticias.apply(Counter)Idexação dos termosindex = {} for i in range(len(noticias)): id_noticia = ids[i] for palavra in noticias[i]: palavra = palavra.lower() if palavra not in index: index[palavra] = {} id_rec = index[palavra].get(id_noticia) if not id_rec: docs = index[palavra] docs[id_noticia] = freq_term[i][palavra]Metodo que gera os um dicionario com os pesos dos index-termsMétodo auxiliar que gera um novo discionario com o index e os pesos associados. Util para analise binaria ou TF. ßdef generador_docs_peso(frase, gerador_peso): termos = frase.split(" ") docs_peso = {} for i in range(len(termos)): termo = termos[i] docs = index[termo] for doc_id in docs: tf = docs[doc_id] if doc_id not in docs_peso: docs_peso[doc_id] = np.array([0 if j != i else gerador_peso(tf) for j in range(len(termos))]) else: doc_vector = docs_peso[doc_id] docs_peso[doc_id] = np.array([doc_vector[j] if j != i else gerador_peso(tf) for j in range(len(termos))]) return docs_pesoMétodo que gera um vetor com tf dos index.Método que gera o TF de cada index e desse jeito gera um vetor contendo.def generator_tf(phase): term = phase.split(' ') doc_tf = {} for i in range(len(term)): docs = index[term[i]] for doc_id in docs: tf = docs[doc_id] if doc_id not in doc_tf: doc_tf[doc_id] = np.array([0 if j != i else tf for j in range(len(term))]) else: doc_vector = doc_tf[doc_id] doc_tf[doc_id] = np.array([doc_vector[j] if j != i else tf for j in range(len(term))]) return doc_tfMétodo que gera um vetor com valor binario dos index.O método utiliza como auxiliar um gerador que coloca os peso em relaxão ao index-terms e assim gera para cada um o peso correspondente. 
Sabendo que pesodef generator_binario(frase): def generador_peso(tf): return 1 return generador_docs_peso(frase, generador_peso)Método que gera um vetor com idf dos index.Utilizando a formula de idf considerando a soma de todos os elementos e o log.def generator_idf(phase): terms = phase.split(' ') idf = np.array([math.log((len(noticias)+1)/len(index[term])) for term in terms]) return idfMétodo que gera um vetor binário de consulta. Considerando os 0 ou 1def generator_query(phase): terms = phase.split(' ') query = np.array([1 if index.get(term) else 0 for term in terms]) return queryMétodo que gera um vetor com o bm25 dos termosdef generato_bm25(phase): docs_tf = generator_tf(phase) k = 5 bm25_vetor = {doc_id: np.array([((k+1)*tf)/(tf+k) for tf in tf_vetor]) for doc_id, tf_vetor in docs_tf.items()} return bm25_vetorBusca pelo index binario dos termos. Ele chama o método que gera o binario dos termos em um vetor e assim da o rank e retornando a lista da busca dos elementos.def seach_bin(phase): docs_tf = generator_binario(phase) query = generator_query(phase) doc_rank = sorted(list(docs_tf.items()), key=lambda doc: np.dot(doc[1], query), reverse=True)[:5] return [doc[0] for doc in doc_rank]Busca pelo tf dos termos. Ele chama o método que gera o tf dos termos em um vetor e assim da o rank e retornando a lista da busca dos elementos.def seach_tf(phase): docs_tf = generator_tf(phase) query = generator_query(phase) doc_rank = sorted(list(docs_tf.items()), key=lambda doc: np.dot(doc[1], query), reverse=True) return [doc[0] for doc in doc_rank]Busca pelo tf e idf dos termos. Ele chama o método que gera o tf e o idf dos termos em um vetor e assim da o rank e retornando a lista da busca dos elementos.def seach_tf_idf(phase): doc_tf = generator_tf(phase) doc_idf = generator_idf(phase) doc_rank = sorted(list(doc_tf.items()), key=lambda doc: np.dot(doc[1], doc_idf), reverse=True)[:5] return [doc[0] for doc in doc_rank]Busca pelo BM25Ele chama o método que gera o bm25 dos termos em um vetor e assim da o rank e retornando a lista da busca dos elementos.def seach_bm25(phase): doc_bm25 = generato_bm25(phase) query = generator_query(phase) doc_rank = sorted(list(doc_bm25.items()), key=lambda doc: np.dot(doc[1], query), reverse=True)[:5] return [doc[0] for doc in doc_rank]Metodo de comparação fornecido na atividade.def apk(actual, predicted, k=10): """ Computes the average precision at k. This function computes the average prescision at k between two lists of items. Parameters ---------- actual : list A list of elements that are to be predicted (order doesn't matter) predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists """ if len(predicted)>k: predicted = predicted[:k] score = 0.0 num_hits = 0.0 for i,p in enumerate(predicted): if p in actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) if not actual: return 0.0 return score / min(len(actual), k) def mapk(actual, predicted, k=10): """ Computes the mean average precision at k. This function computes the mean average prescision at k between two lists of lists of items. 
Parameters ---------- actual : list A list of lists of elements that are to be predicted (order doesn't matter in the lists) predicted : list A list of lists of predicted elements (order matters in the lists) k : int, optional The maximum number of predicted elements Returns ------- score : double The mean average precision at k over the input lists """ return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])Testes de analise de presição Teste pela busca com o metodo TFbusca_tf = [seach_tf(text_clear(frase)) for frase in gabarito.str_busca] print("Local: %.4f" %(mapk(gabarito.tf, busca_tf, k=5))) print("Google:%.4f" %(mapk(gabarito.google, busca_tf, k=5)))Local: 0.6520 Google:0.0480Teste pela busca com o metodo binario.busca_bi = [seach_bin(text_clear(frase)) for frase in gabarito.str_busca] print("Local: %.4f" %(mapk(gabarito.busca_binaria, busca_bi, k=5))) print("Google:%.4f" %(mapk(gabarito.google, busca_bi, k=5)))Local: 0.2400 Google:0.0400Teste pela busca com o metodo TF-IDFbusca_tf_idf = [seach_tf_idf(text_clear(frase)) for frase in gabarito.str_busca] print("Local: %.4f" %(mapk(gabarito.tfidf, busca_tf_idf, k=5))) print("Google:%.4f" %(mapk(gabarito.google, busca_tf_idf, k=5)))Local: 0.6160 Google:0.0580Teste pela busca com o metodo BM25busca_bm25 = [seach_bm25(text_clear(frase)) for frase in gabarito.str_busca] print("Local: %.4f" %(mapk(gabarito.bm25, busca_bm25 , k=5))) print("Google:%.4f" %(mapk(gabarito.google, busca_bm25 , k=5)))Local: 0.6787 Google:0.1180Classification Models Logistic Regression Model - Baseline model Train/Validation set First, we want to tune our model such that we minimize the variance, which is sensitivity of the prediction score to the change in training set, so, we will use cross-validation. We will use the validation curve to help us choose the best number of folds.lr = LogisticRegressionCV(Cs=10, scoring='accuracy', max_iter=3000, refit=True) param_name = 'cv' param_range = list(range(3, 21)) train_score, test_score = validation_curve( lr, features, target, param_name, cv= None, param_range=param_range) train_score_mean = np.mean(train_score, axis= 1) test_score_mean = np.mean(test_score, axis= 1) # Plot number of folds VS. cross-validated scores for training and Validation sets. plt.figure() plt.xlabel("Number of folds") plt.ylabel("Cross validated accuracy score") plt.plot(np.arange(3,21), train_score_mean) plt.plot(np.arange(3,21), test_score_mean, color = 'red') train_test_diff = train_score_mean - test_score_mean # Plot number of folds VS. difference of cross-validated scores between train and Dev sets. plt.figure() plt.xlabel("Number of folds") plt.ylabel("Diff. Cross validated accuracy score") plt.plot(np.arange(3,21), train_test_diff)It seems that the minimum variance is obtained at K = 7 folds. Fit our model and the use the best CV value.logistic_reg = LogisticRegressionCV( Cs=10, cv= 7, scoring='accuracy', max_iter=3000, refit=True) logistic_reg.fit(features, target) # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_lr = logistic_reg.predict_proba(features)[:, 1] lr_fpr, lr_tpr, lr_thresholds = sklearn.metrics.roc_curve(target, y_scores_lr) # Finding the AUC for the logistic classification model. lr_auc = sklearn.metrics.auc(x=lr_fpr, y=lr_tpr) lr_acc = np.mean(logistic_reg.scores_[1]) print('Area Under Curve: {}, Accuracy: {}'.format(lr_auc, lr_acc))Feature selection Here, we will try different method to select the features with the highest explainatory power. 
We will try the following methods, then we select the best method:1. VarianceThreshold2. SelectKBest3. RFECV4. SelectFromModel VarianceThreshold methodthreshold = np.arange(1, 10, 0.5) *1e-1 scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) logistic_reg.fit(selected_features, target) y_pred = logistic_reg.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot variance threshold VS. cross-validated scores for training sets. plt.figure() plt.xlabel("variance threshold") plt.ylabel("Cross validated accuracy score") plt.plot(np.arange(1, 10, 0.5) *1e-1, np.array(scores)) np.max(np.array(scores))The highest accuracy is obtained after execluding features whose variance is less than 0.1 SelectKbest methodnumber_of_features = list(range(1,17)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) logistic_reg.fit(selected_features, target) y_pred = logistic_reg.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of selected features VS. cross-validated scores for training sets. plt.figure() plt.xlabel("Number of Selected Features") plt.ylabel("Cross validated accuracy score") plt.plot(list(range(1,17)), scores_k) print("Maximum accuracy score is :", max(scores_k)) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)The highest accuracy score is obtained after selecting the best 14 features. RFECV methodselector = sklearn.feature_selection.RFECV(logistic_reg, step= 1, cv= 5) selector.fit(features, target) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_) print("Optimal number of features : %d" % selector.n_features_) print("Maximum accuracy score is :", np.max(selector.grid_scores_))SelectFromModel methodthreshold = np.arange(1, 5, 0.1) *1e-1 scores_sfm = [] for i in threshold: selector = sklearn.feature_selection.SelectFromModel(logistic_reg, threshold= i) selector.fit(features, target) selected_features = features.loc[:, selector.get_support()] logistic_reg.fit(selected_features, target) y_pred = logistic_reg.predict(selected_features) scores_sfm.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of features VS. 
cross-validation scores plt.figure() plt.xlabel("Threshold Value") plt.ylabel("Cross validation score") plt.plot(np.arange(1, 5, 0.1) *1e-1, scores_sfm) print("Maximum accuracy score is :", np.max(np.array(scores_sfm))) print("Optimal threshold :", threshold[np.argmax(np.array(scores_sfm))])We conclude the best feature selection method is SelectFromModel with threshold = 0.28.selector = sklearn.feature_selection.SelectFromModel(logistic_reg, threshold= 0.25) selector.fit(features, target) selected_features = features.loc[:, selector.get_support()] logistic_reg.fit(selected_features, target)Make prediction for test datay_pred_nb = pd.DataFrame(logistic_reg.predict( test.loc[:, selector.get_support()]), columns=['Survived'], dtype='int64') lr_model = pd.concat([passengerID, y_pred_nb], axis=1) lr_model.to_csv('logistic.csv', index= False)Gaussian Naive Bayes Modelnb = GaussianNB() nb_params = {'priors': [[0.7, 0.3], [0.6, 0.4], [0.5, 0.5], [0.4, 0.6], [0.3, 0.7]]} gs_nb = GridSearchCV(nb, param_grid=nb_params, scoring='accuracy', cv=kf, refit=True) gs_nb.fit(features, target) # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_nb = gs_nb.predict_proba(features)[:, 1] nb_fpr, nb_tpr, nb_thresholds = roc_curve(target, y_scores_nb) # Finding the AUC for the naive bayes classification model. nb_auc = auc(x=nb_fpr, y=nb_tpr) nb_acc = gs_nb.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(nb_auc, nb_acc))Make prediction for test datay_pred_nb = pd.DataFrame(gs_nb.predict( test.drop(columns=['PassengerId'])), columns=['Survived'], dtype='int64') nb_model = pd.concat([test.PassengerId, y_pred_nb], axis=1)KNN Classification Model Train/Validation set First, we want to tune our model such that we minimize the variance, which is sensitivity of the prediction score to the change in training set, so, we will use cross-validation. We will use the validation curve to help us choose the best number of neighbours (K).knn = KNeighborsClassifier() param_name = 'n_neighbors' param_range = list(range(3, 21)) train_score, test_score = validation_curve( knn, features, target, param_name, cv= 10, param_range=param_range) train_score_mean = np.mean(train_score, axis= 1) test_score_mean = np.mean(test_score, axis= 1) # Plot number of neighbours VS. cross-validated scores for training and Validation sets. plt.figure() plt.xlabel("Number of neighbours") plt.ylabel("Cross validated accuracy score") plt.plot(np.arange(3,21), train_score_mean, color = 'blue') plt.plot(np.arange(3,21), test_score_mean, color = 'red') train_test_diff = train_score_mean - test_score_mean # Plot number of folds VS. difference of cross-validated scores between train and Dev sets. plt.figure() plt.xlabel("Number of neighbours") plt.ylabel("Diff. Cross validated accuracy score") plt.plot(np.arange(3,21), train_test_diff)It seems that the minimum variance is obtained at number of neighbours K = 16. Feature selection Here, we will try different method to select the features with the highest explainatory power. We will try the following methods, then we select the best method:1. VarianceThreshold2. SelectKBest3. RFECV4. 
SelectFromModel VarianceThreshold methodthreshold = [0.001, 0.005, 0.01, 0.05, 0.1, 0.2] scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) knn.fit(selected_features, target) y_pred = knn.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot variance threshold VS. cross-validated scores for training sets. plt.figure() plt.xlabel("variance threshold") plt.ylabel("Cross validated accuracy score") plt.plot([0.001, 0.005, 0.01, 0.05, 0.1, 0.2], np.array(scores)) np.max(np.array(scores))SelectKbest methodnumber_of_features = list(range(1,13)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) knn.fit(selected_features, target) y_pred = knn.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of selected features VS. cross-validated scores for training sets. plt.figure() plt.xlabel("Number of Selected Features") plt.ylabel("Cross validated accuracy score") plt.plot(list(range(1,13)), scores_k) print("Maximum accuracy score is :", max(scores_k)) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)We conclude that, the highest accuracy is obtained after execluding features whose variance is less than 0.05 Fit the model with the selected features.knn = KNeighborsClassifier(n_neighbors= 14) selector = sklearn.feature_selection.VarianceThreshold(threshold= 0.05) selected_features = selector.fit_transform(features) knn.fit(selected_features, target)KNN hyperparamters tunning We'll use randomized search to tune the hyperparamters of KNN.knn_params = {'n_neighbors': [14] , 'weights': [ 'uniform', 'distance'], 'leaf_size': [20, 30, 40, 50, 60], 'p': [1, 2, 3]} rs_knn = RandomizedSearchCV(knn, param_distributions= knn_params, scoring='accuracy', cv= 20, n_iter= 100, refit=True) rs_knn.fit(selected_features, target) rs_knn.best_params_ rs_knn.best_score_ # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_knn = rs_knn.predict_proba(selected_features)[:, 1] knn_fpr, knn_tpr, knn_thresholds = sklearn.metrics.roc_curve(target, y_scores_knn) # Finding the AUC for the naive bayes classification model. knn_auc = sklearn.metrics.auc(x=knn_fpr, y=knn_tpr) knn_acc = rs_knn.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(knn_auc, knn_acc))Make prediction for test datay_pred_knn = pd.DataFrame(rs_knn.predict( test.loc[:, selector.get_support()]), columns=['Survived'], dtype='int64') knn_model = pd.concat([passengerID, y_pred_knn], axis=1) knn_model.to_csv('knn.csv', index= False)Support Vector Machine Classification modelsvm = SVC(probability=True) svm_params = {'C': [0.1, 1, 10, 100, 500], 'kernel': ['rbf'], 'degree': [ 1, 2, 3, 4], 'gamma': [0.05, 0.1, 1, 5], 'shrinking': [True, False]} rs_svm = RandomizedSearchCV(svm, param_distributions=svm_params, scoring='accuracy', cv=kf, refit=True, n_iter=2000) rs_svm.fit(features, target) joblib.dump(rs_svm, 'svmmodel.pkl') rs_svm = joblib.load('svmmodel.pkl') # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_svm = rs_svm.predict_proba(features)[:, 1] svm_fpr, svm_tpr, svm_thresholds = roc_curve(target, y_scores_svm) # Finding the AUC for the SVM classification model. 
svm_auc = auc(x=svm_fpr, y=svm_tpr) svm_acc = rs_svm.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(svm_auc, svm_acc))Make Prediction for test datay_pred_svm = pd.DataFrame(rs_svm.predict( test.drop(columns=['PassengerId'])), columns=['Survived'], dtype='int64') svm_model = pd.concat([test.PassengerId, y_pred_svm], axis=1)Decision Tree Classification Modeldt = DecisionTreeClassifier()Feature selection for decision trees VarianceThreshold methodthreshold = [0.001, 0.01,0.1,0.5] scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) dt.fit(selected_features, target) y_pred = dt.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot([0.001, 0.01,0.1,0.5], np.array(scores)) np.max(np.array(scores))The highest accuracy is obtained after execluding features whose variance is less than 0.1 SelectKbest methodnumber_of_features = list(range(1,17)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) dt.fit(selected_features, target) y_pred = dt.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot(list(range(1,17)), scores_k) max(scores_k) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)The highest accuracy score is obtained after selecting the best 12 features. RFECV methodselector = sklearn.feature_selection.RFECV(dt, step= 1, cv= 7) selector.fit(features, target) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_) print("Optimal number of features : %d" % selector.n_features_) np.max(selector.grid_scores_)We conclude the VarianceThreshold and SelectKbest methods results in the same accuracy score. We will use the SelectKbest method with K = 12.selector = sklearn.feature_selection.SelectKBest(k= 12) selected_features = selector.fit_transform(features, target)Decision tree hyperparamters tunning We will use randomized search method and we will follow coarse-to-fine strategy.dt_params = {'criterion': ['gini'], 'min_samples_split': [ 14, 15,16], 'max_features': ['auto', 'log2', None]} gs_dt = RandomizedSearchCV(dt, param_distributions= dt_params, scoring='accuracy', cv= StratifiedKFold(7), refit=True, n_iter= 50) gs_dt.fit(selected_features, target) gs_dt.best_score_ gs_dt.best_params_Variance check We will use the max_depth of tree to minimize the variance. We'll use the validation curve to select the best value of max_depth.dt = DecisionTreeClassifier(min_samples_split= 15, max_features= None) param_name = 'max_depth' param_range = list(range(1, 11)) train_score, test_score = validation_curve( dt, selected_features, target, param_name, cv= 7, param_range = param_range) train_score_mean = np.mean(train_score, axis= 1) test_score_mean = np.mean(test_score, axis= 1) plt.plot(np.arange(1,11), train_score_mean) plt.plot(np.arange(1,11), test_score_mean, color = 'red') train_test_diff = train_score_mean - test_score_mean plt.plot(np.arange(1,11), train_test_diff)From the above graphs, we find that the minimum is at max_depth of 1 but the bias is high. 
So, we will choose max_depth of 4 because the variance is reasonable and the bias is much lower than that of max_depth = 1.dt = DecisionTreeClassifier(min_samples_split= 15, max_features= None, max_depth= 4) dt.fit(selected_features,target) # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_dt = dt.predict_proba(selected_features)[:, 1] dt_fpr, dt_tpr, dt_thresholds = sklearn.metrics.roc_curve(target, y_scores_dt) # Finding the AUC for the Decision Tree classification model. dt_auc = sklearn.metrics.auc(x=dt_fpr, y=dt_tpr) dt_acc = gs_dt.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(dt_auc, dt_acc))Make Prediction for test datay_pred_dt = pd.DataFrame(dt.predict( test.loc[:,selector.get_support()]), columns=['Survived'], dtype='int64') dt_model = pd.concat([passengerID, y_pred_dt], axis=1) dt_model.to_csv('dt.csv', index= False)Random Forest Classification Modelrf = RandomForestClassifier() rf_params = {'n_estimators': [10, 100, 200], 'criterion': ['gini', 'entropy'], 'min_samples_split': [ 2, 5, 10], 'max_features': ['sqrt', 'log2', None], 'class_weight': [{0: 0.6, 1: 0.4}, {0: 0.6, 1: 0.4}]} rs_rf = RandomizedSearchCV(rf, param_distributions=rf_params, scoring='accuracy', cv=kf, refit=True, n_iter=2000) rs_rf.fit(features, target) joblib.dump(rs_rf, 'randomdorestmodel.pkl') rs_rf = joblib.load('randomdorestmodel.pkl') # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_rf = rs_rf.predict_proba(features)[:, 1] rf_fpr, rf_tpr, rf_thresholds = roc_curve(target, y_scores_rf) # Finding the AUC for the Random Forest classification model. rf_auc = auc(x=rf_fpr, y=rf_tpr) rf_acc = rs_rf.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(rf_auc, rf_acc))Make Prediction for test datay_pred_rf = pd.DataFrame(rs_rf.predict( test.drop(columns=['PassengerId'])), columns=['Survived'], dtype='int64') rf_model = pd.concat([test.PassengerId, y_pred_rf], axis=1)Bagging Classification Modelbg = BaggingClassifier() bg_params = {'n_estimators': [10, 100, 500]} gs_bg = GridSearchCV(bg, param_grid=bg_params, scoring='accuracy', cv=kf, refit=True) gs_bg.fit(features, target) # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_bg = gs_bg.predict_proba(features)[:, 1] bg_fpr, bg_tpr, bg_thresholds = roc_curve(target, y_scores_bg) # Finding the AUC for the Bagging classification model. bg_auc = auc(x=bg_fpr, y=bg_tpr) bg_acc = gs_bg.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(bg_auc, bg_acc))Make Prediction for test datay_pred_bg = pd.DataFrame(gs_bg.predict( test.drop(columns=['PassengerId'])), columns=['Survived'], dtype='int64') bg_model = pd.concat([test.PassengerId, y_pred_bg], axis=1)Adaboost Classifierada = AdaBoostClassifier() ada_params = {'n_estimators': [100, 500, 1000, 10000], 'learning_rate': [0.1, 0.5, 0.7, 1]} gs_ada = GridSearchCV(ada, param_grid=ada_params, cv=kf, scoring='accuracy', refit=True) gs_ada.fit(features, target) joblib.dump(gs_ada, 'adaboost.pkl') gs_ada = joblib.load('adaboost.pkl') # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_ada = gs_ada.predict_proba(features)[:, 1] ada_fpr, ada_tpr, ada_thresholds = roc_curve(target, y_scores_ada) # Finding the AUC for the AdaBoost classification model. 
ada_auc = auc(x=ada_fpr, y=ada_tpr) ada_acc = gs_ada.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(ada_auc, ada_acc))Make Predictions for test datay_pred_ada = pd.DataFrame(gs_ada.predict( test.drop(columns=['PassengerId'])), columns=['Survived'], dtype='int64') ada_model = pd.concat([test.PassengerId, y_pred_ada], axis=1)Gradient Boost Classifier Train/Validation set Here, we want to tune our model to minimize the variance, which is the sensitivity of the prediction score to changes in the training set, so we will use cross-validation. We will use the validation curve to help us choose the best validation fraction for the model.gboost = ens.GradientBoostingClassifier() param_name = 'validation_fraction' param_range = np.arange(1, 5.5, 0.5)*1e-1 train_score, test_score = validation_curve( gboost, features, target, param_name, cv=10, param_range=param_range) train_score_mean = np.mean(train_score, axis= 1) test_score_mean = np.mean(test_score, axis= 1) # Plot validation fraction VS. cross-validated scores for training and Validation sets. plt.figure() plt.xlabel("Validation fraction") plt.ylabel("Cross validated accuracy score") plt.plot(np.arange(1,5.5, 0.5)*1e-1, train_score_mean, color = 'blue') plt.plot(np.arange(1,5.5, 0.5)*1e-1, test_score_mean, color = 'red') train_test_diff = train_score_mean - test_score_mean # validation fraction VS. difference of cross-validated scores between train and Dev sets. plt.figure() plt.xlabel("Validation fraction") plt.ylabel("Diff. Cross validated accuracy score") plt.plot(np.arange(1,5.5, 0.5)*1e-1, train_test_diff)It seems that the minimum variance is obtained at a validation fraction of 0.45, so we will choose a validation fraction of 0.45. Feature selection for Gradient Boost VarianceThreshold methodgboost = ens.GradientBoostingClassifier(validation_fraction= 0.45) threshold = [0.001, 0.01,0.1,0.5] scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) gboost.fit(selected_features, target) y_pred = gboost.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot([0.001, 0.01,0.1,0.5], np.array(scores)) np.max(np.array(scores))The highest accuracy is obtained after excluding features whose variance is less than 0.001 SelectKbest methodnumber_of_features = list(range(1,13)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) gboost.fit(selected_features, target) y_pred = gboost.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot(list(range(1,13)), scores_k) max(scores_k) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)The highest accuracy score is obtained after selecting the best 11 features. RFECV methodselector = sklearn.feature_selection.RFECV(gboost, step= 1, cv= 7) selector.fit(features, target) # Plot number of features VS.
cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_) print("Optimal number of features : %d" % selector.n_features_) np.max(selector.grid_scores_)SelectFromModel methodthreshold = np.arange(1, 10, 0.1) *1e-2 scores_sfm = [] for i in threshold: selector = sklearn.feature_selection.SelectFromModel(gboost, threshold= i) selector.fit(features, target) selected_features = features.loc[:, selector.get_support()] gboost.fit(selected_features, target) y_pred = gboost.predict(selected_features) scores_sfm.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Threshold Value") plt.ylabel("Cross validation score") plt.plot(np.arange(1, 10, 0.1) *1e-2, scores_sfm) print("Maximum accuracy score is :", np.max(np.array(scores_sfm))) print("Optimal threshold :", threshold[np.argmax(np.array(scores_sfm))])We conclude that SelectFromModel method results in the highest accuracy score with threshold = 0.018.# Fit the model with best 15 features. selector = sklearn.feature_selection.SelectFromModel(gboost, threshold= 0.018) selected_features = selector.fit_transform(features, target) gboost.fit(selected_features, target)GB hyperparamters tunning Again, we will use randomized search and we'll follow a coarse to fine strategy.gboost_params = {'learning_rate': [0.1 , 0.2, 0.25 ], 'n_estimators': [ 50, 100, 200], 'max_features': [None, 'log2', 'sqrt'], 'loss': ['deviance', 'exponential']} rs_gboost = RandomizedSearchCV(gboost, param_distributions= gboost_params, cv= 10, scoring='accuracy', refit=True, n_iter= 100) rs_gboost.fit(selected_features, target) rs_gboost.best_params_ # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_gboost = rs_gboost.predict_proba(selected_features)[:, 1] gboost_fpr, gboost_tpr, gboost_thresholds = sklearn.metrics.roc_curve( target, y_scores_gboost) # Finding the AUC for the Gradient Boost classification model. 
gboost_auc = sklearn.metrics.auc(x=gboost_fpr, y=gboost_tpr) gboost_acc = rs_gboost.best_score_ print('Area Under Curve: {}, Accuracy: {}'.format(gboost_auc, gboost_acc))Make Predictions for test datay_pred_gboost = pd.DataFrame(rs_gboost.predict( test.loc[:,selector.get_support()]), columns=['Survived'], dtype='int64') gboost_model = pd.concat([passengerID, y_pred_gboost], axis=1) gboost_model.to_csv('gboost.csv', index= False)XGBoost Classifier Feature selection for XGBoost VarianceThreshold methodxgboost = xgb.XGBClassifier() threshold = [0.001, 0.01,0.1,0.5] scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) xgboost.fit(selected_features, target) y_pred = xgboost.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot([0.001, 0.01,0.1,0.5], np.array(scores)) np.max(np.array(scores))The highest accuracy is obtained after execluding features whose variance is less than 0.001 SelectKbest methodnumber_of_features = list(range(1,13)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) xgboost.fit(selected_features, target) y_pred = xgboost.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot(list(range(1,13)), scores_k) max(scores_k) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)The highest accuracy score is obtained after selecting the best 11 features. RFECV methodselector = sklearn.feature_selection.RFECV(xgboost, step= 1, cv= 7) selector.fit(features, target) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_) print("Optimal number of features : %d" % selector.n_features_) np.max(selector.grid_scores_)SelectFromModel methodthreshold = np.arange(1, 10, 0.1) *1e-2 scores_sfm = [] for i in threshold: selector = sklearn.feature_selection.SelectFromModel(xgboost, threshold= i) selector.fit(features, target) selected_features = features.loc[:, selector.get_support()] xgboost.fit(selected_features, target) y_pred = xgboost.predict(selected_features) scores_sfm.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Threshold Value") plt.ylabel("Cross validation score") plt.plot(np.arange(1, 10, 0.1) *1e-2, scores_sfm) print("Maximum accuracy score is :", np.max(np.array(scores_sfm))) print("Optimal threshold :", threshold[np.argmax(np.array(scores_sfm))])We conclude that SelectKBest method results in the highest accuracy score with K = 12, which is the total number of features. 
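As a cross-check of the sweep above, here is a minimal sketch (not part of the original notebook) of how the same SelectKBest comparison could be scored with cross-validation inside a Pipeline, so the selector is refit on each training fold instead of being evaluated on the data it was fitted on; it assumes the `features`, `target`, `np` and `xgb` objects defined earlier in this notebook, and the loop itself is ours.
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
cv_scores_k = []
for k in range(1, features.shape[1] + 1):
    # the selector and the classifier are refit together on every CV fold
    pipe = Pipeline([('select', SelectKBest(k=k)),
                     ('model', xgb.XGBClassifier())])
    cv_scores_k.append(cross_val_score(pipe, features, target, cv=5, scoring='accuracy').mean())
print("Best k by cross-validation:", int(np.argmax(cv_scores_k)) + 1)
The value chosen this way may differ from the training-set sweep above, since the cross-validated score penalizes selections that only help on the data they were fitted on.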
Fit the model with all featuresxgboost.fit(features, target) xgboost.score(features, target)Make Predictions for test datay_pred_xgboost = pd.DataFrame(xgboost.predict(test), columns=['Survived'], dtype='int64') xgboost_model = pd.concat([passengerID, y_pred_xgboost], axis=1) xgboost_model.to_csv('xgb.csv', index= False)LightGBM Classifier Feature selection for LightGBM VarianceThreshold methodlgboost = lgb.LGBMClassifier() threshold = [0.001, 0.01,0.1,0.5] scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) lgboost.fit(selected_features, target) y_pred = lgboost.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot([0.001, 0.01,0.1,0.5], np.array(scores)) np.max(np.array(scores))The highest accuracy is obtained after execluding features whose variance is less than 0.001 SelectKbest methodnumber_of_features = list(range(1,13)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) lgboost.fit(selected_features, target) y_pred = lgboost.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot(list(range(1,13)), scores_k) max(scores_k) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)The highest accuracy score is obtained after selecting the best 11 features. RFECV methodselector = sklearn.feature_selection.RFECV(lgboost, step= 1, cv= 7) selector.fit(features, target) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_) print("Optimal number of features : %d" % selector.n_features_) np.max(selector.grid_scores_)SelectFromModel methodthreshold = [0.001, 0.01, 0.05, 0.1 , 0.5] scores_sfm = [] for i in threshold: selector = sklearn.feature_selection.SelectFromModel(lgboost, threshold= i) selector.fit(features, target) selected_features = features.loc[:, selector.get_support()] lgboost.fit(selected_features, target) y_pred = lgboost.predict(selected_features) scores_sfm.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Threshold Value") plt.ylabel("Cross validation score") plt.plot([0.001, 0.01, 0.05, 0.1 , 0.5], scores_sfm) print("Maximum accuracy score is :", np.max(np.array(scores_sfm))) print("Optimal threshold :", threshold[np.argmax(np.array(scores_sfm))])We conclude that SelectKBest method results in the highest accuracy score with K = 11.# Fit the model with the best 11 features selected. 
selector = sklearn.feature_selection.SelectKBest(k= 11) selected_features = selector.fit_transform(features, target) lgboost.fit(selected_features, target)Make Prediction for test datay_pred_lgboost = pd.DataFrame(lgboost.predict( test.loc[:,selector.get_support()]), columns=['Survived'], dtype='int64') lgboost_model = pd.concat([passengerID, y_pred_lgboost], axis=1) lgboost_model.to_csv('lgboost.csv', index= False)Catboost Classifier Feature selection for LightGBM VarianceThreshold methodctboost = ctb.CatBoostClassifier() threshold = [0.001, 0.01,0.1,0.5] scores = [] for i in threshold: selector = sklearn.feature_selection.VarianceThreshold(threshold= i) selected_features = selector.fit_transform(features) ctboost.fit(selected_features, target) y_pred = ctboost.predict(selected_features) scores.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot([0.001, 0.01,0.1,0.5], np.array(scores)) np.max(np.array(scores))The highest accuracy is obtained after execluding features whose variance is less than 0.001 SelectKbest methodnumber_of_features = list(range(1,13)) scores_k = [] for i in number_of_features: selector = sklearn.feature_selection.SelectKBest(k=i) selected_features = selector.fit_transform(features, target) ctboost.fit(selected_features, target) y_pred = ctboost.predict(selected_features) scores_k.append(sklearn.metrics.accuracy_score(target, y_pred)) plt.plot(list(range(1,13)), scores_k) max(scores_k) print("Optimal number of features :", np.argmax(np.array(scores_k)) + 1)The highest accuracy score is obtained after selecting the best 12 features. RFECV methodselector = sklearn.feature_selection.RFECV(ctboost, step= 1, cv= 7) selector.fit(features, target) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_) print("Optimal number of features : %d" % selector.n_features_) np.max(selector.grid_scores_)SelectFromModel methodthreshold = [0.001, 0.01, 0.05, 0.1 , 0.5] scores_sfm = [] for i in threshold: selector = sklearn.feature_selection.SelectFromModel(lgboost, threshold= i) selector.fit(features, target) selected_features = features.loc[:, selector.get_support()] lgboost.fit(selected_features, target) y_pred = lgboost.predict(selected_features) scores_sfm.append(sklearn.metrics.accuracy_score(target, y_pred)) # Plot number of features VS. 
cross-validation scores plt.figure() plt.xlabel("Threshold Value") plt.ylabel("Cross validation score") plt.plot([0.001, 0.01, 0.05, 0.1 , 0.5], scores_sfm) print("Maximum accuracy score is :", np.max(np.array(scores_sfm))) print("Optimal threshold :", threshold[np.argmax(np.array(scores_sfm))])We conclude that SelectKBest method results in the highest accuracy score with K = 11.ctboost.fit(features, target)Make Prediction for test datay_pred_ctboost = pd.DataFrame(ctboost.predict( test.drop(columns=['PassengerId'])), columns=['Survived'], dtype='int64') ctboost_model = pd.concat([test.PassengerId, y_pred_ctboost], axis=1)Voting Classifierv = VotingClassifier(estimators=[ ('lr', lr), ('NB', gs_nb), ('KNN', gs_knn), ('SVM', rs_svm), ('DT', gs_dt), ('RF', rs_rf), ('BG', gs_bg), ('AdaBoost', gs_ada), ('GBM', gs_gboost), ('XGBM', gs_xgb), ('LightGBM', lgboost), ('CatBoost', ctboost)], voting='soft') v.fit(features, target) joblib.dump(v, 'votingclassifier.pkl') v = joblib.load('votingclassifier.pkl') # Finding the ROC curve for different threshold values. # probability estimates of the positive class. y_scores_v = v.predict_proba(features)[:, 1] v_fpr, v_tpr, v_thresholds = roc_curve(target, y_scores_v) # Finding the AUC for the Voting classification model. v_auc = auc(x=v_fpr, y=v_tpr) print('Area Under Curve: {}'.format(v_auc))Make Prediction for test datay_pred_v = pd.DataFrame(v.predict(test.drop(columns=['PassengerId'])), columns=[ 'Survived'], dtype='int64') v_model = pd.concat([test.PassengerId, y_pred_v], axis=1)Stackingx_train, x_validate, y_train, y_validate = train_test_split( features, target, test_size=0.3) lr = LogisticRegressionCV(cv=kf) nb = GaussianNB() knn = KNeighborsClassifier( n_neighbors=14, leaf_size=20, p=1, weights='uniform') svm = SVC(kernel='rbf', gamma=0.1, degree=1, C=500, shrinking=True) gb = GradientBoostingClassifier(n_estimators=200, learning_rate=0.5) adab = AdaBoostClassifier(n_estimators=500, learning_rate=0.7) bg = BaggingClassifier(n_estimators=100) gboost = GradientBoostingClassifier( validation_fraction=0.1, n_iter_no_change=20, tol=0.005) xgboost = xgb.XGBClassifier() lgboost = lgb.LGBMClassifier() ctboost = ctb.CatBoostClassifier(iterations=200, learning_rate=0.1, depth=10) rf = RandomForestClassifier() lr.fit(x_train, y_train) nb.fit(x_train, y_train) knn.fit(x_train, y_train) svm.fit(x_train, y_train) gb.fit(x_train, y_train) adab.fit(x_train, y_train) bg.fit(x_train, y_train) gboost.fit(x_train, y_train) xgboost.fit(x_train, y_train) lgboost.fit(x_train, y_train) ctboost.fit(x_train, y_train) rf.fit(x_train, y_train) pred1 = lr.predict(x_validate) pred2 = nb.predict(x_validate) pred3 = knn.predict(x_validate) pred4 = svm.predict(x_validate) pred5 = gb.predict(x_validate) pred6 = adab.predict(x_validate) pred7 = bg.predict(x_validate) pred8 = gboost.predict(x_validate) pred9 = xgboost.predict(x_validate) pred10 = lgboost.predict(x_validate) pred11 = ctboost.predict(x_validate) pred12 = rf.predict(x_validate) test_pred1 = lr.predict(test.drop(columns=['PassengerId'])) test_pred2 = nb.predict(test.drop(columns=['PassengerId'])) test_pred3 = knn.predict(test.drop(columns=['PassengerId'])) test_pred4 = svm.predict(test.drop(columns=['PassengerId'])) test_pred5 = gb.predict(test.drop(columns=['PassengerId'])) test_pred6 = adab.predict(test.drop(columns=['PassengerId'])) test_pred7 = bg.predict(test.drop(columns=['PassengerId'])) test_pred8 = gboost.predict(test.drop(columns=['PassengerId'])) test_pred9 = xgboost.predict(test.drop(columns=['PassengerId'])) 
test_pred10 = lgboost.predict(test.drop(columns=['PassengerId'])) test_pred11 = ctboost.predict(test.drop(columns=['PassengerId'])) test_pred12 = rf.predict(test.drop(columns=['PassengerId'])) stacked_predictions = np.column_stack((pred1, pred2, pred3, pred4, pred5, pred6, pred7, pred8, pred9, pred10, pred11, pred12)) stacked_test_predictions = np.column_stack((test_pred1, test_pred2, test_pred3, test_pred4, test_pred5, test_pred6, test_pred7, test_pred8, test_pred9, test_pred10, test_pred11, test_pred12)) # Meta model meta_model = LogisticRegressionCV(cv=kf) meta_model.fit(stacked_predictions, y_validate)Make predictions for test datay_pred_stack = pd.DataFrame(meta_model.predict( stacked_test_predictions), columns=['Survived'], dtype='int64') stack_model = pd.concat([test.PassengerId, y_pred_stack], axis=1)Models Comparison Models scorepd.DataFrame([(lr_auc, lr_acc), (nb_auc, nb_acc), (knn_auc, knn_acc), (dt_auc, dt_acc), (rf_auc, rf_acc), (svm_auc, svm_acc), (bg_auc, bg_acc), (ada_auc, ada_acc), (v_auc, 'NA')], columns=['AUC', 'Accuracy'], index=['Logistic Regression', 'Naive Bayes', 'KNN', 'Decision Tree', 'Random Forest', 'SVM', 'Bagging', 'AdaBoost', 'Voting'])Plotting the ROC curveplt.figure(figsize=(8, 5)) plt.title('Receiver Operating Characteristic Curve') plt.plot(lr_fpr, lr_tpr, 'b', label='LR_AUC = %0.2f' % lr_auc) plt.plot(nb_fpr, nb_tpr, 'g', label='NB_AUC = %0.2f' % nb_auc) plt.plot(knn_fpr, knn_tpr, 'orange', label='KNN_AUC = %0.2f' % knn_auc) plt.plot(svm_fpr, svm_tpr, 'y', label='SVM_AUC = %0.2f' % svm_auc) plt.plot(dt_fpr, dt_tpr, 'brown', label='DT_AUC = %0.2f' % dt_auc) plt.plot(rf_fpr, rf_tpr, 'grey', label='RF_AUC = %0.2f' % rf_auc) plt.plot(bg_fpr, bg_tpr, 'black', label='BG_AUC = %0.2f' % bg_auc) plt.plot(ada_fpr, ada_tpr, 'pink', label='Ada_AUC = %0.2f' % ada_auc) plt.plot(v_fpr, v_tpr, 'purple', label='Voting_AUC = %0.2f' % v_auc) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.title('ROC Curve') plt.show()Lab 03: VGGIn this lab session we will:- Implement and train VGG16 on the MNIST data- Apply regularization techniques: L2 and dropout- Create a Keras callback to save checkpointsReference:- Simonyan, K., and Zisserman, A. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556. https://arxiv.org/abs/1409.1556 1.
VGG-16 trên MNISTTrong phần này chúng ta sẽ xây dựng và huấn luyện model VGG-16 trên dataset MNIST (ảnh được resize) 1.1 VGG16 cài sẵn trong Keras#import thư viện cần thiết ## thư viện machine learning và hỗ trợ import tensorflow as tf from tensorflow import keras import numpy as np ## thư viện để vẽ đồ thị import matplotlib.pyplot as pltTự động tạo một model VGG-16 bằng hàm cài sẵn trong keras- include_top: True/False: có thêm 3 lớp fully-conndedted ở cuối model không- weights: None/'imagenet': None: khởi tạo tham số ngẫu nhiên; 'imagenet': load trọng số của model được train với imagenet- input_tensor: truyền lớp Input vào nếu muốn- input_shape: xác định kích thước input- pooling: None/'max'/'avg': chế độ pooling trong các lớp pool- classes: số lớp outputvgg16 = keras.applications.vgg16.VGG16(include_top=True, weights=None, input_tensor=None, input_shape=(32,32,1), pooling='max', classes=10) vgg16.summary()Model: "vgg16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 32, 32, 1)] 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 32, 32, 64) 640 _________________________________________________________________ block1_conv2 (Conv2D) (None, 32, 32, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 16, 16, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 16, 16, 128) 147584 _____________________________________________________________[...]1.2 Xây dựng VGG bằng thư viện keras.layersHình trên trích từ bài báo gốc của VGG. Cột D và E chính là cấu trúc model VGG16 và VGG19. 
- Filter size của tất cả các lớp convolution là 3x3- Số filter của từng block lần lượt là 64, 128, 256, 512, 512- Các hàm kích hoạt đều là ReLU, trừ lớp output dùng hàm kích hoạt softmax- L2 Regularizer (weight decay) các lớp dense: 0.0005- Dropout probability sau các lớp dense: 0.5Giả sử ảnh input của chúng ta có kích thước (32x32x1)Xây dựng các lớp## import l2 regularizer ## l2 sẽ được khai báo truyền vào khi khởi tạo lớp from keras.regularizers import l2 l2_regularizer_rate = 0.0005 ## Tạo lớp input kích thước (None, 32, 32, 1) inputs = keras.layers.Input(shape=(32,32,1)) ### Block 1 #### Gồm 2 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 64, kernel size 3x3, hàm kích hoạt ReLU conv1_1 = keras.layers.Convolution2D(filters=64, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(inputs) conv1_2 = keras.layers.Convolution2D(filters=64, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv1_1) maxpool1 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv1_2) ### Block 2 #### Gồm 2 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 128, kernel size 3x3, hàm kích hoạt ReLU conv2_1 = keras.layers.Convolution2D(filters=128, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool1) conv2_2 = keras.layers.Convolution2D(filters=128, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv2_1) maxpool2 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv2_2) ### Block 3 #### Gồm 3 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 256, kernel size 3x3, hàm kích hoạt ReLU conv3_1 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool2) conv3_2 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv3_1) conv3_3 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv3_2) maxpool3 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv3_3) ### Block 4 #### Gồm 3 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 512, kernel size 3x3, hàm kích hoạt ReLU conv4_1 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool3) conv4_2 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv4_1) conv4_3 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv4_2) maxpool4 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv4_3) ### Block 5 #### Gồm 3 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 512, kernel size 3x3, hàm kích hoạt ReLU conv5_1 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool4) conv5_2 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv5_1) conv5_3 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv5_2) maxpool5 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv5_3) ### Block cuối #### Gồm 2 lớp fully-connected và 1 lớp output (cũng fully-connected) #### Số neurons trong 3 lớp lần lượt là 4096, 4096, 10 flatten6 = keras.layers.Flatten()(maxpool5) dense6_1 = keras.layers.Dense(units=4096, 
activation='relu', kernel_regularizer=l2(l2_regularizer_rate))(flatten6) dropout6_1 = keras.layers.Dropout(rate=0.5)(dense6_1) ## Lớp Dropout (chỉ chạy khi train), rate: xác suất bị drop dense6_2 = keras.layers.Dense(units=4096, activation='relu', kernel_regularizer=l2(l2_regularizer_rate))(dropout6_1) dropout6_2 = keras.layers.Dropout(rate=0.5)(dense6_2) ## Lớp Dropout (chỉ chạy khi train), rate: xác suất bị drop softmax = keras.layers.Dense(units=10, activation='softmax')(dropout6_2) ## Compile model model = keras.models.Model(inputs=inputs, outputs=softmax) model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001), ##tự khai báo Optimizer với learning rate 10^-4 loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=["accuracy"]) ## In toàn bộ cấu trúc của model print("Cấu trúc của model: ") model.summary()Cấu trúc của model: Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_2 (InputLayer) [(None, 32, 32, 1)] 0 _________________________________________________________________ conv2d (Conv2D) (None, 32, 32, 64) 640 _________________________________________________________________ conv2d_1 (Conv2D) (None, 32, 32, 64) 36928 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ conv2d_3 (Conv2D) (None, 16, 16, 128) 147584 ________________________________________[...]1.3 Resize MNIST# Tải dataset MNIST từ tensorflow ## MNIST là bài toán dự đoán một ảnh thể hiện ký tự số nào ## tải MNIST dataset từ keras (X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data() ##resacle ảnh thành ảnh thực trong đoạn [0,1] X_train, X_test = X_train/255.0, X_test/255.0 ##in dataset print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)(60000, 28, 28) (60000,) (10000, 28, 28) (10000,)Do VGG-16 yêu cầu ảnh kích thước tối thiểu 32x32. 
Chúng ta resize ảnh thành 32x32 để cho vào VGG## import thư viện OpenCV trên python #!pip3 install opencv-python ### Thử resize một ảnh import cv2 resized_img = cv2.resize(X_train[0], dsize=(32,32)) print("Kích thước ảnh sau resize: ", resized_img.shape) ## Resize toàn bộ ảnh train tập train và test X_train = np.array([cv2.resize(img, dsize=(32,32)) for img in X_train]) X_test = np.array([cv2.resize(img, dsize=(32,32)) for img in X_test]) print("Kích thước tập sau khi resize: ", X_train.shape, X_test.shape) ## In xem ảnh còn ổn không sau khi resize plt.imshow(X_train[0]) plt.show() ## Reshape ảnh để phù hợp với input của model (thêm một trục) X_train = np.expand_dims(X_train, axis=-1) X_test = np.expand_dims(X_test, axis=-1) print("Kích thước tập sau khi reshape: ", X_train.shape, X_test.shape) plt.imshow(X_train[0,:,:,0]) plt.show() #Tách một phần tập train thành tập valid from sklearn.model_selection import train_test_split X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.1) ## Reshape ảnh để phù hợp với input của model (thêm một trục)Kích thước tập sau khi resize: (60000, 32, 32) (10000, 32, 32)1.4 Train### Tạo một callback ModelCheckpoint: callback này sẽ lưu model mỗi khi tìm được một model tốt hơn #### filepath: đường dẫn file muốn lưu #### monitor: đại lượng quyết định xem model có "tốt hơn" hay không #### mode='auto'/'max'/'min': đại lượng monitor lớn hay nhỏ là tốt hơn #### verbose: có thông báo mỗi lần lưu ko #### save_best_only: chỉ lưu model tốt nhất mc = keras.callbacks.ModelCheckpoint(filepath="vgg16_mnist.h5", monitor='val_loss', mode='min', verbose=0, save_best_only=True) ## Train ## Khuyến cáo chạy COLAB (hoặc tương tự) history = model.fit(X_train, y_train, batch_size=100, epochs=10, validation_data=(X_valid, y_valid), callbacks=[mc]) ##sử dụng callback ModelCheckpoint trong quá trình train ## Đánh giá model trên tập test valid_loss, valid_acc = model.evaluate(X_valid, y_valid) test_loss, test_acc = model.evaluate(X_test, y_test) print("Valid: loss {} acc {} -- Test: loss {} valid {}".format(valid_loss, valid_acc, test_loss, test_acc)) ## Load lại model tốt nhất đã lưu print("best model: ") model.load_weights("vgg16_mnist.h5") valid_loss, valid_acc = model.evaluate(X_valid, y_valid) test_loss, test_acc = model.evaluate(X_test, y_test) print("Valid: loss {} acc {} -- Test: loss {} valid {}".format(valid_loss, valid_acc, test_loss, test_acc))Epoch 1/10 540/540 [==============================] - 28s 37ms/step - loss: 1.5045 - accuracy: 0.7465 - val_loss: 0.5561 - val_accuracy: 0.9637 Epoch 2/10 540/540 [==============================] - 20s 36ms/step - loss: 0.4056 - accuracy: 0.9763 - val_loss: 0.2966 - val_accuracy: 0.9820 Epoch 3/10 540/540 [==============================] - 20s 36ms/step - loss: 0.2334 - accuracy: 0.9856 - val_loss: 0.1751 - val_accuracy: 0.9885 Epoch 4/10 540/540 [==============================] - 20s 36ms/step - loss: 0.1473 - accuracy: 0.9887 - val_loss: 0.1371 - val_accuracy: 0.9860 Epoch 5/10 540/540 [==============================] - 19s 36ms/step - loss: 0.0993 - accuracy: 0.9907 - val_loss: 0.0928 - val_accuracy: 0.9883 Epoch 6/10 540/540 [==============================] - 20s 36ms/step - loss: 0.0709 - accuracy: 0.9922 - val_loss: 0.0789 - val_accuracy: 0.9895 Epoch 7/10 540/540 [==============================] - 20s 36ms/step - loss: 0.0588 - accuracy: 0.9921 - val_loss: 0.0584 - val_accuracy:[...]Bài tập1. Xây dựng và huấn luyện VGG19 với MNIST bằng thư viện keras.layers (viết gọn xíu!).2. 
Chỉnh các tham số (lrn_rate, l2 weight decay, epochs, batch_size, ...) để model đạt accuracy 0.994 trên tập valid.# Xây dựng các lớp ## l2 sẽ được khai báo truyền vào khi khởi tạo lớp from keras.regularizers import l2 l2_regularizer_rate = 0.0005 ## Tạo lớp input kích thước (None, 32, 32, 1) inputs = keras.layers.Input(shape=(32,32,1)) ### Block 1 #### Gồm 2 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 64, kernel size 3x3, hàm kích hoạt ReLU conv1_1 = keras.layers.Convolution2D(filters=64, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(inputs) conv1_2 = keras.layers.Convolution2D(filters=64, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv1_1) maxpool1 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv1_2) ### Block 2 #### Gồm 2 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 128, kernel size 3x3, hàm kích hoạt ReLU conv2_1 = keras.layers.Convolution2D(filters=128, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool1) conv2_2 = keras.layers.Convolution2D(filters=128, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv2_1) maxpool2 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv2_2) ### Block 3 #### Gồm 4 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 256, kernel size 3x3, hàm kích hoạt ReLU conv3_1 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool2) conv3_2 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv3_1) conv3_3 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv3_2) conv3_4 = keras.layers.Convolution2D(filters=256, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv3_3) maxpool3 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv3_4) ### Block 4 #### Gồm 4 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 512, kernel size 3x3, hàm kích hoạt ReLU conv4_1 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool3) conv4_2 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv4_1) conv4_3 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv4_2) conv4_4 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv4_3) maxpool4 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv4_4) ### Block 5 #### Gồm 3 lớp convolution và 1 lớp maxpoool #### Lớp convolution số filter 512, kernel size 3x3, hàm kích hoạt ReLU conv5_1 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(maxpool4) conv5_2 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv5_1) conv5_3 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv5_2) conv5_4 = keras.layers.Convolution2D(filters=512, kernel_size=[3,3], strides=[1,1], padding='same', activation=tf.nn.relu)(conv5_3) maxpool5 = keras.layers.MaxPool2D(pool_size=[2,2], strides=[2,2])(conv5_4) ### Block cuối #### Gồm 2 lớp fully-connected và 1 lớp output (cũng fully-connected) 
#### Số neurons trong 3 lớp lần lượt là 4096, 4096, 10 flatten6 = keras.layers.Flatten()(maxpool5) dense6_1 = keras.layers.Dense(units=4096, activation='relu', kernel_regularizer=l2(l2_regularizer_rate))(flatten6) dropout6_1 = keras.layers.Dropout(rate=0.5)(dense6_1) ## Lớp Dropout (chỉ chạy khi train), rate: xác suất bị drop dense6_2 = keras.layers.Dense(units=4096, activation='relu', kernel_regularizer=l2(l2_regularizer_rate))(dropout6_1) dropout6_2 = keras.layers.Dropout(rate=0.5)(dense6_2) ## Lớp Dropout (chỉ chạy khi train), rate: xác suất bị drop softmax = keras.layers.Dense(units=10, activation='softmax')(dropout6_2) ## Compile model model = keras.models.Model(inputs=inputs, outputs=softmax) model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001), ##tự khai báo Optimizer với learning rate 10^-4 loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=["accuracy"]) ## In toàn bộ cấu trúc của model print("Cấu trúc của model: ") model.summary() # Tải dataset MNIST từ tensorflow ## MNIST là bài toán dự đoán một ảnh thể hiện ký tự số nào ## tải MNIST dataset từ keras (X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data() ##resacle ảnh thành ảnh thực trong đoạn [0,1] X_train, X_test = X_train/255.0, X_test/255.0 ##in dataset print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) ## import thư viện OpenCV trên python #!pip3 install opencv-python ### Thử resize một ảnh import cv2 resized_img = cv2.resize(X_train[0], dsize=(32,32)) print("Kích thước ảnh sau resize: ", resized_img.shape) ## Resize toàn bộ ảnh train tập train và test X_train = np.array([cv2.resize(img, dsize=(32,32)) for img in X_train]) X_test = np.array([cv2.resize(img, dsize=(32,32)) for img in X_test]) print("Kích thước tập sau khi resize: ", X_train.shape, X_test.shape) ## In xem ảnh còn ổn không sau khi resize plt.imshow(X_train[0]) plt.show() ## Reshape ảnh để phù hợp với input của model (thêm một trục) X_train = np.expand_dims(X_train, axis=-1) X_test = np.expand_dims(X_test, axis=-1) print("Kích thước tập sau khi reshape: ", X_train.shape, X_test.shape) plt.imshow(X_train[0,:,:,0]) plt.show() #Tách một phần tập train thành tập valid from sklearn.model_selection import train_test_split X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.1) ## Reshape ảnh để phù hợp với input của model (thêm một trục) ### Tạo một callback ModelCheckpoint: callback này sẽ lưu model mỗi khi tìm được một model tốt hơn #### filepath: đường dẫn file muốn lưu #### monitor: đại lượng quyết định xem model có "tốt hơn" hay không #### mode='auto'/'max'/'min': đại lượng monitor lớn hay nhỏ là tốt hơn #### verbose: có thông báo mỗi lần lưu ko #### save_best_only: chỉ lưu model tốt nhất mc = keras.callbacks.ModelCheckpoint(filepath="vgg19_mnist.h5", monitor='val_loss', mode='min', verbose=0, save_best_only=True) ## Train ## Khuyến cáo chạy COLAB (hoặc tương tự) history = model.fit(X_train, y_train, batch_size=100, epochs=10, validation_data=(X_valid, y_valid), callbacks=[mc]) ##sử dụng callback ModelCheckpoint trong quá trình train ## Đánh giá model trên tập test valid_loss, valid_acc = model.evaluate(X_valid, y_valid) test_loss, test_acc = model.evaluate(X_test, y_test) print("Valid: loss {} acc {} -- Test: loss {} valid {}".format(valid_loss, valid_acc, test_loss, test_acc)) ## Load lại model tốt nhất đã lưu print("best model: ") model.load_weights("vgg19_mnist.h5") valid_loss, valid_acc = model.evaluate(X_valid, y_valid) test_loss, test_acc = model.evaluate(X_test, y_test) 
print("Valid: loss {} acc {} -- Test: loss {} valid {}".format(valid_loss, valid_acc, test_loss, test_acc))Epoch 1/10 540/540 [==============================] - 25s 43ms/step - loss: 1.7509 - accuracy: 0.5903 - val_loss: 0.4442 - val_accuracy: 0.9648 Epoch 2/10 540/540 [==============================] - 23s 43ms/step - loss: 0.3328 - accuracy: 0.9737 - val_loss: 0.2455 - val_accuracy: 0.9818 Epoch 3/10 540/540 [==============================] - 24s 44ms/step - loss: 0.1943 - accuracy: 0.9852 - val_loss: 0.1747 - val_accuracy: 0.9813 Epoch 4/10 540/540 [==============================] - 23s 43ms/step - loss: 0.1313 - accuracy: 0.9881 - val_loss: 0.1228 - val_accuracy: 0.9873 Epoch 5/10 540/540 [==============================] - 24s 44ms/step - loss: 0.0871 - accuracy: 0.9914 - val_loss: 0.0872 - val_accuracy: 0.9900 Epoch 6/10 540/540 [==============================] - 24s 44ms/step - loss: 0.0702 - accuracy: 0.9913 - val_loss: 0.0808 - val_accuracy: 0.9902 Epoch 7/10 540/540 [==============================] - 23s 43ms/step - loss: 0.0581 - accuracy: 0.9925 - val_loss: 0.0684 - val_accuracy:[...]Practica sobre K-Means y GMMimport numpy as np import pandas as pd import random import seaborn as sns; sns.set() import matplotlib.pyplot as plt from sklearn.mixture import GaussianMixture base_data_or = pd.read_csv('p1_estaturas.csv') base_data = base_data_or.to_numpy() #base_data sns.scatterplot(x="Edad", y="Estatura", palette="Set1",s=75, data=base_data_or) def k_predict(k, valores): # (x-x*) (y - y*) diferencias = valores[:,None]- k # (x-x*)^2 + (y - y*)^2 suma_cuadrados = np.sum(np.square(diferencias),axis=2) # distancia euclidiana sqrt( suma_cuadrados ) distancia_euc = np.sqrt(suma_cuadrados) # seleccion del k con la distancia mas cercana assigned = np.argmin(distancia_euc,axis=1) return assigned def kmeans(data,epochs = 5,k = 5,status=False, seed=False): if seed: random.seed(101) j = np.array([]) total = len(data) # seleccion aleatoria de elementos como k iniciales randomlist = random.sample(range(0, total-1), k) listk = np.array([data[i] for i in randomlist]) # registro para saber donde iniciaron los k init_k = np.copy(listk) for i in range(epochs): # (x-x*) (y - y*) diferencias = data[:,None]- listk # (x-x*)^2 + (y - y*)^2 suma_cuadrados = np.sum(np.square(diferencias),axis=2) # distancia euclidiana sqrt( suma_cuadrados ) distancia_euc = np.sqrt(suma_cuadrados) # seleccion del k con la distancia mas cercana assigned = np.argmin(distancia_euc,axis=1) #print(assigned) # sumatoria de las distancias minimas de cada elemento # :distancia_euc tiene todas las k-esimas distancias, # por eso se toma el minimo. en este caso j = np.append(j,sum(np.min(distancia_euc,axis=1))/total) if status: print(j[i]) # reasignacion de k for i in range(k): # filtro del los datos para cada k subgrupo = data[assigned==i] # nuevo centroide de k listk[i] = np.average(subgrupo,axis=0) #print(j) return (listk,init_k,j, assigned)Con los datos crudosks,k_init,j,assigned = kmeans(base_data, epochs = 5,k = 5,status=True, seed=False) print(ks) print('J: ',j)2.428022583283438 1.5089217747476418 1.080303162383244 1.046552656723866 1.0416659277670361 [[ 1.75625 34.75 ] [ 1.68285714 28.28571429] [ 1.73666667 25.44444444] [ 1.74 22. ] [ 1.78666667 24. 
]] J: [2.42802258 1.50892177 1.08030316 1.04655266 1.04166593]Grafica Jplt.plot(j)Grafica de clustersSe prepresenta el universo de elementos agrupados por colores y por medio de X los centroides definidosbase_data_or['k'] = assigned sns.scatterplot(x="Edad", y="Estatura", palette="Set1",s=75, hue="k", data=base_data_or) # definicion de k sns.scatterplot(x=ks[:,1], y=ks[:,0], marker="x",s=100) # puntos iniciales #sns.scatterplot(x=init_k[:,1], y=init_k[:,0], marker="s",s=75,color=".2" )Escalado de datosesc_data = (base_data - np.min(base_data, axis=0))/ ( np.max(base_data, axis=0) - np.min(base_data, axis=0) ) esc_data[0:3]k=5ks5,k_init5,j5,assigned5 = kmeans(esc_data, epochs = 5,k = 5,status=True,seed=False) print(ks5) print('J: ',j5)0.2055910825943832 0.15304699796436652 0.14789249814173838 0.14159402125307602 0.13979030197941986 [[0.23888889 0.30701754] [0.81666667 0.81578947] [0.57777778 0.25438596] [0.57333333 0.61052632] [0.85 0.18421053]] J: [0.20559108 0.153047 0.1478925 0.14159402 0.1397903 ]Grafica Jplt.plot(j5) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75, hue=assigned5) # definicion de k sns.scatterplot(x=ks5[:,1], y=ks5[:,0], marker="x",s=100) # puntos iniciales #sns.scatterplot(x=init_k[:,1], y=init_k[:,0], marker="s",s=75,color=".2" )En base diversas ejecuciones y por el medio elbow se estima que un buen k seria de 2, considerando que el algoritmo converve en menos de 5 epochs.Aunque en varias ocuaciones elbow sugiere hacer un solo grupo; pero seria lo mismo que no hacer nada por lo que se deja la opcion de crear almenos 2 grupos k=2ks2,k_init2,j2,assigned2 = kmeans(esc_data, epochs = 5,k = 2,status=True,seed=False) print('J: ',j2) plt.plot(j2) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75, hue=assigned2) # definicion de k sns.scatterplot(x=ks2[:,1], y=ks2[:,0], marker="x",s=100)k=3ks3,k_init3,j3,assigned3 = kmeans(esc_data, epochs = 5,k = 3,status=False,seed=False) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75,hue=assigned3) print('J: ',j3)J: [0.21740756 0.17329056 0.16589268 0.16589268 0.16589268]k=6ks6,_,_,assigned6 = kmeans(esc_data, epochs = 5,k = 6,status=False,seed=False) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75,hue=assigned6)k=4ks4,_,_,assigned4 = kmeans(esc_data, epochs = 5,k = 4,status=False,seed=False) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75,hue=assigned4)GMMgmm2 = GaussianMixture(n_components=2).fit(esc_data) k_gmm2 = gmm2.predict(esc_data) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], hue=k_gmm2, palette="Set1",s=75)k=5gmm5 = GaussianMixture(n_components=5).fit(esc_data) k_gmm5 = gmm5.predict(esc_data) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], hue=k_gmm5, palette="Set1",s=75)k=3gmm3 = GaussianMixture(n_components=3).fit(esc_data) k_gmm3 = gmm3.predict(esc_data) sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], hue=k_gmm3, palette="Set1",s=75)Conclusiones Para la realizacion de los clustes fue importante realizar el escalado de los datos ya que la mayoria de los valores de estatura no superan el valor de 2 y los de edad estan alrededor de 30. Antes de esto los grupos se separaban de forma vertical; depues del escalado de los datos el patron cambio. Segun el k que se defina se logran diferentes resultados; para el caso de k=3, los grupos se pueden interpretar como:1. Los de edad mas joven con alturas altas, 2. Los de edad media con altura baja3. 
Los de edad mayorsns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75,hue=assigned3)Debido a la naturaleza de los datos los clusters no son tan claros debido a que sabemos que la edad no esta relacionada directamente con la edad, independientemente de la altura estos llegaran a una dada mayor. Por esta misma razon los cluster convergen de forma diferente en distintas ejecuciones. A la vez que los datos convergen en pocas iteraciones por esta misma razon y la cantidad de datos. Con respecto al metodo del codo Debido a que los resultados no son consistentes en cada ejecucion la representacion de J varia para estos datos entre 1 y 3 indicando que k esta funciona para estos datos.La seleccion de k debe de ser 1, 2 o 3; pero no mas de eso. En base a estos datos no se logra tener mas variaciones. A pesar de que es posible calcular grupos para k>3 la cohesion de los grupos no es tanta como para que sean significativos. Estimacion Datospruebas = pd.read_csv('p1_pruebas.csv') pruebas = pruebas.to_numpy() # se escalan los datos dentro de la misma escala que los datos de entrenamiento pruebas = (pruebas - np.min(base_data, axis=0))/ ( np.max(base_data, axis=0) - np.min(base_data, axis=0) ) pruebas[0:3]Predicciones de ***GMM*** para un modelos de k = 3gmm3.predict(pruebas)Preciccion de probabilidades de pertenecia a cada grupo segun *GMM*gmm3.predict_proba(pruebas)Prediccion de **k-means** manual de k=3k_predict(ks3, pruebas)Debido a que el proceso de inicializacion es aleatorio los grupos de cada uno no necesariamente corresponden a otra ejecucion. En otras palabras a pesar que de se creen los mismo *clusters* no necesariamente seran en el mismo orden; es posible que k1 de una ejecucion sea diferente a otro k1.En este caso lo que vemos es que `gmm3.predict(pruebas)` genera `[0, 1, 2, 2, 2, 2, 2, 2]``k_predict(ks3, pruebas)` genera `[1, 1, 2, 2, 1, 0, 0, 2]`Se corrio el modelo a manera de crear en el mismo orden los clusters para tener un mejor parametro de comparacion. 
Como se ve en la siguientes graficas, los clustres estan creados de forma diferente# GMM con datos de prueba sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], hue=k_gmm3, palette="Set1",s=75) sns.scatterplot(x=pruebas[:,1], y=pruebas[:,0], marker="x",s=100) # knn con datos de prueba sns.scatterplot(x=esc_data[:,1], y=esc_data[:,0], palette="Set1",s=75,hue=assigned3) sns.scatterplot(x=pruebas[:,1], y=pruebas[:,0], marker="x",s=100)Prediccion con k=5En este caso si se logran grupos diferentes, en la grafica anterior de GMM con k=5 se persive que los grupos no son uniformesprint(gmm5.predict(pruebas)) print(k_predict(ks5, pruebas)) gmm5.predict_proba(pruebas)Parte 2 Deteccion de Anomaliasfrom scipy.stats import multivariate_normal from sklearn.metrics import f1_score # para quitar la notacion cientifica np.set_printoptions(suppress=True) base_data = pd.read_csv('p1_estaturas.csv').to_numpy() pruebas = pd.read_csv('p1_pruebas.csv').to_numpy() anomalias = pd.read_csv('p1_anomalias.csv').to_numpy() prue_y = np.zeros(len(pruebas)) anom_y = np.ones(len(anomalias)) data_anomalias = np.append(base_data,anomalias, axis=0) data_pruebas = np.append(base_data,pruebas, axis=0) #data_pruebas.shape def estimateGaussian(dataset): mu = np.mean(dataset, axis=0) sigma = np.cov(dataset.T) return mu, sigma def multivariateGaussian(dataset,mu,sigma): p = multivariate_normal(mean=mu, cov=sigma) return p.pdf(dataset) def selectThresholdByCV(probs,gt): best_epsilon = 0 best_f1 = 0 f = 0 stepsize = (max(probs) - min(probs)) / 2000 #print(stepsize) epsilons = np.arange(min(probs),max(probs),stepsize) for epsilon in np.nditer(epsilons): #print(epsilon) predictions = (probs < epsilon) #print(predictions) f = f1_score(gt, predictions,average='binary') #print(f,epsilon) #print('------') if f > best_f1: #print('----mejor') best_f1 = f best_epsilon = epsilon return best_f1, best_epsilonPruebas con datos **normales + valtest**mu, sigma = estimateGaussian(data_pruebas) p = multivariateGaussian(data_pruebas,mu,sigma) print(mu, sigma) #print(p) p_cv = multivariateGaussian(pruebas,mu,sigma) fscore, ep = selectThresholdByCV(p_cv,prue_y) #print(fscore,ep) # seleccion de anomalias outliers = np.asarray(np.where(p < ep)) outliersPara este caso los datos no presentan anomaliasplt.figure() plt.plot(data_pruebas[:,1],data_pruebas[:,0],'bx') plt.plot(data_pruebas[outliers,1],data_pruebas[outliers,0],'ro') plt.show()Pruebas con datos **normales + anomalias**mu, sigma = estimateGaussian(data_anomalias) p = multivariateGaussian(data_anomalias,mu,sigma) print(mu, sigma) #print(p) p_cv = multivariateGaussian(anomalias,mu,sigma) fscore, ep = selectThresholdByCV(p_cv,anom_y) #print(fscore,ep) print(ep) # seleccion de anomalias outliers = np.asarray(np.where(p < ep)) outliers plt.figure() plt.plot(data_anomalias[:,1],data_anomalias[:,0],'bx') plt.plot(data_anomalias[outliers,1],data_anomalias[outliers,0],'ro') plt.show() En este caso se encuentran las anomalias con los datos data_anomalias[outliers]This tutorial introduces numpy, a Python library for performing numerical computations in Python In order to be able to use numpy we need to import the library using the special word `import`. Also, to avoid typing `numpy` every time we want to use one if its functions we can provide an alias using the special word `as`:import numpy as npNow, we have access to all the functions available in `numpy` by typing `np.name_of_function`. 
For example, the equivalent of `1 + 1` in Python can be done in `numpy`:np.add(1,1)Although this might not seem very useful, however, even simple operations like this one, can be much quicker in `numpy` than in standard Python when using lots of numbers. To access the documentation explaining how a function is used, its input parameters and output format we can press `Shift+Tab` after the function name"np.addBy default the result of a function or operation is shown underneath the cell containing the code. If we want to reuse this result for a later operation we can assign it to a variable:a = np.add(2,3)The contents of this variable can be displayed at any moment by typing the variable name in a new cell:aThe core concept in numpy is the `array` which is equivalent to lists of numbers but can be multidimensional. To declare a numpy array we do:np.array([1,2,3,4,5,6,7,8,9])Most of the functions and operations defined in numpy can be applied to arrays. For example, with the previous operation:arr1 = np.array([1,2,3,4]) arr2 = np.array([3,4,5,6]) np.add(arr1, arr2)But a more simple and convenient notation can also be used:arr1 + arr2Arrays can be sliced and diced. We can get subsets of the arrays using the indexing notation which is `[start:end:stride]`. Let's see what this means:arr = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) print(arr[5]) print(arr[5:]) print(arr[:5]) print(arr[::2])Experiment playing with the indexes to understand the meaning of start, end and stride. What happend if you don't specify a start? What value numpy uses instead? Note that numpy indexes start on `0`, the same convention used in Python lists. Indexes can also be negative, meaning that you start counting by the end. For example, to select the last 2 elements in an array we can do:arr[-2:]Can you figure out how to select all the elements in the previous array excluding the last one, [15]? What about doing the same but now every 3rd element? Hint: Result should be ?`[0,3,6,9,12]` Numpy arrays can have multiple dimensions. For example, we define a 2-dimensional `(1,9)` array using nested square bracket:np.array([[1,2,3,4,5,6,7,8,9]])To visualise the shape or dimensions of a numpy array we can add the suffix `.shape`print(np.array([1,2,3,4,5,6,7,8,9]).shape) print(np.array([[1,2,3,4,5,6,7,8,9]]).shape) print(np.array([[1],[2],[3],[4],[5],[6],[7],[8],[9]]).shape)Any array can be reshaped into different shapes using the function `reshape`:np.array([1,2,3,4,5,6,7,8]).reshape((2,4))If you are concerned about having to type so many squared brackets, there are more simple and convenient ways of doing the same:print(np.array([1,2,3,4,5,6,7,8,9]).reshape(1,9).shape) print(np.array([1,2,3,4,5,6,7,8,9]).reshape(9,1).shape) print(np.array([1,2,3,4,5,6,7,8,9]).reshape(3,3).shape)Also there are shortcuts for declaring common arrays without having to type all their elements:print(np.arange(9)) print(np.ones((3,3))) print(np.zeros((2,2,2)))Can you try to declare a 3-dimensional array of shape (5,3,3)? Assign it to a variable Create another one with the same shape and use the numpy function to add both arrays: Some useful functions in Numpy for calculating the mean, standard deviation and sum of the elements of an array. These operation can be performed only in certain axis.arr = np.arange(9).reshape((3,3)) print(arr) print(np.mean(arr)) print(np.std(arr)) print(np.mean(arr, axis=0)) print(np.mean(arr, axis=1)) print(np.sum(arr))Numpy data types Numpy arrays can contain numerical values of different types. 
These types can be divided in these groups: * Integers * Unsigned * 8 bits: `uint8` * 16 bits: `uint16` * 32 bits: `uint32` * 64 bits: `uint64` * Signed * 8 bits: `int8` * 16 bits: `int16` * 32 bits: `int32` * 64 bits: `int64`* Floats * 32 bits: `float32` * 64 bits: `float64` We can specify the type of an array when we declare it or change the type of an existing one with the following expressions:arr = np.ones((10,10,10), dtype=np.uint8) arr[4,4,4] = -1 print(arr[4,4,4]) arr = arr.astype(np.int8) print(arr[4,4,4]) arr = arr.astype(np.float32) print(arr[4,4,4])Broadcasting Numpy is set up internally in a way that arrays involved in operations are promoted to match the shapesa = np.zeros((10,10)) a += 1 a a = np.arange(9).reshape((3,3)) b = np.arange(3) a + bBooleans There is a binary type in numpy called boolean which encodes `True` and `False` values. For example:arr = (arr > 0) print(arr[:,:,4]) arr.dtypeBoolean types are quite handy for indexing and selecting parts of images as we will see later. Many numpy functions also work with Boolean types.print(np.count_nonzero(arr[:,:,4])) a = np.array([1,1,0,0], dtype=np.bool) b = np.array([1,0,0,1], dtype=np.bool) np.logical_and(a, b)Depending of the language that you have used before this behaviour in Python might strike you:a = np.array([0,0,0]) # We make a copy of array a with name b b = a # We modify the first element of b b[0] = 1 print(a) print(b)Both arrays have been modified. This is in fact because a and b are references to the same array. If you want to have variables with independent arrays you'll have to use the `b = np.copy(a)` function. This second part introduces matplotlib, a Python library for plotting numpy arrays as images. For the purposes of this tutorial we are going to use a part of matplotlib called pyplot. We import it by doing:%matplotlib inline import numpy as np from matplotlib import pyplot as pltAn image can be seen as a 2-dimensional array. To visualise the contents of a numpy array:arr = np.arange(100).reshape(10,10) print(arr) plt.imshow(arr)Can you create a similar image with an array with shape (50,50)? We can use the Pyplot library to load an image using the function `imread`im = np.copy(plt.imread('data/black_mountain_fire.jpg'))This image is a 3-dimensional numpy array. By convention the first dimension corresponds to the vertical axis, the second to the horizontal axis and the third are the Red, Green and Blue channels of the image. What are the dimensions of the `im` array? __Hint: Use the `.shape` property of the im variable. Let's display this image using the `imshow` function.plt.imshow(im)This is a photo of Black Mountain taken during prescribed burns in 2014. A colour image is normally composed of three layers containing the values of the red, green and blue pixels. When we display an image we see all three colours combined. Knowing the extents of the image given by its shape, can you display the values of one of the pixels in the sky? You need to provide one index for each x and y dimensions and get all three channels. Make sure the values represent a mostly blue pixel. Let's use the indexing functionality of numpy to select a slice of this image. For example to select the top right corner:plt.imshow(im[:800,-800:,:])Let's practice your indexing skills! Can you create a cropped image around Black Mountain's tower? Remember: first dimension is the vertical coordinates, second dimension is the horizontal coordinates and the third are the RGB channels of the image. 
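As a rough illustration of that cropping exercise, the slice below follows the order just described: rows (vertical) first, then columns (horizontal), keeping all three RGB channels. The pixel bounds are placeholders rather than the tower's actual location, so they would need adjusting for this particular photo.
# Hypothetical crop: rows first, then columns, all RGB channels kept.
# The bounds 900:1600 and 2000:2800 are only guesses at where the tower sits.
tower = im[900:1600, 2000:2800, :]
plt.imshow(tower)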
Let's play around with this a little bit. For example, let's replace all the values in the 'red' layer with the value 255, this is the highest red value possible and it will make your whole image redish. The following command will replace all the values in the red channel (axis 3) with the value 255, and see what happensim[:,:,0] = 255 plt.imshow(im)Lesson 2: Computer Vision Fundamentals Submission, , 2018-02-22 Photos#importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 import os %matplotlib inline #reading in an image k1=0 # select here which of the images in the directory you want to process (0-5) test_images=os.listdir("test_images/") print ('test_images/'+test_images[k1]) image = mpimg.imread('test_images/'+test_images[k1]) #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') # Grab the x and y size and make a copy of the image ysize = image.shape[0] xsize = image.shape[1] # Note: always make a copy rather than simply using "=" color_select = np.copy(image) # Define our color selection criteria # Note: if you run this code, you'll find these are not sensible values!! # But you'll get a chance to play with them soon in a quiz red_threshold = 180 green_threshold = 180 blue_threshold = 100 rgb_threshold = [red_threshold, green_threshold, blue_threshold] # Identify pixels below the threshold thresholds = (image[:,:,0] < rgb_threshold[0]) \ | (image[:,:,1] < rgb_threshold[1]) \ | (image[:,:,2] < rgb_threshold[2]) color_select[thresholds] = [0,0,0] # Display the image plt.imshow(color_select) plt.show() gray = cv2.cvtColor(color_select, cv2.COLOR_RGB2GRAY) #grayscale conversion plt.imshow(gray, cmap='gray') # Define a polygon region of interest # Keep in mind the origin (x=0, y=0) is in the upper left in image processing left_bottom = [0, ysize] right_bottom = [xsize, ysize] fp1 = [450, 320] fp2 = [490, 320] mask = np.zeros_like(gray) ignore_mask_color = 255 # This time we are defining a four sided polygon to mask vertices = np.array([[left_bottom, fp1, fp2, right_bottom]], dtype=np.int32) cv2.fillPoly(mask, vertices, ignore_mask_color) grayROI = cv2.bitwise_and(gray, mask) # Display the image plt.imshow(grayROI, cmap='gray') # Canny edge detection # Define a kernel size for Gaussian smoothing / blurring # Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally kernel_size = 5 blur_gray = cv2.GaussianBlur(grayROI,(kernel_size, kernel_size), 0) # Define parameters for Canny and run it low_threshold = 50 high_threshold = 150 edges = cv2.Canny(blur_gray, low_threshold, high_threshold) # Display the image plt.imshow(edges, cmap='Greys_r') # Hough Transformation # Define the Hough transform parameters # Make a blank the same size as our image to draw on rho = 1 theta = np.pi/180 threshold = 1 min_line_length = 16 max_line_gap = 20 line_image = np.copy(image)*0 #creating a blank to draw lines on # Run Hough on edge detected image lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap) # Iterate over the output "lines" and draw lines on the blank for line in lines: for x1,y1,x2,y2 in line: cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10) # Create a "color" binary image to combine with line image #color_edges = np.dstack((edges, edges, edges)) # Draw the lines on the edge 
image print (test_images[k1]) combo = cv2.addWeighted(image, 0.8, line_image, 1, 0) plt.imshow(combo) mpimg.imsave('MS_images/'+test_images[k1], combo)缺失值处理 1.缺失值含义分析与删除import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler, OneHotEncoder import seaborn as sns from scipy import stats %matplotlib inline local_path = "/home/jwzhang411898961/Dropbox/DataScience/Kaggle/HousingPrice" df_train = pd.read_csv(local_path + "/train.csv") df_test = pd.read_csv(local_path + "/test.csv") df_train.head() df_train.info() na_count = df_train.isnull().sum().sort_values(ascending=False) na_rate = na_count / df_train.shape[0] na_data = pd.concat([na_count, na_rate], axis=1, keys=['count', 'ratio']) na_data.head(20) df_train.drop(na_data[na_data['count']>1].index, axis=1, inplace=True) df_train.dropna(subset=['Electrical'], axis=0, how='all', inplace=True) df_train.shape2.缺失值补全与变换df_tr = pd.read_csv(local_path + "/train.csv").drop(['Id'], axis=1) df_X = df_tr.drop(['SalePrice'], axis=1) df_y = df_tr['SalePrice'] quantity = [attr for attr in df_X.columns if df_X.dtypes[attr] != 'object'] quality = [attr for attr in df_X.columns if df_X.dtypes[attr] == 'object'] # 类型变量缺失值补全 for c in quality: df_tr[c] = df_tr[c].astype('category') if df_tr[c].isnull().any(): df_tr[c] = df_tr[c].cat.add_categories(['MISSING']) # fillna requires a value already existing as category. "MISSING" is a categorical dtype. df_tr[c] = df_tr[c].fillna('MISSING') # 连续变量缺失值补全 quantity_missing_cal = df_tr[quantity].isnull().sum().sort_values(ascending=False) # is a Series missing_cols = quantity_missing_cal[quantity_missing_cal>0].index df_tr[missing_cols] = df_tr[missing_cols].fillna(0.) df_tr[missing_cols].isnull().sum() def anova(frame, qualitative): anv = pd.DataFrame() anv['feature'] = qualitative pvals = [] for c in qualitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]['SalePrice'].values samples.append(s) # 某特征下不同取值对应的房价组合形成二维列表 # print("Samples is {0}".format(samples)) pval = stats.f_oneway(*samples)[1] # 一元方差分析得到 F,P,要的是 P,P越小,对方差的影响越大。 # print("p value is {0}".format(pval)) pvals.append(pval) anv['pval'] = pvals # print(anv) return anv.sort_values('pval') a = anova(df_tr,quality) # print("_" * 40) # print(a) a['disparity'] = np.log(1./a['pval'].values) # 悬殊度 fig, ax = plt.subplots(figsize=(16,8)) sns.barplot(data=a, x='feature', y='disparity') x=plt.xticks(rotation=90) plt.show()**由上图示分析可见,不少离散变量的具体取值对最终房价会产生较大影响(例如Neighborhood这个变量,实际上暗含了地段这个影响房价的重要因素),因此,我们可以按照各离散变量相应取值下房价的均值来给各个取值划定一个1,2,3,4来定量描述他们对房价的影响,也就是将离散变量转化为数值型的有序变量:**def encode(frame, feature): ''' 对所有类型变量,依照各个类型变量的不同取值对应的样本集内房价的均值,按照房价均值高低 对此变量的当前取值确定其相对数值1,2,3,4等等,相当于对类型变量赋值使其成为连续变量。 此方法采用了与One-Hot编码不同的方法来处理离散数据,值得学习 注意:此函数会直接在原frame的DataFrame内创建新的一列来存放feature编码后的值。 ''' ordering = pd.DataFrame() ordering['val'] = frame[feature].unique() ordering.index = ordering.val ordering['price_mean'] = frame[[feature, 'SalePrice']].groupby(feature).mean()['SalePrice'] # 上述 groupby()操作可以将某一feature下同一取值的数据整个到一起,结合mean()可以直接得到该特征不同取值的房价均值 ordering = ordering.sort_values('price_mean') ordering['order'] = range(1, ordering.shape[0]+1) ordering = ordering['order'].to_dict() for attr_v, score in ordering.items(): # e.g. 
qualitative[2]: {'Grvl': 1, 'MISSING': 3, 'Pave': 2} frame.loc[frame[feature] == attr_v, feature+'_E'] = score # 原数据集每个feature增加一个分数列 "feature_E" quality_encoded = [] # 由于qualitative集合中包含了非数值型变量和伪数值型变量(多为评分、等级等,其取值为1,2,3,4等等)两类 # 因此只需要对非数值型变量进行encode()处理。 # 如果采用One-Hot编码,则整个qualitative的特征都要进行pd,get_dummies()处理 for q in quality: encode(df_tr, q) quality_encoded.append(q+'_E') df_tr.drop(quality, axis=1, inplace=True) # 离散变量已经有了编码后的新变量,因此删去原变量 # df_tr.shape = (1460, 80) print(quality_encoded, '\n{} qualitative attributes have been encoded.'.format(len(quality_encoded))) # print(df_tr['HouseStyle_E'].value_counts()) # print(df_tr['MSZoning_E'].value_counts())['MSZoning_E', 'Street_E', 'Alley_E', 'LotShape_E', 'LandContour_E', 'Utilities_E', 'LotConfig_E', 'LandSlope_E', 'Neighborhood_E', 'Condition1_E', 'Condition2_E', 'BldgType_E', 'HouseStyle_E', 'RoofStyle_E', 'RoofMatl_E', 'Exterior1st_E', 'Exterior2nd_E', 'MasVnrType_E', 'ExterQual_E', 'ExterCond_E', 'Foundation_E', 'BsmtQual_E', 'BsmtCond_E', 'BsmtExposure_E', 'BsmtFinType1_E', 'BsmtFinType2_E', 'Heating_E', 'HeatingQC_E', 'CentralAir_E', 'Electrical_E', 'KitchenQual_E', 'Functional_E', 'FireplaceQu_E', 'GarageType_E', 'GarageFinish_E', 'GarageQual_E', 'GarageCond_E', 'PavedDrive_E', 'PoolQC_E', 'Fence_E', 'MiscFeature_E', 'SaleType_E', 'SaleCondition_E'] 43 qualitative attributes have been encoded. 6.0 726 7.0 445 3.0 154 5.0 65 2.0 37 1.0 14 4.0 11 8.0 8 Name: HouseStyle_E, dtype: int64 4.0 1151 2.0 218 5.0 65 3.0 16 1.0 10 Name: MSZoning_E, dtype: int64特征互相关分析与特征选取def spearman(frame, features): ''' 采用“斯皮尔曼等级相关”来计算变量与房价的相关性(可查阅百科) 此相关系数简单来说,可以对上述encoder()处理后的等级变量及其它与房价的相关性进行更好的评价(特别是对于非线性关系) ''' spr = pd.DataFrame() spr['feature'] = features spr['corr'] = [frame[f].corr(frame['SalePrice'], 'spearman') for f in features] spr = spr.sort_values('corr') plt.figure(figsize=(6, 0.25*len(features))) sns.barplot(data=spr, y='feature', x='corr', orient='h') features = quantity + quality_encoded spearman(df_tr, features) plt.figure(1,figsize=(12,9)) # 连续型变量相关图 corr = df_tr[quantity+['SalePrice']].corr() sns.heatmap(corr) plt.figure(2,figsize=(12,9)) # 等级型变量相关图(离散型和伪数值型变量均已被概括为等级型变量) corr = df_tr[quality_encoded+['SalePrice']].corr('spearman') sns.heatmap(corr) plt.figure(3,figsize=(12,9)) # 连续型变量-等级型变量相关图 corr = pd.DataFrame(np.zeros([len(quantity)+1, len(quality_encoded)+1]), index=quantity+['SalePrice'], columns=quality_encoded+['SalePrice']) for q1 in quantity+['SalePrice']: for q2 in quality_encoded+['SalePrice']: corr.loc[q1, q2] = df_tr[q1].corr(df_tr[q2], 'spearman') sns.heatmap(corr) # 给房价分段,并由此查看各段房价内那些特征的取值会出现悬殊 poor = df_tr[df_tr['SalePrice'] < 200000][quantity].mean() print(poor) pricey = df_tr[df_tr['SalePrice'] >= 200000][quantity].mean() print(pricey) diff = pd.DataFrame() diff['attr'] = quantity diff['difference'] = ((pricey-poor)/poor).values plt.figure(figsize=(10,4)) sns.barplot(data=diff, x='attr', y='difference') plt.xticks(rotation=90) plt.show()MSSubClass 59.565854 LotFrontage 53.802927 LotArea 9067.083902 OverallQual 5.506341 OverallCond 5.655610 YearBuilt 1962.481951 YearRemodAdd 1978.853659 MasVnrArea 64.260488 BsmtFinSF1 364.446829 BsmtFinSF2 46.875122 BsmtUnfSF 505.382439 TotalBsmtSF 916.704390 1stFlrSF 1039.745366 2ndFlrSF 268.669268 LowQualFinSF 6.414634 GrLivArea 1314.829268 BsmtFullBath 0.362927 BsmtHalfBath 0.067317 FullBath 1.389268 HalfBath 0.318049 BedroomAbvGr 2.789268 KitchenAbvGr 1.062439 TotRmsAbvGrd 6.053659 Fireplaces 0.461463 GarageYrBlt 1819.551220 GarageCars 1.518049 GarageArea 401.615610 WoodDeckSF 
73.447805 OpenPorchSF 33.525854 EnclosedPorch 25.609756 3SsnPorch 2.700488 ScreenPorch 12.701463 PoolArea 1.662439 MiscVal 52.189268 MoSold [...]数据变换与归一化经过上述分析与处理后,我们还需要对数据进行处理,使其能够按照学习器的特性进行学习,这其中最重要的便是调整数据的分布为正态分布output,var,var1 = 'SalePrice', 'GrLivArea', 'TotalBsmtSF' fig, axes = plt.subplots(nrows=1,ncols=2,figsize=(12,6)) df_train.plot.scatter(x=var,y=output,ylim=(0,800000),ax=axes[0]) df_train.plot.scatter(x=var1,y=output,ylim=(0,800000),ax=axes[1])由此可见,数据存在两个问题:1、存在离群点(居住面积、地下室特别大然而房价低),这样的点显然应该去掉;2、数据整体在左下角密集而右上角稀疏,呈圆锥状,这样的数据具有同方差性(homoscedasticity),需要进行处理。df_train.sort_values(by='GrLivArea', ascending=False)[:2] df_train = df_train.drop(df_train[df_train['Id'] == 1299].index) df_train = df_train.drop(df_train[df_train['Id'] == 524].index) from scipy.stats import norm fig = plt.figure(figsize=(12, 5)) plt.subplot(121) sns.distplot(df_train[output], fit=norm) plt.subplot(122) res = stats.probplot(df_train[output], plot=plt) plt.show()观察直方图和概率图可以发现,数据具有明显的正偏性,因此可采用对数来缓解这种趋势def log_transform(feature): # np.log1p(x) = log(1+x),这样就可以对0值求对数(针对 `TotalBsmtSF` 这样含有0的特征) df_train[feature] = np.log1p(df_train[feature].values) log_transform(output) log_transform(var) log_transform(var1) fig = plt.figure(figsize=(12, 15)) plt.subplot(321) sns.distplot(df_train[output], fit=norm) plt.subplot(322) res = stats.probplot(df_train[output], plot=plt) plt.subplot(323) sns.distplot(df_train[var],fit=norm) plt.subplot(324) res = stats.probplot(df_train[var], plot=plt) plt.subplot(325) sns.distplot(df_train[var1],fit=norm) plt.subplot(326) res = stats.probplot(df_train[var1], plot=plt) plt.show()从上图可见,部分特征还是存在问题,因此可以考虑构建新的特征来解决上述问题,并为这些连续变量提供新的连续或离散特征df_tr['HasBasement'] = df_tr['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0) df_tr['HasGarage'] = df_tr['GarageArea'].apply(lambda x: 1 if x > 0 else 0) df_tr['Has2ndFloor'] = df_tr['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0) df_tr['HasMasVnr'] = df_tr['MasVnrArea'].apply(lambda x: 1 if x > 0 else 0) df_tr['HasWoodDeck'] = df_tr['WoodDeckSF'].apply(lambda x: 1 if x > 0 else 0) df_tr['HasPorch'] = df_tr['OpenPorchSF'].apply(lambda x: 1 if x > 0 else 0) df_tr['HasPool'] = df_tr['PoolArea'].apply(lambda x: 1 if x > 0 else 0) df_tr['IsNew'] = df_tr['YearBuilt'].apply(lambda x: 1 if x > 2000 else 0) boolean = ['HasBasement', 'HasGarage', 'Has2ndFloor', 'HasMasVnr', 'HasWoodDeck', 'HasPorch', 'HasPool', 'IsNew'] def quadratic(feature): df_tr[feature] = df_tr[feature[:-1]]**2 qdr = ['OverallQual2', 'YearBuilt2', 'YearRemodAdd2', 'TotalBsmtSF2', '2ndFlrSF2', 'Neighborhood_E2', 'RoofMatl_E2', 'GrLivArea2'] for feature in qdr: quadratic(feature)除了上述连续变量的处理,对于此时的数据集中的类型变量(缺失值处理采用方法1的情况)应该进行“哑变量”处理,此步采用pd.get_dummies()与使用sklearn下的OneHotEncoder()作用是相同的。df_train = pd.get_dummies(df_train) df_train.shape # 未考虑上述增加特征时的运行结果ColorspacesLet's have a brief introduction into converting to different colorspaces! The video goes into more detail about colorspaces.import cv2 import matplotlib.pyplot as plt import numpy as np %matplotlib inline img = cv2.imread('../DATA/00-puppy.jpg')Converting to Different Colorspacesimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(img)**Converting to HSV**https://en.wikipedia.org/wiki/HSL_and_HSVimg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) plt.imshow(img) img = cv2.imread('../DATA/00-puppy.jpg') img = cv2.cvtColor(img, cv2.COLOR_BGR2HLS) plt.imshow(img)Import module homework.py as hm2import homework2 as hm21. 
Apply the function in hm2 module to load the data from the link preview the dataurl="https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD" # try other link to test the code, need "" df=hm2.read_url(url) df2. Use the function "test_create_dataframe" in the module hm2 to examine if the data hold the following conditions: The DataFrame contains only the columns that you specified as the second argument. The values in each column have the same python type There are at least 10 rows in the DataFrame. (This function also prints the details of each condition)column_name=["Date","Fremont Bridge East Sidewalk","Fremont Bridge West Sidewalk"] # try other column names to test the code result=hm2.test_create_dataframe(df,column_name)There are at least 10 rows in the DataFrame - True The DataFrame contains only the columns that you specified as the second argument - True The values in each column have the same python type - FalseThe result (True/False):resultb - value analysis The magnitude count is at its peak at 3.7, thus we will choose 3.7 as the cut-off magnitude.df_2=df_2.loc[df_2.Magnitud >= 3.8,:] df_2=df_2.loc[df_2.Fecha > "1974-00-00",:] df_2.reset_index(drop=True,inplace=True) df_magnitud_count = df_2[["Magnitud"]].groupby(["Magnitud"]).size().reset_index(name='counts') df_magnitud_count.plot(x="Magnitud",y="counts") df_2.to_csv("../../data/DATA_2.csv", index=False)Model Trainingdf.head() fig,ax = plt.subplots(nrows=1,ncols= 3,figsize=(12,4),dpi=200) ax[0].plot(df['TV'],df['sales'],'o') ax[0].set_xlabel('TV') ax[0].set_ylabel('Sales') ax[1].plot(df['radio'],df['sales'],'o') ax[1].set_xlabel('radio') ax[1].set_ylabel('Sales') ax[2].plot(df['newspaper'],df['sales'],'o') ax[2].set_xlabel('newspaper') ax[2].set_ylabel('Sales') fig.tight_layout()**Beta value will tell us about which feature is more important and how**task 1 first seperate feature and labeltask 2 train test splitX = df.drop(['sales','Total_spend'],axis=1) X y = df['sales'] y**Train test split**from sklearn.model_selection import train_test_split X_train, X_test,y_train,y_test = train_test_split(X,y,test_size=0.3, random_state=101) len(X_train) len(X_test)**Model call and Train**from sklearn.linear_model import LinearRegression # hyperparameters are the constants within the model we edit them to adjust performance # things you can edit #help(LinearRegression) model = LinearRegression() model.fit(X_train,y_train) predict = model.predict(X_test)**Performace Evaluation**from sklearn.metrics import mean_absolute_error,mean_squared_error df['sales'].mean() y_test.mean() y_train.mean() mean_absolute_error(y_test,predict) #MAE mean_squared_error(y_test,predict) #MSE np.sqrt(mean_squared_error(y_test,predict)) #RMSE sns.histplot(data = df,x='sales')**Evaluating Residuals**#predict_for_x_train = model.predict(X_train) sns.scatterplot(data=df,x=y_train,y=y_train-predict_for_x_train) plt.axhline(y=0,ls='--',color = 'r') sns.scatterplot(data=df,x=y_test,y=y_test-predict) plt.axhline(y=0,ls='--',color = 'r') sns.kdeplot(data=df,x=y_test-predict) len(y_test) len(predict) sns.scatterplot(x=predict,y=y_test-predict) plt.axhline(y=0,ls='--',color = 'r') sns.scatterplot(data=df,x=y_test,y=y_test-predict,color='b') plt.axhline(y=0,ls='--',color = 'r')Deploymentfinal_model = LinearRegression() final_model.fit(X,y) len(X),len(y) final_model.coef_ predicted_final = final_model.predict(X) residuals = y- predicted_final plt.plot(predicted_final,residuals,'o') plt.axhline(ls='--') from joblib import dump,load 
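# dump() below serializes the fitted LinearRegression to 'final_sales_model.joblib';
# load() reads it back so the same coefficients can be reused for prediction without retraining.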
dump(final_model,'final_sales_model.joblib') sales_model = load('final_sales_model.joblib') sales_model.coef_ campaign = [[400,140,50]] sales_model.predict(campaign)Independent Alleles ProblemTwo events A and B are independent if Pr(A and B) is equal to Pr(A)×Pr(B). In other words, the events do not influence each other, so that we may simply calculate each of the individual probabilities separately and then multiply.More generally, random variables X and Y are independent if whenever A and B are respective events for X and Y, A and B are independent (i.e., Pr(A and B)=Pr(A)×Pr(B)).As an example of how helpful independence can be for calculating probabilities, let X and Y represent the numbers showing on two six-sided dice. Intuitively, the number of pips showing on one die should not affect the number showing on the other die. If we want to find the probability that X+Y is odd, then we don't need to draw a tree diagram and consider all possibilities. We simply first note that for X+Y to be odd, either X is even and Y is odd or X is odd and Y is even. In terms of probability, Pr(X+Y is odd)=Pr(X is even and Y is odd)+Pr(X is odd and Y is even). Using independence, this becomes [Pr(X is even)×Pr(Y is odd)]+[Pr(X is odd)×Pr(Y is even)], or (12)2+(12)2=12. You can verify this result in Figure 2, which shows all 36 outcomes for rolling two dice.** Given: Two positive integers k (k≤7) and N (N≤2k). In this problem, we begin with Tom, who in the 0th generation has genotype Aa Bb. Tom has two children in the 1st generation, each of whom has two children, and so on. Each organism always mates with an organism having genotype Aa Bb. **** Return: The probability that at least N Aa Bb organisms will belong to the k-th generation of Tom's family tree (don't count the Aa Bb mates at each level). Assume that Mendel's second law holds for the factors. 
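The computation below follows from independence: each child of an Aa Bb × Aa Bb cross is Aa Bb with probability 1/2 × 1/2 = 1/4 (the two factors segregate independently under Mendel's second law), and the k-th generation contains 2^k offspring, so the number of Aa Bb organisms is binomially distributed with parameters 2^k and 1/4. The probability of at least N such organisms is the complement of the binomial CDF at N−1:
$$P(X \geq N) = 1 - \sum_{i=0}^{N-1} \binom{2^k}{i} \left(\frac{1}{4}\right)^{i} \left(\frac{3}{4}\right)^{2^k - i}$$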
**k,N = map(int,input().split()) M = 2**k sum_ = 0 def fact(n): if(n==1 or n==0) : return 1 return n * fact(n-1) def comb(n,m) : return fact(n)/fact(n-m)/fact(m) # P(at least N AaBb Organisms) = 1 - P(AaBb < N) for i in range(0,N) : sum_ += comb(M,i)*((0.25)**i)*((0.75)**(M-i)) print(1-sum_)7 35 0.30060220579105024Datasets TestTest the various kind of data sets that come with TensorLight.*Remarks: The order of the image outputs can change between single executions!*# Force matplotlib to use inline rendering %matplotlib inline import os import sys import time # add path to libraries for ipython sys.path.append(os.path.expanduser("~/libs")) import numpy as np import tensorflow as tf import tensorlight as light DATA_ROOT = "data" TRAIN_DIR = "train-test/datasets" def write_animation(dir_path, inputs, targets, fps, index): concat_tgt = np.concatenate((inputs, targets)) print(concat_tgt.shape) light.utils.video.write_multi_gif(os.path.join(dir_path, "anim-{:02d}.gif".format(index)), [concat_tgt], fps=fps, pad_value=1.0) light.utils.video.write_multi_image_sequence(os.path.join(dir_path, "timeline-{:02d}.png".format(index)), [concat_tgt], pad_value=1.0)MNISTmnist_train = light.datasets.mnist.MNISTTrainDataset(DATA_ROOT) mnist_valid = light.datasets.mnist.MNISTValidDataset(DATA_ROOT) mnist_test = light.datasets.mnist.MNISTTestDataset(DATA_ROOT) def display_mnist(dataset): x, y = dataset.get_batch(1) light.visualization.display_array(x[0]) print('Label: {}'.format(y)) display_mnist(mnist_train) display_mnist(mnist_valid) display_mnist(mnist_test)MovingMNISTSEQ_LEN = 10 moving_train = light.datasets.moving_mnist.MovingMNISTTrainDataset( DATA_ROOT, input_shape=[SEQ_LEN,64,64,1], target_shape=[SEQ_LEN,64,64,1]) moving_valid = light.datasets.moving_mnist.MovingMNISTValidDataset( DATA_ROOT, input_shape=[SEQ_LEN,64,64,1], target_shape=[SEQ_LEN,64,64,1]) moving_test = light.datasets.moving_mnist.MovingMNISTTestDataset( DATA_ROOT, input_seq_length=SEQ_LEN, target_seq_length=SEQ_LEN) def display_moving(dataset, title): x, y = dataset.get_batch(8) for i in range(x.shape[0]): full_seq = np.concatenate((x[i], y[i])) light.visualization.display_batch(full_seq, nrows=4, ncols=5, title=title) write_animation(os.path.join(TRAIN_DIR, "mm", "out", title), x[i], y[i], 5, i) display_moving(moving_train, 'Train') display_moving(moving_valid, 'Validation') display_moving(moving_test, 'Test') def write_single_images(dataset): x, y = dataset.get_batch(1) for i in range(x.shape[1]): light.utils.image.write(os.path.join("out", "mm-input-{}.png".format(i)), x[0][i]) for i in range(y.shape[1]): light.utils.image.write(os.path.join("out", "mm-target-{}.png".format(i)), y[0][i]) write_single_images(moving_train)UCF11*Remarks: This example uses an input queue.*ucf11_train = light.datasets.ucf11.UCF11TrainDataset(DATA_ROOT, input_seq_length=3, target_seq_length=3, image_scale_factor=0.5, gray_scale=False, min_examples_in_queue=128, queue_capacitiy=256, num_threads=16, do_distortion=False, crop_size=None) ucf11_valid = light.datasets.ucf11.UCF11ValidDataset(DATA_ROOT, input_seq_length=3, target_seq_length=3, image_scale_factor=0.5, gray_scale=False, crop_size=None) batch_x, batch_y = ucf11_train.get_batch(32) with tf.Session() as sess: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) try: print('Starting queue runners...') x, y = sess.run([batch_x, batch_y]) except tf.errors.OutOfRangeError: print("Done training -- epoch limit reached") finally: # When done, ask the threads to stop 
coord.request_stop() coord.join(threads) def display_ucf11_queue(x, y, title): print("x-range: [{}, {}], y-range: [{}, {}]".format(x.min(), x.max(), y.min(), y.max())) full_seq = np.concatenate((x[0], y[0])) light.visualization.display_batch(full_seq, nrows=2, ncols=3, title=title) def display_ucf11_batch(dataset, title): x, y = dataset.get_batch(2) print("x-range: [{}, {}], y-range: [{}, {}]".format(x.min(), x.max(), y.min(), y.max())) light.visualization.display_batch(x[0], nrows=1, ncols=3, title=title + '-Inputs') light.visualization.display_batch(y[0], nrows=1, ncols=3, title=title + '-Targets') display_ucf11_queue(x, y, 'Train') display_ucf11_batch(ucf11_valid, 'Validation')UCF101*Remarks: This example uses an input queue.*ucf101_train = light.datasets.ucf101.UCF101TrainDataset(DATA_ROOT, input_seq_length=10, target_seq_length=10, image_scale_factor=0.5, gray_scale=False, num_threads=16, min_examples_in_queue=32, queue_capacitiy=64, do_distortion=False, crop_size=None) ucf101_valid = light.datasets.ucf101.UCF101ValidDataset(DATA_ROOT, input_seq_length=10, target_seq_length=10, image_scale_factor=0.5, gray_scale=False, double_with_flipped=True, crop_size=None) ucf101_test = light.datasets.ucf101.UCF101TestDataset(DATA_ROOT, input_seq_length=10, target_seq_length=10, image_scale_factor=0.5, gray_scale=False, double_with_flipped=False, crop_size=None) print("Train-size: {}".format(ucf101_train.size)) print("Valid-size: {}".format(ucf101_valid.size)) print("Test-size: {}".format(ucf101_test.size)) with tf.device("/cpu:0"): batch_x, batch_y = ucf101_train.get_batch(8) with tf.Session() as sess: sess.run(tf.initialize_all_variables()) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) try: print('Starting queue runners...') x, y = sess.run([batch_x, batch_y]) except tf.errors.OutOfRangeError: print("Done training -- epoch limit reached") finally: # When done, ask the threads to stop coord.request_stop() coord.join(threads) def display_ucf101(x, y, title): print("x-shape: {}, y-shape: ", x.shape, y.shape) print("x-range: [{}, {}], y-range: [{}, {}]".format(x.min(), x.max(), y.min(), y.max())) for i in range(x.shape[0]): diff = 0.0 for j in range(x.shape[1] - 1): diff += np.sum(np.square( (x[i,j+1] - x[i,j]) * 2) ) print("Diff @ {}: {}".format(i, diff)) full_seq = np.concatenate((x[i], y[i])) light.visualization.display_batch(full_seq, nrows=4, ncols=5, title="{}-{:2d}".format(title, i)) write_animation(os.path.join(TRAIN_DIR, "ucf", "out-full", title), x[i], y[i], 5, i) display_ucf101(x, y, 'Train') print('-' * 80) x, y = ucf101_valid.get_batch(8) display_ucf101(x, y, 'Validation') print('-' * 80) x, y = ucf101_test.get_batch(8) display_ucf101(x, y, 'Test')MsPacmanpac_train = light.datasets.ms_pacman.MsPacmanTrainDataset(DATA_ROOT, input_seq_length=8, target_seq_length=8, crop_size=None) pac_valid = light.datasets.ms_pacman.MsPacmanValidDataset(DATA_ROOT, input_seq_length=8, target_seq_length=8, crop_size=None) pac_test = light.datasets.ms_pacman.MsPacmanTestDataset(DATA_ROOT, input_seq_length=8, target_seq_length=8, crop_size=None) print("Dataset-lengths:", pac_train.size, pac_valid.size, pac_test.size) def display_moving(dataset, title): x, y = dataset.get_batch(8) for i in range(x.shape[0]): full_seq = np.concatenate((x[i], y[i])) light.visualization.display_batch(full_seq, nrows=4, ncols=5, title=title) write_animation(os.path.join(TRAIN_DIR, "pac", "out-full", title), x[i], y[i], 5, i) display_moving(pac_train, 'Train') 
display_moving(pac_valid, 'Validation') display_moving(pac_test, 'Test')Runtime: Train-Queue + Validation-FeedingThis test tests the internal conditional switches withing the light.core.AbstractRuntime class to handle a **queue** for training and standard **feeding** für validation.class SimplePredictionModel(light.model.AbstractModel): """Predicts n future frames from given n frames.""" def __init__(self, weight_decay=0.0): super(SimplePredictionModel, self).__init__(weight_decay) @light.utils.attr.override def inference(self, inputs, targets, feeds, is_training, device_scope, memory_device): with tf.variable_scope("Encoder"): # stack time-dimension on channels (inputs=[bs,t,h,w,c]) unpacked_inputs = tf.unpack(inputs, axis=1) # unpacked_inputs=t*[bs,h,w,c] stacked_inputs = tf.concat(concat_dim=3, values=unpacked_inputs) # stacked_inputs=[bs,h,w,c*t] # 1: Conv conv1 = light.network.conv2d("Conv1", stacked_inputs, 16, (5, 5), (2, 2), weight_init=tf.contrib.layers.xavier_initializer_conv2d(), bias_init=0.1, regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay), activation=tf.nn.relu) # 2: Conv conv2 = light.network.conv2d("Conv2", conv1, 16, (3, 3), (2, 2), weight_init=tf.contrib.layers.xavier_initializer_conv2d(), bias_init=0.1, regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay), activation=tf.nn.relu) encoder_out = conv2 with tf.variable_scope("Decoder"): # 3: Deconv conv3t = light.network.conv2d_transpose("Deconv1", encoder_out, 16, (3, 3), (2, 2), weight_init=light.init.bilinear_initializer(), bias_init=0.1, regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay), activation=tf.nn.relu) # 4: Deconv target_shape = targets.get_shape().as_list() channels_out = target_shape[1] * target_shape[4] # t * c conv4t = light.network.conv2d_transpose("Deconv2", conv3t, channels_out, (5, 5), (2, 2), weight_init=light.init.bilinear_initializer(), bias_init=0.1, regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay), activation=tf.nn.sigmoid) # split channel dimensions spl_out = tf.split(split_dim=3, num_split=target_shape[1], value=conv4t) # spl_out=t*[b,h,w,c] decoder_out = tf.pack(spl_out, axis=1) # decoder_out=[b,t,h,w,c] return decoder_out @light.utils.attr.override def loss(self, predictions, targets, device_scope): return light.loss.bce(predictions, targets)UCF11runtime = light.core.DefaultRuntime(TRAIN_DIR) runtime.register_datasets(ucf11_train, ucf11_valid) runtime.register_model(SimplePredictionModel(weight_decay=0.001)) runtime.build() runtime.train(batch_size=16, steps=1000) def display_ucf11(seq, title): light.visualization.display_batch(seq, nrows=1, ncols=3, title=title) x, y = ucf11_valid.get_batch(1) pred = runtime.predict(x) display_ucf11(x[0], "Inputs") display_ucf11(y[0], "Targets") display_ucf11(pred[0], "Pred")Performance Benchmark (Throughput)Remarks: Using too feq threads can lead to an OutOfRange exception, because we consuming batches in faster speed than the producers can create examples.The queue might be feel to be slower, because it might perform image-preprocessing, as well as a session is launched in every run.LOOPS = 10000 BATCH_SIZE = 32 # define which dataset to test bench_dataset_queue = ucf11_train bench_dataset_feeding = ucf11_valid a = tf.constant(2) b = tf.constant(3) c = tf.constant(4) # do a calcualtion to slow (especially the benchmark with the queue) a little bit down, because it can not # produce examples in such a high speed. 
Additionally, this makes both benchmarks more comparable, because # the feeding_queue needs to run a session as well with this calculation. calculation = a * b + cFor queue-datasets:with tf.device("/cpu:0"): # Inputs should be done on CPU only (best performance) batch_x, batch_y = bench_dataset_queue.get_batch(BATCH_SIZE) with tf.Session() as sess: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) try: this_step = 0 overhead = 0.0 while not coord.should_stop(): this_step += 1 if this_step > LOOPS + 1: # extra loop because we skip the first run break if this_step == 1: print("Starting queue runners...") if this_step == 2: print("Starting...") # skipt 1st round, because queue runners have to be filled start_time = time.time() if this_step % 1000 == 0: print(this_step) x, y, _ = sess.run([batch_x, batch_y, calculation]) except tf.errors.OutOfRangeError: print("Interrupted: Queue runners are out of range. Epoch limit reached?") finally: # When done, ask the threads to stop duration = time.time() - start_time coord.request_stop() coord.join(threads) print("Duration: {}".format(duration))For feeding-datasets:with tf.Session() as sess: overhead = 0.0 for i in xrange(LOOPS): if i == 0: print("Starting...") start_time = time.time() start_overhead = time.time() _ = sess.run(calculation) overhead += time.time() - start_overhead batch_x, batch_y = bench_dataset_feeding.get_batch(BATCH_SIZE) duration = time.time() - start_time print("Duration: {}".format(duration)) print("Overhead: {}".format(overhead))This is an interactive land probability calculator for Magic the Gathering.To start the interactive calculator, select"Cell""Run All"from above menu and then scroll to the bottom see the interactive probability calculator.You can specify with interactive sliders the number of lands and nonlands in the deck, and you'll get a clearly explained output showing you the probability after not just the first draw, but also after each mulligan. So far as I know, the only way to get the interactive slider is by running this notebook (static view doesn't get the slider).If you want to skip the probability explanations, then simply scroll to the bottom and see how probabilities vary as you adjust land counts and deck size with the sliders.This was a fun way to help me recall probability theory (DeGroot and Schervish, chapter 1), practice probability calculation in Python, learn how to use iPython Notebook interactively, and at the same have a helpful Magic: The Gathering tool.Originally done in Dec 2014 as iPython notebookAdapted to Jupyter April 2019 (and made some minor explanation corrections)import ipywidgets as widgets from sympy import binomial as combos from IPython.display import display, HTML # Ignore this cell (unless you intend to create your own notebooks with messy display output) # At first all this code was integrated into land_probability() function. It was fewer # lines of code but harder to read. This HTML and iPython Notebook messiness is not # important to calculating probability. By pulling out the messy display code, it is # easier to focus on and understand the probability aspects of land_probability(). # This can be re-used in other iPython/Jupyter Notebooks, simplifing HTML output for interact() def title_HTML(title): return '

<h2>' + title + '</h2>'
def intro_HTML(*args): lands, cards, drawn_cards, mulligans_so_far = args slider_inputs = {'lands: ': lands, 'cards: ': cards, 'number of cards drawn: ': drawn_cards, 'mulligans so far: ': mulligans_so_far} s = '' for key in slider_inputs: s += '<br>' + key + str(slider_inputs[key]) return s
def start_HTML_table(headers): s = '<table><tr>\n' for header in headers: s += '<th>' + header + '</th>' s += '</tr>' return s
def row_of_HTML_table(x_int, scipy_float): ''' Args: x_int: integer scipy_float: float (scipy or regular) ''' return '<tr><td>{0:.>6}</td><td>{1:.2%}</td></tr>'.format(x_int, float(scipy_float)) # must coerce scipy float type into regular float type for format to work right
def finish_HTML_table(s, min_land, max_land, drawn_cards, mulligans_so_far, below_land_prob, above_land_prob, target_land_prob, out_of_range): cumulative_p = 1 for p in out_of_range[:-1]: cumulative_p *= p s += '</table><br>' s += '{0:.1%} chance of between {1} and {2} lands in this {3} card draw.<br>'.format(float(target_land_prob), min_land, max_land, drawn_cards) s += '{0:.1%} chance that number of lands will be outside this range.<br><br>'.format(float(1 - target_land_prob)) s += '{0:.2%} chance that number of lands will be too low after {1} mulligan(s).<br>'.format(float(below_land_prob * cumulative_p), mulligans_so_far) s += '{0:.2%} chance that number of lands will be too high after {1} mulligan(s).<br><br>
'.format(float(above_land_prob * cumulative_p), mulligans_so_far) return sIn mathematical notation, the probability of exactly 2 land cards appearing in a 7 card draw from a 60 card deck with 20 lands is:$$\frac{\binom{40}{5}\binom{20}{2}}{60 \choose 7} = \frac{\frac{40!}{5!35!}\frac{20!}{2!18!}}{\frac{60!}{7!53!}} = .3237$$The denominator in all probability calculation represents the sample space (out of how many possibilities . . .). In this case, it is the number of subsets of 7 cards possible out of 60 cards. This is exactly the definition of a combination. Plugging into the combination formula, using 60 choose 7.$${60 \choose 7} = \frac{60!}{7!53!}$$The first part of the numerator represents the number of nonland combinations possible. There are 5 nonlands in this hand, drawn from 40 nonlands. So the number of subsets of 5 cards possible out of 40 cards is:$${40 \choose 5} = \frac{40!}{5!35!}$$Similarly, the second part of the numerator is the number of land combinations possible when 2 lands are drawn from 20 lands:$${20 \choose 2} = \frac{20!}{2!18!}$$These two parts of the numerator are multiplied, because each single combination of nonlands can be combined with all the combinations of lands. The general form of this is the "multiplication rule" which states:> When all the outcomes in each part of an experiment can occur regardless of which outcomes have occured in the other parts, then the parts are multipled together.def land_probability(lands, cards, drawn_cards, min_land, max_land, mulligans=0): out_of_range = [] nonlands = cards - lands text = title_HTML("Probabilities for Lands Drawn:") for mulligans_so_far in range(mulligans+1): text += intro_HTML(lands, cards, drawn_cards - mulligans_so_far, mulligans_so_far) text += start_HTML_table(['Lands', 'Probability']) target_land_prob, below_land_prob, above_land_prob = 0, 0, 0 for drawn_lands in range(0, drawn_cards + 1 - mulligans_so_far): p = (combos(nonlands, drawn_cards - drawn_lands - mulligans_so_far) * combos(lands, drawn_lands) / float(combos(cards, drawn_cards - mulligans_so_far))) if drawn_lands < min_land: below_land_prob += p elif drawn_lands > max_land: above_land_prob += p else: target_land_prob +=p text += row_of_HTML_table(drawn_lands, p) out_of_range.append(below_land_prob + above_land_prob) text = finish_HTML_table(text, min_land, max_land, drawn_cards - mulligans_so_far, mulligans_so_far, below_land_prob, above_land_prob, target_land_prob, out_of_range) display(HTML(text))Magic the Gathering intro packs recommend the following defaults for land construction:* 60 card deck* 24 lands in the deckIf you follow the recommended defaults, you can see that there is better than an 85% chance of starting with at least 2 lands when drawing your initial 7 card hand.It can be useful to think of land tappable creatures as lands for the purposes of this calculation. For example, if you have 20 lands and 4 Mystic Elves, that is essentially equivalent to 24 lands (well, not really, because there are many more ways to lose a Mystic Elf than a land . . . but it's still useful to think of your starting hand this way as you're not likely to lose a Mystic Elf on turn 1).You may want to consider mulligans in your calculation. Move the mulligan slider to "1" and you'll see the probabilities for the subsequent 6 card draw, as well as the chances of getting below or above your desired range of lands to start with. 
Move it to "2" and you'll see it go down to a 5 card draw (2nd mulligan).I assume for this analysis that mulligans automatically occur when below minimum required land or above maximum required land, but not otherwise. Therefore, the probability, p, of having too little land after 1 or 2 draws (2nd draw only if 1st draw outside of target range) is:p = p1(below) * (p0(below) + p0(above))where* p0 represents probability in a 7 card draw (zero mulligans)* p1 represents probability in a 6 card draw (1 mulligan)* p0(below) represents probability that the number of cards drawn is below the required minimumi = widgets.interact(land_probability, lands = widgets.IntSlider(min = 1, max = 50, step = 1, value = 24), cards = widgets.IntSlider(min = 1, max = 200, step = 1, value = 60), drawn_cards = widgets.IntSlider(min = 0, max = 20, step = 1, value = 7), min_land = widgets.IntSlider(min = 0, max = 3, step = 1, value = 2), max_land = widgets.IntSlider(min = 3, max = 7, step = 1, value = 5), mulligans = widgets.IntSlider(min = 0, max = 3, step = 1, value = 2) )This is a starter kernel to train a YOLOv5 model on [SIIM-FISABIO-RSNA COVID-19 Detection](https://www.kaggle.com/c/siim-covid19-detection/overview) dataset. Given an input image the task is to find the region of opacity in the chest using bounding box coordinates. Check out [Visualize Bounding Boxes Interactively](https://www.kaggle.com/ayuraj/visualize-bounding-boxes-interactively) for interactive bounding box EDA. 🖼️ What is YOLOv5?YOLO an acronym for 'You only look once', is an object detection algorithm that divides images into a grid system. Each cell in the grid is responsible for detecting objects within itself.[Ultralytics' YOLOv5](https://ultralytics.com/yolov5) ("You Only Look Once") model family enables real-time object detection with convolutional neural networks. 🦄 What is Weights and Biases?Weights & Biases (W&B) is a set of machine learning tools that helps you build better models faster. Check out [Experiment Tracking with Weights and Biases](https://www.kaggle.com/ayuraj/experiment-tracking-with-weights-and-biases) to learn more. Weights & Biases is directly integrated into YOLOv5, providing experiment metric tracking, model and dataset versioning, rich model prediction visualization, and more.It's a work in progress:✔️ Required folder structure. ✔️ Bounding box format required for YOLOv5. ✔️ **Train** a small YOLOv5 model. ✔️ Experiment tracking with W&B. ✔️ Proper documentation ✔️ Inference ❌ Model prediction visualization. Results [Check out W&B Run Page $\rightarrow$](https://wandb.ai/ayush-thakur/kaggle-siim-covid/runs/1bk93e3j)![img](https://i.imgur.com/quOYtNN.gif) ☀️ Imports and SetupAccording to the official [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) guide, YOLOv5 requires a certain directory structure. ```/parent_folder /dataset /images /labels /yolov5```* We thus will create a `/tmp` directory. * Download YOLOv5 repository and pip install the required dependencies. * Install the latest version of W&B and login with your wandb account. You can create your free W&B account [here](https://wandb.ai/site).%cd ../ !mkdir tmp %cd tmp # Download YOLOv5 !git clone https://github.com/ultralytics/yolov5 # clone repo %cd yolov5 # Install dependencies %pip install -qr requirements.txt # install dependencies %cd ../ import torch print(f"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})") # Install W&B !pip install -q --upgrade wandb # Login import wandb wandb.login() # Necessary/extra dependencies. import os import gc import cv2 import numpy as np import pandas as pd from tqdm import tqdm from shutil import copyfile import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split #customize iPython writefile so we can write variables from IPython.core.magic import register_line_cell_magic @register_line_cell_magic def writetemplate(line, cell): with open(line, 'w') as f: f.write(cell.format(**globals()))🦆 HyperparametersTRAIN_PATH = 'input/siim-covid19-resized-to-256px-jpg/train/' IMG_SIZE = 256 BATCH_SIZE = 16 EPOCHS = 10🔨 Prepare DatasetThis is the most important section when it comes to training an object detector with YOLOv5. The directory structure, bounding box format, etc must be in the correct order. This section builds every piece needed to train a YOLOv5 model.I am using [xhlulu's](https://www.kaggle.com/xhlulu) resized dataset. The uploaded 256x256 Kaggle dataset is [here](https://www.kaggle.com/xhlulu/siim-covid19-resized-to-256px-jpg). Find other image resolutions [here](https://www.kaggle.com/c/siim-covid19-detection/discussion/239918).* Create train-validation split. * Create required `/dataset` folder structure and more the images to that folder. * Create `data.yaml` file needed to train the model. * Create bounding box coordinates in the required YOLO format.# Everything is done from /kaggle directory. %cd ../ # Load image level csv file df = pd.read_csv('input/siim-covid19-detection/train_image_level.csv') # Modify values in the id column df['id'] = df.apply(lambda row: row.id.split('_')[0], axis=1) # Add absolute path df['path'] = df.apply(lambda row: TRAIN_PATH+row.id+'.jpg', axis=1) # Get image level labels df['image_level'] = df.apply(lambda row: row.label.split(' ')[0], axis=1) df.head(5) # Load meta.csv file # Original dimensions are required to scale the bounding box coordinates appropriately. meta_df = pd.read_csv('input/siim-covid19-resized-to-256px-jpg/meta.csv') train_meta_df = meta_df.loc[meta_df.split == 'train'] train_meta_df = train_meta_df.drop('split', axis=1) train_meta_df.columns = ['id', 'dim0', 'dim1'] train_meta_df.head(2) # Merge both the dataframes df = df.merge(train_meta_df, on='id',how="left") df.head(2)🍘 Train-validation split# Create train and validation split. train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42, stratify=df.image_level.values) train_df.loc[:, 'split'] = 'train' valid_df.loc[:, 'split'] = 'valid' df = pd.concat([train_df, valid_df]).reset_index(drop=True) print(f'Size of dataset: {len(df)}, training images: {len(train_df)}. validation images: {len(valid_df)}')🍚 Prepare Required Folder StructureThe required folder structure for the dataset directory is: ```/parent_folder /dataset /images /train /val /labels /train /val /yolov5```Note that I have named the directory `covid`.os.makedirs('tmp/covid/images/train', exist_ok=True) os.makedirs('tmp/covid/images/valid', exist_ok=True) os.makedirs('tmp/covid/labels/train', exist_ok=True) os.makedirs('tmp/covid/labels/valid', exist_ok=True) ! ls tmp/covid/images # Move the images to relevant split folder. 
for i in tqdm(range(len(df))): row = df.loc[i] if row.split == 'train': copyfile(row.path, f'tmp/covid/images/train/{row.id}.jpg') else: copyfile(row.path, f'tmp/covid/images/valid/{row.id}.jpg')🍜 Create `.YAML` fileThe `data.yaml`, is the dataset configuration file that defines 1. an "optional" download command/URL for auto-downloading, 2. a path to a directory of training images (or path to a *.txt file with a list of training images), 3. a path to a directory of validation images (or path to a *.txt file with a list of validation images), 4. the number of classes, 5. a list of class names.> 📍 Important: In this competition, each image can either belong to `opacity` or `none` image-level labels. That's why I have used the number of classes, `nc` to be 2. YOLOv5 automatically handles the images without any bounding box coordinates. > 📍 Note: The `data.yaml` is created in the `yolov5/data` directory as required.# Create .yaml file import yaml data_yaml = dict( train = '../covid/images/train', val = '../covid/images/valid', nc = 2, names = ['none', 'opacity'] ) # Note that I am creating the file in the yolov5/data/ directory. with open('tmp/yolov5/data/data.yaml', 'w') as outfile: yaml.dump(data_yaml, outfile, default_flow_style=True) %cat tmp/yolov5/data/data.yaml🍮 Prepare Bounding Box Coordinated for YOLOv5For every image with **bounding box(es)** a `.txt` file with the same name as the image will be created in the format shown below:* One row per object. * Each row is class `x_center y_center width height format`. * Box coordinates must be in normalized xywh format (from 0 - 1). We can normalize by the boxes in pixels by dividing `x_center` and `width` by image width, and `y_center` and `height` by image height. * Class numbers are zero-indexed (start from 0). > 📍 Note: We don't have to remove the images without bounding boxes from the training or validation sets.# Get the raw bounding box by parsing the row value of the label column. # Ref: https://www.kaggle.com/yujiariyasu/plot-3positive-classes def get_bbox(row): bboxes = [] bbox = [] for i, l in enumerate(row.label.split(' ')): if (i % 6 == 0) | (i % 6 == 1): continue bbox.append(float(l)) if i % 6 == 5: bboxes.append(bbox) bbox = [] return bboxes # Scale the bounding boxes according to the size of the resized image. def scale_bbox(row, bboxes): # Get scaling factor scale_x = IMG_SIZE/row.dim1 scale_y = IMG_SIZE/row.dim0 scaled_bboxes = [] for bbox in bboxes: x = int(np.round(bbox[0]*scale_x, 4)) y = int(np.round(bbox[1]*scale_y, 4)) x1 = int(np.round(bbox[2]*(scale_x), 4)) y1= int(np.round(bbox[3]*scale_y, 4)) scaled_bboxes.append([x, y, x1, y1]) # xmin, ymin, xmax, ymax return scaled_bboxes # Convert the bounding boxes in YOLO format. 
def get_yolo_format_bbox(img_w, img_h, bboxes): yolo_boxes = [] for bbox in bboxes: w = bbox[2] - bbox[0] # xmax - xmin h = bbox[3] - bbox[1] # ymax - ymin xc = bbox[0] + int(np.round(w/2)) # xmin + width/2 yc = bbox[1] + int(np.round(h/2)) # ymin + height/2 yolo_boxes.append([xc/img_w, yc/img_h, w/img_w, h/img_h]) # x_center y_center width height return yolo_boxes # Prepare the txt files for bounding box for i in tqdm(range(len(df))): row = df.loc[i] # Get image id img_id = row.id # Get split split = row.split # Get image-level label label = row.image_level if row.split=='train': file_name = f'tmp/covid/labels/train/{row.id}.txt' else: file_name = f'tmp/covid/labels/valid/{row.id}.txt' if label=='opacity': # Get bboxes bboxes = get_bbox(row) # Scale bounding boxes scale_bboxes = scale_bbox(row, bboxes) # Format for YOLOv5 yolo_bboxes = get_yolo_format_bbox(IMG_SIZE, IMG_SIZE, scale_bboxes) with open(file_name, 'w') as f: for bbox in yolo_bboxes: bbox = [1]+bbox bbox = [str(i) for i in bbox] bbox = ' '.join(bbox) f.write(bbox) f.write('\n')🚅 Train with W&B%cd tmp/yolov5/```--img {IMG_SIZE} \ Input image size.--batch {BATCH_SIZE} \ Batch size--epochs {EPOCHS} \ Number of epochs--data data.yaml \ Configuration file--weights yolov5s.pt \ Model name--save_period 1\ Save model after interval--project kaggle-siim-covid W&B project name```!python train.py --img {IMG_SIZE} \ --batch {BATCH_SIZE} \ --epochs {EPOCHS} \ --data data.yaml \ --weights yolov5s.pt \ --save_period 1\ --project kaggle-siim-covidModel Saved Automatically as ArtifactSince it's a kernel based competition, you can easily download the best model from the W&B Artifacts UI and upload as a Kaggle dataset that you can load in your inference kernel (internel disabled). [Path to saved model $\rightarrow$](https://wandb.ai/ayush-thakur/kaggle-siim-covid/artifacts/model/run_jbt74n7q_model/4c3ca5752dba99bd227e)![img](https://i.imgur.com/KhRLQvR.png)> 📍 Download the model with the `best` alias tagged to it. InferenceYou will probably use a `Submission.ipynb` kernel to run all the predictions. After training a YOLOv5 based object detector -> head to the artifacts page and download the best model -> upload the model as a Kaggle dataset -> Use it with the submission folder. > 📍 Note that you might have to clone the YOLOv5 repository in a Kaggle dataset as well. In this section, I will show you how you can do the inference and modify the predicted bounding box coordinates.TEST_PATH = '/kaggle/input/siim-covid19-resized-to-256px-jpg/test/' # absolute pathSince I am training the model in this kernel itself, I will not be using the method that I have described above. The best model is saved in the directory `project_name/exp*/weights/best.pt`. In `exp*`, * can be 1, 2, etc.MODEL_PATH = 'kaggle-siim-covid/exp/weights/best.pt'```--weights {MODEL_PATH} \ path to the best model.--source {TEST_PATH} \ absolute path to the test images.--img {IMG_SIZE} \ Size of image--conf 0.281 \ Confidence threshold (default is 0.25)--iou-thres 0.5 \ IOU threshold (default is 0.45)--max-det 3 \ Number of detections per image (default is 1000) --save-txt \ Save predicted bounding box coordinates as txt files--save-conf Save the confidence of prediction for each bounding box```!python detect.py --weights {MODEL_PATH} \ --source {TEST_PATH} \ --img {IMG_SIZE} \ --conf 0.281 \ --iou-thres 0.5 \ --max-det 3 \ --save-txt \ --save-confHow to find the confidence score?1. 
First first the [W&B run page](https://wandb.ai/ayush-thakur/kaggle-siim-covid/runs/jbt74n7q) generated by training the YOLOv5 model. 2. Go to the media panel -> click on the F1_curve.png file to get a rough estimate of the threshold -> go to the Bounding Box Debugger panel and interactively adjust the confidence threshold. ![img](https://i.imgur.com/cCUnTBw.gif) > 📍 The bounding box coordinates are saved as text file per image name. It is saved in this directory `runs/detect/exp3/labels`.PRED_PATH = 'runs/detect/exp3/labels' !ls {PRED_PATH} # Visualize predicted coordinates. %cat runs/detect/exp3/labels/ba91d37ee459.txt> 📍 Note: 1 is class id (opacity), the first four float numbers are `x_center`, `y_center`, `width` and `height`. The final float value is `confidence`.prediction_files = os.listdir(PRED_PATH) print('Number of test images predicted as opaque: ', len(prediction_files))> 📍 Out of 1263 test images, 583 were predicted with `opacity` label and thus we have that many prediction txt files. SubmissionIn this section, I will show how you can use YOLOv5 as object detector and prepare `submission.csv` file.# The submisison requires xmin, ymin, xmax, ymax format. # YOLOv5 returns x_center, y_center, width, height def correct_bbox_format(bboxes): correct_bboxes = [] for b in bboxes: xc, yc = int(np.round(b[0]*IMG_SIZE)), int(np.round(b[1]*IMG_SIZE)) w, h = int(np.round(b[2]*IMG_SIZE)), int(np.round(b[3]*IMG_SIZE)) xmin = xc - int(np.round(w/2)) xmax = xc + int(np.round(w/2)) ymin = yc - int(np.round(h/2)) ymax = yc + int(np.round(h/2)) correct_bboxes.append([xmin, xmax, ymin, ymax]) return correct_bboxes # Read the txt file generated by YOLOv5 during inference and extract # confidence and bounding box coordinates. def get_conf_bboxes(file_path): confidence = [] bboxes = [] with open(file_path, 'r') as file: for line in file: preds = line.strip('\n').split(' ') preds = list(map(float, preds)) confidence.append(preds[-1]) bboxes.append(preds[1:-1]) return confidence, bboxes # Read the submisison file sub_df = pd.read_csv('/kaggle/input/siim-covid19-detection/sample_submission.csv') sub_df.tail() # Prediction loop for submission predictions = [] for i in tqdm(range(len(sub_df))): row = sub_df.loc[i] id_name = row.id.split('_')[0] id_level = row.id.split('_')[-1] if id_level == 'study': # do study-level classification predictions.append("Negative 1 0 0 1 1") # dummy prediction elif id_level == 'image': # we can do image-level classification here. # also we can rely on the object detector's classification head. # for this example submisison we will use YOLO's classification head. # since we already ran the inference we know which test images belong to opacity. if f'{id_name}.txt' in prediction_files: # opacity label confidence, bboxes = get_conf_bboxes(f'{PRED_PATH}/{id_name}.txt') bboxes = correct_bbox_format(bboxes) pred_string = '' for j, conf in enumerate(confidence): pred_string += f'opacity {conf} ' + ' '.join(map(str, bboxes[j])) + ' ' predictions.append(pred_string[:-1]) else: predictions.append("None 1 0 0 1 1") sub_df['PredictionString'] = predictions sub_df.to_csv('submission.csv', index=False) sub_df.tail()02 - Validera med W3CFöljer webbplatsens kod webbstandard? Ganska ofta finns fel och varningar. Det händer ytterst sällan att valideringsfel beror på att en webbplats ligger före en blivande standard, men läs gärna om [när Whitespace med flit valde att ignorera hundratals fel](https://whitespace.se/blogg/nar-ar-det-ratt-att-gora-fel/). 
02:01 - Validerar HTML-koden?När HTML-koden är trasig kan det ställa till problem i folks olika webbläsare.import requests from bs4 import BeautifulSoup url = 'https://www.vgregion.se' # fejkar att det är Google som ber om sidan headers = { 'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', } req = requests.get('https://validator.w3.org/nu/?doc={0}'.format(url.replace(':', '%3A').replace('/', '%2F')), headers=headers) soup = BeautifulSoup(req.content) fel = soup.find_all(class_="error") varning = soup.find_all(class_="warning") print('Antal valideringsfel:', len(fel), 'stycken') print('Antal varningar:', len(varning), 'stycken')Antal valideringsfel: 29 stycken Antal varningar: 11 stycken02:02 – Validerar CSS-koden?Trasig CSS-kod kan göra att saker inte visas upp på rätt sätt men också att det tar längre tid för webbsidan att visa upp sig när den måste lägga tid på att bearbeta felaktigheter.# BeautifulSoup och requests är importerade i föregående cell # variablerna url och header är definierade i föregående cell req = requests.get('https://jigsaw.w3.org/css-validator/validator?uri={0}&profile=css3svg&usermedium=all&warning=1&vextwarning=&lang=en'.format(url.replace(':', '%3A').replace('/', '%2F')), headers=headers) soup = BeautifulSoup(req.content) fel = soup.find_all("tr", {"class": "error"}) varning = soup.find_all("tr", {"class": "warning"}) print('Antal valideringsfel:', len(fel), 'stycken') print('Antal varningar:', len(varning), 'stycken')Antal valideringsfel: 8 stycken Antal varningar: 634 stycken02:03 - Validerar RSS-flödet?# todo02:04 - Validerar sitemap/siteindex?# todoLab 8Last week we used the improved Euler method to numerically solve a DE of the form\begin{align*}\frac{\mathrm{d}\mathbf{y}}{\mathrm{d}t} = f(\mathbf{y}, t),\end{align*}where $\mathbf{y}$ is a vector of dependent variables and $f$ is a linear function of $\mathbf{y}$.This week we will use the SciPy `odeint` function in place of our implmentation. There are several advantages to using SciPy code, including that it is well-tested, provides a greater range of features, and uses more advanced techniques. One disadvantage is that it makes our code dependendent on SciPy, but it's worth it.We will also show how you can approximate solutions to systems of DEs that you may not be able to solve analytically. Setupfrom numpy import arange, empty, exp, array, linspace, isclose, allclose, sin, pi from scipy.integrate import odeint from scipy.optimize import minimize from plotly.offline import init_notebook_mode from plotly import graph_objs as goA Nonlinear DEImagine that the number of rabbits in an area as a function of time is modelled by $r(t)$ and the number of foxes in the same area at the same time is given by $f(t)$. We can modelled their linked fates using the coupled DEs\begin{align*}\frac{\mathrm{d}r}{\mathrm{d}t} &= 0.08 r - 0.0004 rf, \\\frac{\mathrm{d}f}{\mathrm{d}t} &= -0.09 f + 0.0005 rf,\end{align*}with initial conditions $r_0 = 300$ and $f_0 = 100$. See Section 6.3 (and the rest of Chapter 6) if you are curious about where this equation comes from.For us, it is important to note that- this system of DEs cannot be represented as matrix multiplied by a $(r\, f)^\intercal$ vector and- we have not studied how to solve this equation.We can still define it as a Python function, however. 
Note that in the following function `rf` is a variable that contains $r$ and $f$ as a vector (actually a NumPy `array`, but it is one-dimensional so we call it a vector).def drfdt(rf, t): r, f = rf drdt = 0.08*r - 0.0004*r*f dfdt = -0.09*f + 0.0005*r*f return array([drdt, dfdt])`odeint` works almost exactly the same as `euler_improved` did, but it takes the values of $t$ for which you would like $r$ and $f$ as inputs. Note that the initial values are input as `[300, 100]`, and that the first element of $t$ must correspond to the time of those initial values (in this case `t[0] == 0`).t = linspace(0, 100, 100) rf = odeint(drfdt, [300, 100], t) fig = go.Figure() fig.add_trace(go.Scatter(x=t, y=rf[:,0], name='rabbits')) fig.add_trace(go.Scatter(x=t, y=rf[:,1], name='foxes')) fig.show('png')You do not have to specify step sizes for `odeint` (like you did for `euler_improved`). It figures that out for itself. In fact, if you want to know how many rabbits and foxes that you will have after 40 years, you can call it with just that input:rf = odeint(drfdt, [300, 100], [0, 40]) print(rf[-1, 0], 'rabbits') print(rf[-1, 1], 'foxes')70.757766160244 rabbits 208.23878627992096 foxesIf you wanted to figure out exactly when the number of rabbits reaches its lowest ebb, you can minimize the result numerically, again using SciPy.def rabbits(t): return odeint(drfdt, [300, 100], [0, t])[-1, 0] result = minimize(rabbits, 40) print(result)fun: 70.67226332711697 hess_inv: array([[3.23550154]]) jac: array([-3.81469727e-06]) message: 'Optimization terminated successfully.' nfev: 10 nit: 4 njev: 5 status: 0 success: True x: array([40.73864735])You get a heap of diagnostic information from `minimize`, but the most important thing is that "Optimization terminated successfully" and that the value is stored in `x`:print('rabbits rebounded after', result.x[0], 'years')rabbits rebounded after 40.738647346149506 yearsExercisesWe will now numerically solve a differential equation that we cannot solve analytically. Pendulum motion can be defined as\begin{align*}\frac{\mathrm{d}^2\theta}{\mathrm{d}t^2} = -\frac{g}{L}\sin\theta,\end{align*}where $g$ is gravitational acceleration, $L$ is the length of the pendulum, and $\theta$ is theangle the pendulum makes with the vertical as shown in the figure.Figure 1: A simple pendulumThe above equation is a second-order nonlinear differential equation and we don’t have away to solve this equation algebraically. That is, we can’t use the characteristic equationmethod or method of undetermined coefficients to solve this equation.We can, however, convert it into a system of first-order DEs and find an approximate solution using `odeint`. By setting $v=\mathrm{d}\theta/\mathrm{d}t$, we obtain the equivalent formulation\begin{align*}\frac{\mathrm{d}\theta}{\mathrm{d}t} &= v \\\frac{\mathrm{d}v}{\mathrm{d}t} &= -\frac{g}{L}\sin\theta.\end{align*}Suppose that $g/L = 10$. Write a function to calculate the vector$\left(\begin{array}{1}\frac{\mathrm{d}\theta}{\mathrm{d}t}\\\frac{\mathrm{d}v}{\mathrm{d}t}\end{array}\right)$ as a function of $\left(\begin{array}{1}\theta\\v\end{array}\right)$ and $t$. (Note that NumPy does not care whether you return a column or row vector. 
Usually it figures out what you mean from the context.)def dthetavdt(thetav, t): z, v = thetav dzdt = v dvdt = -10*sin(z) return array([dzdt, dvdt])The following cell should run without errors.assert allclose(dthetavdt([pi/2, 0], 0), [0, -10.0]) assert allclose(dthetavdt([pi/12, 0], 0), [0, -2.5881904510252074]) assert allclose(dthetavdt([0, 1], 0), [1, 0])Use `odeint` to plot $\theta$ on the interval $0\leq t \leq 2.5$ when $\theta_0=\frac{\pi}{12}$ and $v_0=0$.t = linspace(0, 2.5, 100) thetav = odeint(dthetavdt, [pi/12, 0], t) fig = go.Figure() fig.add_trace(go.Scatter(x=t, y=thetav[:,0], name='Theta')) fig.add_trace(go.Scatter(x=t, y=thetav[:,1], name='Velocity')) fig.show('png')In the following cell, calculate and print the values for $\theta(2.5)$ and $v(2.5)$ when $\theta_0=\frac{\pi}{12}$ and $v_0=0$.thetav = odeint(dthetavdt, [pi/12, 0], [0, 2.5]) print(thetav[-1, 0], 'Theta') print(thetav[-1, 1], 'Velocity')-0.004681934744160246 Theta -0.8253870220740621 VelocityPlot $\theta$ on the interval $0\leq t \leq 2.5$ when $\theta_0=\frac{\pi}{2}$ and $v_0=0$.t = linspace(0, 2.5, 100) thetav = odeint(dthetavdt, [pi/2, 0], t) fig = go.Figure() fig.add_trace(go.Scatter(x=t, y=thetav[:,0], name='Theta')) fig.add_trace(go.Scatter(x=t, y=thetav[:,1], name='Velocity')) fig.show('png')In the following cell, calculate and print the values for $\theta(2.5)$ and $v(2.5)$ when $\theta_0=\frac{\pi}{2}$ and $v_0=0$.thetav = odeint(dthetavdt, [pi/2, 0], [0, 2.5]) print(thetav[-1, 0], 'Theta') print(thetav[-1, 1], 'Velocity')1.4510997040393572 Theta -1.5453873087645622 VelocityWebscraping 40k Hindi songs We'll be scraping http://giitaayan.com/ Phase 2 In Phase 2, we will scrape the song lyrics from all the song pagesfrom selenium import webdriver import pandas as pd import csv import time Chrome = webdriver.Chrome chromedriver = './chromedriver' browser = Chrome(chromedriver) # Table headers for the csv file table_headers = ['Song', 'Film', 'Year' 'Music Director', 'Lyricist', 'Singers'] # Opening the file in write mode and hence creating a new file with just the headers with open(r'hindi_lyrics_phase2.csv', 'w') as file: writer = csv.writer(file) writer.writerow(table_headers) # Reading the data which was scraped in Phase 1 df = pd.read_csv('hindi_lyrics_phase1.csv') df.head() %%time # This for loop iterates over each row of the csv file for i in range (len(df)): # Reading each song row as a list song_row = list(df.iloc[i]) # Extracting the song page url from each row song_url = song_row[0] time.sleep(1) try: # Opening the song page in the browser browser.get(song_url) # The given button needs to be clicked to transliterate from English Script to Hindi browser.find_element_by_id('langName').click() # Replacing the first item of the row (song url) with the lyrics of the songs song_row[0] = browser.find_element_by_id('ConvertedText').text # Writing each row to the csv file # Notice that the file is opened in append mode with open(r'hindi_lyrics_phase2.csv', 'a') as file: writer = csv.writer(file) writer.writerow(song_row) # Printing progress print(f'Writing {i+1} of {len(df)}') except Exception as e: print(e) pbar.close()Writing 1 of 10056 Writing 2 of 10056 Writing 3 of 10056 Writing 4 of 10056 Writing 5 of 10056 Writing 6 of 10056 Writing 7 of 10056 Writing 8 of 10056 Writing 9 of 10056 Writing 10 of 10056 Writing 11 of 10056 Writing 12 of 10056 Writing 13 of 10056 Writing 14 of 10056 Writing 15 of 10056 Writing 16 of 10056 Writing 17 of 10056 Writing 18 of 10056 Writing 19 of 10056 Writing 20 of 10056 
Writing 21 of 10056 Writing 22 of 10056 Writing 23 of 10056 Writing 24 of 10056 Writing 25 of 10056 Writing 26 of 10056 Writing 27 of 10056 Writing 28 of 10056 Writing 29 of 10056 Writing 30 of 10056 Writing 31 of 10056 Writing 32 of 10056 Writing 33 of 10056 Writing 34 of 10056 Writing 35 of 10056 Writing 36 of 10056 Writing 37 of 10056 Writing 38 of 10056 Writing 39 of 10056 Writing 40 of 10056 Writing 41 of 10056 Writing 42 of 10056 Writing 43 of 10056 Writing 44 of 10056 Writing 45 of 10056 Writing 46 of 10056 Writing 47 of 10056 Writing 48 of 10056 Writing 49 of 10056 Writing 50 of 10056 Writing 5[...]Assignmentimport pandas as pd data = {'Activities':['Time spent on sleep','Time spent on work','Time spent on reading/writing','Time spent with family','Diet plans'], '':[7,6,8,4,'tea with egg and meat'], '':[7,8,2,4,'data insufficient'], '':[7,4.5,5,6,'data insufficient'], '':[6,10,0,1.5,'data insufficient'], '':[7,8,4,0,'weak tea,meat and wine'], '':[8,8.5,6,0,'coffee,Beer'], '':[7,11,0,4.5,'coffee, Wine']} df= pd.DataFrame(data, columns=['Activities','','','','','','','']) dfTPCHmodel_meta = (v1, os.path.join(os.getcwd(), 'logs/tpch/version_0/checkpoints/epoch=95-step=1309151.ckpt')) version, checkpoint = model_meta estimater = ge.QueryEstimater( checkpoint, dataset='tpch:optimized', encoder=version.encoder, model_cls=version.model.CNNRegressor ) results = estimater.evaluate() json.dumps(ge.evaluate_metrics(results)) ge.display_results(results) model_meta = (v1, os.path.join(os.getcwd(), 'logs/tpch/version_0/checkpoints/epoch=95-step=1309151.ckpt')) version, checkpoint = model_meta estimater = ge.QueryEstimater( checkpoint, dataset='tpcd:optimized', encoder=version.encoder, model_cls=version.model.CNNRegressor ) results = estimater.evaluate() json.dumps(ge.evaluate_metrics(results))100%|██████████| 343/343 [00:19<00:00, 17.72it/s]ALLmodel_meta = (v1, os.path.join(os.getcwd(), 'logs/all/version_0/checkpoints/epoch=16-step=987087.ckpt')) version, checkpoint = model_meta estimater = ge.QueryEstimater( checkpoint, dataset='tpch:optimized', encoder=version.encoder, model_cls=version.model.CNNRegressor ) results = estimater.evaluate() json.dumps(ge.evaluate_metrics(results)) model_meta = (v1, os.path.join(os.getcwd(), 'logs/all/version_0/checkpoints/epoch=16-step=987087.ckpt')) version, checkpoint = model_meta estimater = ge.QueryEstimater( checkpoint, dataset='tpcd:optimized', encoder=version.encoder, model_cls=version.model.CNNRegressor ) results = estimater.evaluate() json.dumps(ge.evaluate_metrics(results)) cp_path = os.path.join(os.getcwd(), 'v7/logs/three/version_1/checkpoints/') cps = [os.path.join(cp_path, f) for f in os.listdir(cp_path)] cps = sorted(cps, key= lambda s: int(epoch_num_regex.search(s)[1])) v = v7 for cp in tqdm(cps): qe = ge.QueryEstimater(cp, dataset=tpcd, encoder=v.encoder, model_cls=v.model.GereltRegressor) r = qe.evaluate(df=tpcd_sample) print(int(epoch_num_regex.search(cp)[1]), json.dumps(ge.evaluate_metrics(r))) cp_path = os.path.join(os.getcwd(), 'v4/logs/three/version_0/checkpoints/') cps = [os.path.join(cp_path, f) for f in os.listdir(cp_path)] cps = sorted(cps, key= lambda s: int(epoch_num_regex.search(s)[1])) v = v4 for cp in tqdm(cps): qe = ge.QueryEstimater(cp, dataset=tpcd, encoder=v.encoder, model_cls=v.model.GereltRegressor) r = qe.evaluate(df=tpcd_sample) print(int(epoch_num_regex.search(cp)[1]), json.dumps(ge.evaluate_metrics(r)))Curve Fitting: Temperature as a function of month of the yeartemp_max = np.array([17,19,21,28,33,37,37,31,23,19,18,23]) 
temp_min = np.array([-62,-59,-56,-46,-32,-18,-9,-13,-25,-46,-52,-58]) months = np.arange(12) plt.plot(months,temp_max,'ro') plt.plot(months,temp_min,'bo') plt.xlabel("Month") plt.ylabel("Min and Max Temperature")Topic: Signal Processing Knowledge of Fourier Series 1. From SciPy we can use stats module to perform statistical distribution plotting.from scipy import signal t = np.linspace(0,5,100) tCreating a sine wave:x = np.sin(t) xresampling:x_resamp = signal.resample(x,25) x_resamp plt.plot(t,x) plt.plot(t[::4],x_resamp,'ko')t[::4] 4 steps: total dots = 2525 x 4 = 100, the value that was given to the linspace 'ko' : blackplt.plot(t,x) plt.plot(t[::4],x_resamp,'ko').detrended Remove linear trend along axis from data.x_detrended = signal.detrend(x) plt.plot(t,x) plt.plot(t,x_detrended)Pre-processing Data to work with in ShinyDate Updated: 04/10/2020Purpose: pre-processes data before use within Shiny.#Needed Libararies import os import numpy as np import pandas as pd from datetime import datetime pd.set_option('display.max_columns', 999) # How to display all columns of a Pandas DataFrame in Jupyter Notebook pd.options.display.max_rows # Increases the length of a printed string from a DataFrame pd.set_option('display.max_colwidth', -1) #Working Directory and Input File workingDir = "C:/Users/rjame/Documents/RShinyAppPractice/ColoradoRiverBasin/CRBShinyAppver2/data" os.chdir(workingDir) data_Input = "P_dfAllo_CRBSites.csv" #Create Dataframe df = pd.read_csv(data_Input) df df.columns #technique to check datatype of long dataframes. with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(df.dtypes) #Changing datatype of used date fields. df['PD'] = pd.to_datetime(df['PD'], errors = 'coerce') df['PD'] = pd.to_datetime(df["PD"].dt.strftime('%m/%d/%Y')) df #The Ouput #The Shiny app is responding best to UTF-8 encoding. df.to_csv('P_dfAllo_CRBSites.csv', index=False) # The rows that were removed from output.Q1: HMC's Approach Q1 a The reason that HMC focuses on real returns is to fulfill the objectives and "benchmark" of its stakeholders. HMC was established for the purpose of managing the university's endowment, pension assets, and working capital. Specifically, the endowment had a goal of distributing 4-5% of its funds to the university, and wanted to preserve the value of the remaining assets as well.To maintain this distribution rate indefinitely, the endowment would have to achieve a real rate return of 3-4%, given it received 1% in gifts per year. On top of this, the real expenses for the university was growing by an additional 3%, which would require additional returns.Focusing on nominal returns would not suffice, because the university is already "benchmarking" against its real costs. For example, if the university knows it needs to spend 7% (real) of AUM, but the fund returned only 7% (nominal) when inflation was 2%, then that would be a deterioration of 2% in the endowment pool.Therefore, if nominal returns were to be used in the MV optimization, the results may not be optimal for the purposes of the fund. Even though a certain asset class may have relatively high returns for a given amount of risk, if the returns were measured in real terms, then the mean-variance payoff may not be as high versus a different asset class. b It is simply too expensive. In 1999, the active management portion of the fund cost 49bps, or 93 million. It had 38 investment professionals managing $19 billion assets, of which they managed 68% directly. 
Sifting through thousands of securities to find the best portfolio would require additional headcount and more decision-making processes. More importantly, the cost of executing any portfolio rebalancing for a thousand or more securities would be expensive: bid/offer costs, operational maintenance, soft-dollar relationships, liquidity reserves for low trade volume and a higher probability of problematic stocks, maintaining models per stock/strategy, etc. Mathematically, the MV optimization would find it difficult to find distinct attributes across so many securities, especially if they are of the same asset class and exhibit similar MV characteristics. This may lead to spurious allocations with high offsets, similar to how we see strange betas in multicollinear regressions. c The securities or funds in each asset class must be distinct. In other words, the securities/funds that make up one asset class must have as many similar characteristics to each other as possible and be as different as possible from the make-up of other classes. Individual securities/funds should not be double-counted. In terms of MV, this means that each asset class should ideally have a distinct mean and variance from the other classes, but most importantly a correlation vs other asset classes that is not close to or equal to 1. d When first looking at TIPS, one should first consider the asset class it is most related to. This should be domestic bonds, since HMC is based in the US, TIPS is a US gov't issued security, and it is categorized as either a note or bond. If one compares the characteristics of TIPS vs other domestic bonds, then one can do the same analysis written in 1c with respect to mean, variance, and correlation to see how the product acts vs Domestic Bonds, and whether it acts similarly to Domestic Bonds, similarly to another asset class, or is unique. Given that the mandate of HMC is to achieve superior real returns, an analysis in real terms would have to be done - which is conveniently presented in Exhibits 3 and 4. Based on the graph in Exhibit 3, it is clear that TIPS has a different mean. For the relatively short period of time that both products exist, it appears the graphs exhibit different shapes or trends. Exhibit 4 presents us with more numbers to compare. We can see that Infl-Indexed Bonds as an asset class has very low s.d., and has only a correlation of 0.50 against Domestic Bonds, the asset class we would have wanted to categorize TIPS by if we had to put it somewhere. Given its unique s.d. and correlation characteristics, it makes sense to give it its own asset class - in real terms. Q2 a You would first need to convert the MV optimization formula to use real returns rather than excess returns.
You would then need to minimize $w'\Sigma w$ subject to the following constraints: Exhibit 5: $w'\tilde{\mu} = \mu^{p}$; risky asset weights must be between 0 and 1; the risk-free asset weight must be between -0.5 and 1. Exhibit 6: $w'\tilde{\mu} = \mu^{p}$; all assets must be within a 10% bound of the long-term portfolio allocation mandate. b The portfolio shows that, for a given mean-variance payoff, inflation-indexed bonds outperform both domestic equity and domestic bonds (in addition to other asset classes), and therefore the allocation should be concentrated in TIPS. The constraint is binding across all hypothetical expected real returns differing by 25bps because, no matter the portfolio return, the variances of domestic bonds/equity do not offer a comparative advantage in reducing the variance of the portfolio. Visually, with the given constraints, including these asset classes would pull our portfolio away from the efficient frontier. If we were to use an unconstrained portfolio, we would have massive long/short offsets across these asset classes - a portfolio which would be very costly to create and maintain in the market. c The deterioration in the Sharpe ratio across the board is about 2% - which is not a lot considering how different the constraints are. The vol is consistently higher across all hypothetical returns, which should be expected given we are further limiting the bounds of the portfolio. From a different perspective, we are able to diversify across every single asset class (whereas Exhibit 5 virtually eliminated 4 asset classes), at very little performance cost (2%). This way, we can make a convincing argument for adding a new asset class which will enhance returns, without unsettling anyone with a drastic portfolio overhaul. Then, it would be up to the Board to decide how conservatively they would like to bound the TIPS asset class, given Exhibit 6 shows a 0 to 100% constraint. Q2: MV Optimization 1. Summary Statistics#Import data import pandas as pd import numpy as np #assume excel sheet in same folder as jupyter notebook path_to_data_file = '/Users/xuzhen/OneDrive - The University of Chicago/FINM 36700 Portfolio Theory and Risk Management/HW/HW1/multi_asset_etf_data.xlsx' df = pd.read_excel(path_to_data_file, sheet_name="excess returns") #convert dataset to returns only with date as index df = df.set_index('Date') df_tilde = df # Display raw data with Date as index df #annualize data, starting with returns df_tilde_annual = 12 * df_tilde mu_tilde = df_tilde_annual.mean() #annualize vol, given returns have been annualized already sigma =df_tilde_annual.std()/np.sqrt(12) #create a table which includes mean, vol and the Sharpe ratio (which is return achieved per unit of vol) table1 = pd.DataFrame({'Mean':mu_tilde, 'Vol':sigma, 'Sharpe':mu_tilde/sigma}) #Question 1a and 1b: Mean and vol are presented in the table below. #As we can see, SPY has the largest Sharpe ratio, while DBC has the lowest. table1.sort_values(['Sharpe'],ascending=False)2. Descriptive Analysisimport seaborn as sns corrmap = df_tilde_annual.corr() corrmap.replace(1,np.nan,inplace = True) corrmap #Question 2a. Draw a heatmap of the correlation matrix to get a general idea of how the correlations vary. #Highest and lowest correlations follow below the heatmap.
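# With the diagonal replaced by NaN above, unstacking and sorting the correlation matrix
# yields every asset pair ordered by correlation (each pair appears twice); dropna() removes
# the NaN diagonal entries, so the first/last elements are the least/most correlated pairs.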
sns.heatmap(corrmap) corr_rank = corrmap.unstack().sort_values().dropna() max1 = corr_rank.index[-1] min1 = corr_rank.index[0] print("Highest correlation is between:"+str(max1)+" with correlation of "+str(corr_rank[-1])) print("Lowest correlation is between:{} with correlation of {}".format(min1,corr_rank[0])) dfbonds = df_tilde_annual.loc[:,['BWX','IEF','TIP']] dfbonds['BWX_Diff'] = df_tilde_annual['TIP'] - df_tilde_annual['BWX'] dfbonds['IEF_Diff'] = df_tilde_annual['TIP'] - df_tilde_annual['IEF'] dfbonds[['BWX_Diff','IEF_Diff']].plot()Question 2b: Based on taking the mean of the TIP diff vs BWX and IEF respectively, we can see that TIP outperforms both Domestic and Foreign bond MFs. However, the outperformance is marginal: TIPS vs Foreign is +13bps, and TIPS vs Domestic is +3bps. The question then is whether this outperformance is statistically significant. When evaluating the p-value of the data, it appears that the p value is high for both TIPS vs Foreign and TIPS vs Domestic, so it would be hard to say that this outperformance is in fact 'outperformance' Question 2c: Based on the data, it is difficult to say that TIPS expands the opportunity set, given that its mean returns are similar to that of IEF and BWX. The only point worth mentioning would be the fact that it has a lower standard deviation, which may offer portfolio diversification benefits and achieving optimal mean at lower variance/s.d. However, if we think about TIPS without limiting ourselves to the data, then TIPS as an investment product may offer additional benefits. For example, BWX and IEF are made up of foreign and domestic bonds, while TIPS is specifically US Gov't issued inflation linked bonds. Liquidity/market participants may be different as well in the different spaces.dfbonds.describe() from scipy.stats import ttest_ind #show that the p-values suggest that there is no significant difference between the foreign/domestic bonds and TIPS print(ttest_ind(dfbonds['BWX'], dfbonds['TIP'])) print(ttest_ind(df_tilde_annual['EFA'], dfbonds['TIP']))Ttest_indResult(statistic=-0.6655860356292569, pvalue=0.5061940502903506) Ttest_indResult(statistic=1.3041039284832523, pvalue=0.19321133819271927)3. The MV frontier# (a) Compute and display the weights of the tangency portfolio: def compute_tangency(df_tilde, diagonalize_Sigma=False): Sigma = df_tilde.cov() # N is the number of assets N =Sigma.shape[0] Sigma_adj = Sigma.copy() if diagonalize_Sigma: Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj)) mu_tilde = df_tilde.mean() Sigma_inv = np.linalg.inv(Sigma_adj) weights = Sigma_inv @ mu_tilde / (np.ones(N) @ Sigma_inv @ mu_tilde) omega_tangency = pd.Series(weights, index=mu_tilde.index) return omega_tangency, mu_tilde, Sigma omega_tangency, mu_tilde, Sigma = compute_tangency(df_tilde_annual) omega_tangency # (b) Compute the mean, volatility and Sharpe ratio for the tangency # Mean mean = mu_tilde @ omega_tangency # Volatility vol = np.sqrt(omega_tangency @ Sigma @ omega_tangency)/np.sqrt(12) # Sharpe ratio sharpe_ratio = mean/vol print("Mean: ",mean, ", vol: ",vol,", sharpe_ratio: ",sharpe_ratio)Mean: 0.23776716339549708 , vol: 0.10480197847812801 , sharpe_ratio: 2.2687278126635624. 
The allocation# (a) Compute and display the weights of MV portfolios with target returns =0.01 def target_mv_portfolio(df_tilde, target_return=0.01*12, diagonalize_Sigma=False): omega_tangency, mu_tilde, Sigma = compute_tangency(df_tilde, diagonalize_Sigma=diagonalize_Sigma) Sigma_adj = Sigma.copy() if diagonalize_Sigma: Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj)) Sigma_inv = np.linalg.inv(Sigma_adj) N = Sigma_adj.shape[0] delta_tilde = ((np.ones(N) @ Sigma_inv @ mu_tilde)/(mu_tilde @ Sigma_inv @ mu_tilde)) * target_return omega_star = delta_tilde * omega_tangency return omega_star omega_star = target_mv_portfolio(df_tilde_annual, target_return=0.01*12) omega_star omega_star.sum() # (b) What is the mean, volatility, and Sharpe ratio for $w^p$? # Mean mean = mu_tilde @ omega_star # Volatility vol = np.sqrt(omega_star @ Sigma @ omega_star)/np.sqrt(12) # Sharpe Ratio sharpe_ratio = mean/vol print("Mean: ",mean,", vol: ",vol,", sharpe_ratio: ",sharpe_ratio) # (c) Discuss the allocation. In which asset is the portfolio most long? and short? omega_star.sort_values(ascending=False) #The longest position is in Domestic Bonds, which does not have the highest Sharpe Ratio. #This is a reminder that MV optimization will put strong weights on some securities due to their covariances, #not due to their means, vols or Sharpe Ratios # (d) Does this line up with wich assets have the strongest Sharpe ratios? # See tables. It does not.5. Simple Portfolios# (a) Calculate the performance of the equally-weighted portfolio over the sample. # Rescale the entire weighting vector to have target mean w ̃ = .01. # Report its mean, volatility, and Sharpe ratio. def compute_equal_weight(df_tilde, target_mean): Sigma = df_tilde.cov() # N is the number of assets N =Sigma.shape[0] mu_tilde = df_tilde.mean() original_equal_weight = np.ones(N) original_mean = mu_tilde @ original_equal_weight scaler = target_mean / original_mean weights = scaler * original_equal_weight omega_equal = pd.Series(weights, index=mu_tilde.index) return omega_equal, mu_tilde, Sigma # annual_target_mean = 12 * mu_p_tilde = 0.12 omega_equal, mu_tilde, Sigma = compute_equal_weight(df_tilde_annual, 0.12) print(omega_equal) print(omega_equal.sum()) # Mean mean = mu_tilde @ omega_equal # Volatility vol = np.sqrt(omega_equal @ Sigma @ omega_equal)/np.sqrt(12) # Sharpe Ratio sharpe_ratio = mean/vol print("Mean: ",mean,", vol: ",vol,", sharpe_ratio: ",sharpe_ratio) # (b) Calculate the performance of the “risk-parity” portfolio over the sample. 
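# Note: "risk parity" is implemented below as inverse-volatility weighting - each asset's weight
# is proportional to 1/vol - and the whole weight vector is then rescaled so that the annualized
# portfolio mean hits the 0.12 target, mirroring compute_equal_weight above.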
def compute_risk_parity(df_tilde, target_mean): Sigma = df_tilde.cov() # N is the number of assets N =Sigma.shape[0] mu_tilde = df_tilde.mean() vol =df_tilde.std()/np.sqrt(12) original_weight = vol.copy() original_weight = original_weight.apply(lambda x: 1/x) original_mean = mu_tilde @ original_weight scaler = target_mean / original_mean weights = scaler * original_weight omega_equal = pd.Series(weights, index=mu_tilde.index) return omega_equal, mu_tilde, Sigma # annual_target_mean = 12 * mu_p_tilde = 0.12 omega_risk_parity, mu_tilde, Sigma = compute_risk_parity(df_tilde_annual, 0.12) print(omega_risk_parity) print(omega_risk_parity.sum()) # Mean mean = mu_tilde @ omega_risk_parity # Volatility vol = np.sqrt(omega_risk_parity @ Sigma @ omega_risk_parity)/np.sqrt(12) # Sharpe Ratio sharpe_ratio = mean/vol print("Mean: ",mean,", vol: ",vol,", sharpe_ratio: ",sharpe_ratio)Mean: 0.12 , vol: 0.12835250527091432 , sharpe_ratio: 0.9349252649702111(c) How does these compare to the MV portfolio from problem 2.4?\With the same target mean, the equally weighted portfolio and the risk parity portfolio from problem 2.5 both have much higher volatility than the MV portffolio from 2.4. The Sharpe ratios of the two portfolios are also much lower than the MV portfolio. This implies that these two portfolios are less desired than the MV portfolio. 6. Out-of-Sample Performance# (a) Using only data through the end of 2020, compute w^p for $/mu^p = 0.01$, ... df_temp = df_tilde_annual.loc[:'2020', :] omega_tangency, mu_tilde, Sigma = compute_tangency(df_temp) omega_star = target_mv_portfolio(df_temp, target_return=0.01*12) omega_star # (b) Calculate the portfolio's Sharpe ratio within that sample mean = omega_star @ mu_tilde vol = np.sqrt(omega_star @ Sigma @ omega_star)/np.sqrt(12) sharpe_ratio_in_sample = mean/vol sharpe_ratio_in_sample # (c) Calculate the portfolio’s Sharpe ratio based on performance in 2021. df_temp = df_tilde_annual.loc["2021", :] omega_tangency, mu_tilde, Sigma = compute_tangency(df_temp) mean = omega_star @ mu_tilde vol = np.sqrt(omega_star @ Sigma @ omega_star)/np.sqrt(12) sharpe_ratio_out_of_sample = mean/vol sharpe_ratio_out_of_sampleT12 Classifcation and clustering Cross-validating support vector machine classifier In today's tutorial, we will use the constructed data example from class to check how well a trained classifier separtes the data. The data was obtained through (hypothetical) extracellular recordings *in vivo* and post-experiment reconstruction of the recorded neurons. Firing rates (in spk/s), spike width is extracted from the extracellular recordings and the reconstruction yields the identity of the neuron, either an excitatory pyramidal cell or an inhibitory fast-spiking basket cell. First we will use the the **Support Vector Machine (SVM)** classifier (from the scikit-learn implementation) to learn the separation of the data in the two neuron types. Later, we will check the performance of the classifer through cross-validation. 1. Construction of the dataimport numpy as np import matplotlib.pyplot as plt %matplotlib inline np.random.seed(1020203) spikeWidthPy = np.random.normal(350,50,200) spikeWidthFS = np.random.normal(190,40,100) spikeWidth = np.concatenate((spikeWidthPy,spikeWidthFS)) firingRatePy = np.random.exponential(3,200) firingRateFS = np.random.exponential(10,100) firingRateFS +=3 firingRate = np.concatenate((firingRatePy,firingRateFS)) neuronType = np.zeros(300) neuronType[200:]=1 Xdata = np.column_stack((spikeWidth,firingRate)) Ydata = neuronType2. 
Data visualizationfig = plt.figure(figsize=(6,10)) fig.subplots_adjust(hspace=0.4) ax = fig.add_subplot(311) #ax.plot(spikeWidth[:200],firingRate[:200],'o') ax.plot(spikeWidth[:200],firingRate[:200],'o') ax.plot(spikeWidth[200:],firingRate[200:],'x') ax.set_ylabel('firing rate (spk/s)') ax.set_xlabel('spike width (us)') ax1 = fig.add_subplot(312) ax1.hist(spikeWidth,bins=30) ax1.set_xlabel('spike width (us)') ax1.set_ylabel('cells') ax2 = fig.add_subplot(313) ax2.hist(firingRate,bins=30) ax2.set_xlabel('firing rate (spk/s)') ax2.set_ylabel('cells') plt.show()3. Implementation of the Support Vector Machine classificationThe `svm` package from the scikit-learn library (`sklearn`) is used to implement the support vector machine classifier below.from sklearn import svm #1 from matplotlib.colors import ListedColormap #2 clf = svm.SVC(kernel='poly') #3 creates instance of svm clf.fit(Xdata, Ydata) #4 trains the SVM on the data # figure production fig=plt.figure(figsize=(6,5)) #5 markershape = ['o','x'] #6 markercolor = ['C0','C1'] #7 markerlabel = ['Pyramidal','Basket'] #8 gridRes=0.1 #9 cmap = ListedColormap(markercolor) #10 maxvalX = 450. #11 minvalX = 50 #12 maxvalY = 60 #13 minvalY = 0 #14 xgrid,ygrid=np.meshgrid(np.arange(minvalX,maxvalX,gridRes),np.arange(minvalY,maxvalY,gridRes)) #15 Z = clf.predict(np.array([xgrid.flatten(),ygrid.flatten()]).T) #16 predicts class for the plot area Z = Z.reshape(xgrid.shape) #17 plt.contourf(xgrid,ygrid,Z,alpha=0.4,cmap=cmap) #18 for yind,yval in enumerate(set(Ydata)): #19 plt.scatter(x=Xdata[Ydata==yval,0], y=Xdata[Ydata==yval,1], c=markercolor[yind],marker=markershape[yind], label=markerlabel[yind],s=50) #20 plt.xlabel('spike width (us)');plt.ylabel('firing rate (spk/s)')#21 plt.title('SVM using spike width and rate, with '+clf.kernel+' kernel') #22 plt.legend() #23/home/mgraupe/.virtualenvs/locorungs/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning)4. Check the performance of the classifier We want to evaluate the classification error of the SVM. In other words, we want to assess how well the SVM classifies new data-points. We will do this through *cross-validation*. The idea is to train the classifier on only 90 % of the data and test whether the remaining 10 % of the data are correctly classfied. Write a routine with fits 10 times the above SVM classifier to 90 % of the data, the test dataset. Use the remaining 10 % - test dataset - to evaluate the performance of the classifier. That means, calculate the percentage of erronous classification. Note, the split between training and test dataset should be different in each run.Use the function `clf.predict(xTest)` to calculate the predictions with the trained classifier from the test dataset.**Hint:** Split the dataset in train- and test subsets using the scikit-learn function `train_test_split()`. 
Check online which type of inputs this function takes and how to use it.# your code goes here from sklearn.model_selection import train_test_split nIterations = 10 lengthData = 300 nTraningDataSet = int(300*.9) nTestDataSet = 300 - nTraningDataSet print('Test with ', nTestDataSet, ' cells and training with ',nTraningDataSet,' cells.') performance = [] clf = svm.SVC(kernel='poly') for n in range(nIterations): print('iteration number :',n) xTrain, xTest, yTrain, yTest = train_test_split(Xdata,Ydata, test_size=0.1) #trainingIdx = np.random.choice(range(lengthData),nTraningDataSet, replace=False) #trainingIdx = np.sort(trainingIdx) #xTrain = Xdata[trainingIdx] #yTrain = Ydata[trainingIdx] #xTest = np.delete(Xdata,trainingIdx,axis=0) #yTest = np.delete(Ydata,trainingIdx) #print('Length of training index',len(trainingIdx)) #print('Length of training data',len(xTrain),len(yTrain)) #print('Length of test data',len(xTest),len(yTest)) clf.fit(xTrain, yTrain) print('...fitting done') yPredict = clf.predict(xTest) correctClassifications = sum(np.equal(yTest,yPredict)) print('correct classifications : ', correctClassifications) print('wrong classifications :',(len(xTest)-correctClassifications)) print('error rate : ', (len(xTest)-correctClassifications)/len(xTest)) #print(sum(np.equal(yTest,yPredict))) performance.append([correctClassifications,(len(xTest)-correctClassifications),(len(xTest)-correctClassifications)/len(xTest),len(xTest)]) performance = np.asarray(performance) print(performance) print('The mean error rate of the of the classifier is ', np.mean(performance[:,2])*100,' %')[[30. 0. 0. 30. ] [29. 1. 0.03333333 30. ] [29. 1. 0.03333333 30. ] [29. 1. 0.03333333 30. ] [29. 1. 0.03333333 30. ] [30. 0. 0. 30. ] [30. 0. 0. 30. ] [30. 0. 0. 30. ] [30. 0. 0. 30. ] [29. 1. 0.03333333 30. ]] The mean error rate of the of the classifier is 1.6666666666666667 %Under- and overfitting, model selection PreliminariesIn the first set of exercises you had to implement the training and evaluation of the linear regression and $k$-NN methods from scratch in order to practice your `numpy` skills. From this set of exercises onward, you can use the implementations provided in `scikit-learn` or other higher-level libraries. We start this set of exercises by demonstrating some of the features of `scikit-learn`.For example, implementation of linear regression model fitting with an analytical solution for the parameters is provided by the class `sklearn.linar_model.LinearRegression`. 
You can train a linear regression model in the following way:import numpy as np from sklearn import datasets, linear_model # load the diabetes dataset diabetes = datasets.load_diabetes() # use only one feature X = diabetes.data[:, np.newaxis, 2] y = diabetes.target # split the data into training/testing sets X_train = X[:-20] X_test = X[-20:] # split the targets into training/testing sets y_train = y[:-20] y_test = y[-20:] # create linear regression object model = linear_model.LinearRegression() # train the model using the training dataset model.fit(X_train, y_train)Let's visualize the training dataset and the learned regression model.%matplotlib inline import matplotlib.pyplot as plt fig = plt.figure() plt.plot(X_train, y_train, 'r.', markersize=12) X_edge = np.array([np.min(X_train, 0), np.max(X_train, 0)]) plt.plot(X_edge, model.predict(X_edge), 'b-') plt.legend(('Data', 'Linear regression'), loc='lower right') plt.title('Linear regression') plt.xlabel('$x$') plt.ylabel('$y$') plt.show()Once trained, the model can be used to make predictions on the test data:# Make predictions using the testing dataset prediction = model.predict(X_test)The next step (not shown here) is to evaluate the performance of the trained model.Note that the `scikit-learn` interface works by first initializing an object from the class that implements the machine learning model (linear regression in this case) and then fitting the initialized model using the data in the training set. Finally, the trained (fitted) model can be used to make predictions on unseen data. In fact, all models implemented in this library follow the same *initialize-fit-predict* programming interface. For example, a $k$-NN classifier can be trained in the following way:from sklearn.model_selection import train_test_split from sklearn import datasets, neighbors breast_cancer = datasets.load_breast_cancer() X = breast_cancer.data y = breast_cancer.target # make use of the train_test_split() utility function instead # of manually dividing the data X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=40) # initialize a 3-NN classifier model = neighbors.KNeighborsClassifier(n_neighbors=3) # train the model using the training dataset model.fit(X_train, y_train) # make predictions using the testing dataset prediction = model.predict(X_test)Note that the features in the breast cancer dataset have different scales (some have on average very small absolute values, and some very large), which means that the distance metric used by $k$-NN will me dominated by the features with large values. You can use any of the number of feature transformation methods implemented in `scikit-learn` to scale the features. For example, you can use the `sklearn.preprocessing.StandardScaler` method to transform all features to a have a zero mean and unit variance:from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train)The scaler has its own parameters which are the means and standard deviations of the features estimated from the training set. If you train a model with the scaled features, you will have to remember to also apply the scaling transformation every time you make a prediction on new unseen and unscaled data. This is somewhat prone to error. 
One option for making the code more robust is to create a processing pipeline that includes the scaling and $k$-NN models in a sequence:from sklearn.pipeline import Pipeline knn = neighbors.KNeighborsClassifier(n_neighbors=3) model = Pipeline([ ("scaler", scaler), ("knn", knn) ]) # train the model using the training dataset model.fit(X_train, y_train) # make predictions using the testing dataset prediction = model.predict(X_test)If you are curious, more information about the design of the `scikit-learn` application programming interface (API) can be found [in this paper](https://arxiv.org/pdf/1309.0238.pdf). Exercises Bias-variance decompositionShow that the mean squared error of the estimate of a parameter can be decomposed into an expression that includes both the bias and variance (Eq. 5.53-5.54 in "Deep learning" by Goodfellow et al.).***Answer***: $MSE = \mathbb{E} [(\hat{\theta}_{m} - \theta)^2] $let $\mu = \mathbb{E} [\hat{\theta}_{m}]$add and substract $\mu$$\mathbb{E} [(\hat{\theta}_{m} - \theta)^2] = \mathbb{E}[((\hat{\theta}_{m} - \mu ) + ( \mu - \theta))^2]$$\mathbb{E} [(\hat{\theta}_{m} - \theta)^2] = \mathbb{E}[(\hat{\theta}_{m} - \mu )^2 + 2(\hat{\theta}_{m} - \mu)(\mu - \theta)+ (\mu - \theta)^2]$$\mathbb{E}(\hat{\theta}_{m} - \mu ) = 0$$\mathbb{E} [(\hat{\theta}_{m} - \theta)^2] = \mathbb{E}[(\hat{\theta}_{m} - \mu )^2 + (\mu - \theta)^2]$$\mathbb{E} [(\hat{\theta}_{m} - \theta)^2] = \mathbb{E}[(\hat{\theta}_{m} - \mathbb{E} [\hat{\theta}_{m}] )^2 + (\mathbb{E} [\hat{\theta}_{m}] - \theta)^2]$Bias of an estimator is defined as: $bias(\boldsymbol{\hat{\theta}_{m}}) = \mathbb{E} (\boldsymbol{\hat{\theta}_{m}}) - \boldsymbol{\theta}$Variance is defined as: $var(\boldsymbol{\hat{\theta}_{m}}) = \mathbb{E}(\hat{\theta}_{m} - \mathbb{E} [\hat{\theta}_{m}] )^2$$MSE = bias(\boldsymbol{\hat{\theta}_{m}})^2 + Var ({\hat{\theta}_{m}}) $ Polynomial regressionFor this exercise we will be using generated data to better show the effects of the different polynomial orders.The data is created using the make_polynomial_regression function.%matplotlib inline def generate_dataset(n=100, degree=1, noise=1, factors=None): # Generates a dataset by adding random noise to a randomly # generated polynomial function. x = np.random.uniform(low=-1, high=1, size=n) factors = np.random.uniform(0, 10, degree+1) y = np.zeros(x.shape) for idx in range(degree+1): y += factors[idx] * (x ** idx) # add noise y += np.random.normal(-noise, noise, n) return x, y # load generated data np.random.seed(0) X, y = generate_dataset(n=100, degree=4, noise=1.5) plt.plot(X, y, 'r.', markersize=12)Implement polynomial regression using the `sklearn.preprocessing.PolynomialFeatures` transformation. Using the `sklearn.grid_search.GridSearchCV` class, perform a grid search of the polynomial order hyperparameter space with cross-validation and report the performance on an independent test set.Plot a learning curve that show the validation accuracy as a function of the polynomial order. Which models have a high bias, and which models have high variance? Motivate your answer. 
Repeat this experiment, this time using the diabetes dataset instead of the generated data.from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.metrics import accuracy_score from sklearn.linear_model import Ridge from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline # independent test set X_test, y_test = generate_dataset(n=100, degree=4, noise=1.5) # reshape because otherwise X is a 1D array instead of 2D: X_test = X_test.reshape(-1, 1) X = X.reshape(-1, 1) x_plot = np.linspace(-1, 1, 100) # polynomial order hyperparameter for grid search max_order = 5 parameters = {'order':np.arange(1,max_order+1)} x_plot = np.linspace(-1, 1, 100) # create matrix versions of these arrays X_plot = x_plot[:, np.newaxis] plt.scatter(X, y, color='navy', s=30, marker='o') colors = ['teal', 'yellowgreen', 'gold', 'orange', 'purple'] for count, degree in enumerate([ 2, 3, 4, 5]): model = make_pipeline(PolynomialFeatures(degree), Ridge()) model.fit(X, y) cvs = cross_val_score(model,X,y) y_pred = model.predict(X_plot) plt.plot(x_plot, y_pred, color=colors[count], label = 'degree = %i' % degree) plt.legend(loc = 'upper left') plt.show() # cv is the number of cross-validation folds, verbose controls how much is printed poly_grid = GridSearchCV(estimator = PolynomialFeatures(), param_grid = parameters, cv=5, scoring='accuracy', verbose=3) poly_grid.fit(X,y) # error: 'PolynomialFeatures' object has no attribute 'predict' #print((cross_val_score(poly_grid.fit(X,y))) #y_pred = poly_grid.predict(X_test) #acc = poly_grid.score(X_test,y_test) #acc = accuracy_score(y_test, y_pred, *, normalize=True, sample_weight=None) res = poly_grid.cv_results_ mean_test_score = res['mean_test_score'] # find best model model_poly = poly_grid.best_estimator_ model_poly.score(X_test, y_test) # performance of independent test set y_pred = model_poly.fit(X, y).predict(X)Unfortunately, GridSearchCV raised the error that the 'PolynomialFeatures' object has no attribute 'predict': PolynomialFeatures is only a transformer, so GridSearchCV needs a full estimator (for example a pipeline of PolynomialFeatures followed by a regressor) in order to score candidate degrees. We could not make this work, so we were not able to perform the cross-validation. However, we can look at the graph and see that a 2nd-degree polynomial is not a good option for this dataset. It has a relatively high bias compared to polynomials of 3rd or higher degree. The variance is also relatively high for the lower-degree polynomials in this case. This might suggest that the higher the degree, the better the model will perform, but that only holds because overfitting has not yet occurred; once it does, the error due to variance will grow again. ROC curve analysisA common method to evaluate binary classifiers is the receiver operating characteristic (ROC) curve. Similar to the week one practicals, implement a $k$-NN classifier on the breast cancer dataset; however, this time use the $k$-NN pipeline from the preliminary.
Train the model for different values of $k$ and evaluate their respective performance with an ROC curve, use the `sklearn.metrics.roc_curve` function.import matplotlib.pyplot as plt from sklearn.datasets import load_breast_cancer from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve from sklearn.metrics import auc from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=40) scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) # kNN for k = 1 until k = 10 fig = plt.figure(figsize = (13,20)) #subplots(5,2, figsize = (14,20)) for k in range(1,11): knn = KNeighborsClassifier(n_neighbors=k) model = Pipeline([ ("scaler", scaler), ("knn", knn) ]) # train the model using the training dataset model.fit(X_train, y_train) prediction = model.predict(X_test) y_scores = model.predict_proba(X_test) fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1]) roc_auc = auc(fpr, tpr) ax = fig.add_subplot(5,2,k) ax.plot(fpr, tpr, 'b', label = 'AUC = %0.3f' % roc_auc) ax.legend(loc = 'lower right') ax.plot([0, 1], [0, 1],'r--') ax.set_xlim([0, 1]) ax.set_ylim([0, 1]) ax.set_ylabel('True Positive Rate') ax.set_xlabel('False Positive Rate') ax.set_title('k = %i' % k) plt.suptitle("ROC Curves with Pipeline",fontsize=14) plt.subplots_adjust(hspace=0.35) from sklearn.datasets import load_breast_cancer from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve from sklearn.metrics import auc import matplotlib.pyplot as plt # new split so not exactly the same data as above X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=40) fig = plt.figure(figsize = (13,20)) for k in range(1,11): knn = KNeighborsClassifier(n_neighbors = k) knn.fit(X_train,y_train) y_scores = knn.predict_proba(X_test) fpr, tpr, threshold = roc_curve(y_test, y_scores[:, 1]) roc_auc = auc(fpr, tpr) ax = fig.add_subplot(5,2,k) ax.plot(fpr, tpr, 'b', label = 'AUC = %0.3f' % roc_auc) ax.legend(loc = 'lower right') ax.plot([0, 1], [0, 1],'r--') ax.set_xlim([0, 1]) ax.set_ylim([0, 1]) ax.set_ylabel('True Positive Rate') ax.set_xlabel('False Positive Rate') ax.set_title('k = %i' % k) plt.suptitle("ROC Curves without Pipeline",fontsize=14) plt.subplots_adjust(hspace=0.35)$F_1$ score and Dice similarity coefficientThe Dice similarity coefficient is a very popular evaluation measure for image segmentation applications. Assuming that $A$ is the ground truth segmentation of an object represented as a binary image, and $B$ is the binary output of an image segmentation method, the Dice similarity coefficient is computed as:$\text{Dice}(A,B) = \frac{2|A\cap B|}{|A| + |B|}$where $|\cdot|$ represents the cardinality of the objects (e.g. 
$|A|$ is the number of non-zero pixels in the ground truth segmentation).For example, the Dice similarity can be computed in the following way:# generate some test objecys A = np.zeros((32, 32)) A[10:-10, 10:-10] = 1 B = np.zeros((32, 32)) B[5:-15, 5:-15] = 1 dice = 2*np.sum(A*B)/(np.sum(A)+np.sum(B)) # display the results plt.plot() plt.imshow(A) plt.imshow(B, alpha=0.7) print(dice)0.3402777777777778Show that the $F_1$ score, which is the harmonic mean of precision and recall, is equivalent to the Dice similarity coefficientfrom sklearn.metrics import f1_score A = A.flatten() B = B.flatten() y_true = A y_pred = B f = f1_score(y_true, y_pred) print (f) #basic functions: #precision = tp / (tp + fp) #recall = tp / (tp + fn) #f = 2 * ((precision * recall) / (precision + recall))0.3402777777777778Project - Seminar Computer Vision by Deep Learning (CS4245) 2020/2021Group Number: 20Student 1: Student 2: Date: June 14, 2021 Instruction For correct functioning of this notebook, the dataset [morado_5may](https://www.kaggle.com/teddevrieslentsch/morado-5may) must be in the same directory as this notebook. Import necessary libraries# standard libraries import glob import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import shutil import time # widgets from IPython.display import display, clear_output import ipywidgetsRelabel Make folder with for the annotations with the new labels.root_path = 'morado_5may' relabel_path = '{}/annotations_relabel'.format(root_path) if os.path.isdir(relabel_path): shutil.rmtree(relabel_path) time.sleep(0.1) os.makedirs(relabel_path) else: os.makedirs(relabel_path)Below is the `ReLabelDataset` class for relabeling.class ReLabelDataset(object): def __init__(self, root): self.root = root # directory to dataset self.imgs = list(sorted(os.listdir('{}/images'.format(root)))) # load images self.annots = list(sorted(os.listdir('{}/annotations'.format(root)))) # load annotations self.classes = ['background', 'raw', 'ripe'] # classes self.idx = 0 # image/annotation index self.idx_last = -1 # last image/annotation index self.row_number = -1 # number of the current row self.start = True # initialize process self.img = None # image self.annot = None # annotation self.done = False # whether all images have been labeled def plot_patch(self): with out: annot = self.annot.loc[self.row_number,0:4].to_numpy() img = self.img[int(annot[1]):(int(annot[3])+1),int(annot[0]):(int(annot[2])+1),:] clear_output(True) if not self.done: plt.figure(figsize=(5, 5)) plt.imshow(img, zorder=-10) plt.title('Old label: {}'.format(self.annot.loc[self.row_number, 4])) plt.show() else: plt.figure() plt.show() def manage_ids(self): if self.row_number==len(self.annot)-1: self.save_annot() self.row_number = 0 self.idx_last = self.idx self.idx += 1 if self.idx==len(self.imgs): self.done = True else: self.idx_last = self.idx self.row_number += 1 def get_data(self): if self.idx!=self.idx_last: img_path = '{}/images/{}'.format(self.root, self.imgs[self.idx]) annot_path = '{}/annotations/{}'.format(self.root, self.annots[self.idx]) self.img = np.rot90(plt.imread(img_path), -1) self.annot = pd.read_csv(annot_path, sep=',', header=None) def save_annot(self): annot_re_path = '{}/annotations_relabel/{}'.format(self.root, self.annots[self.idx]) self.annot.sort_values(by=[4], inplace=True) self.annot.reset_index(drop=True, inplace=True) self.annot.to_csv(annot_re_path, index=0, header=0) print('The file {} has been relabeled!'.format(self.annots[self.idx])) def button_click_action(self, label): 
if not self.done: self.get_data() if not self.start: self.annot.at[self.row_number,4] = label self.start = False self.manage_ids() self.plot_patch() def left_button_click(self, click): self.button_click_action('raw') def right_button_click(self, click): self.button_click_action('ripe')Below is the tool for relabeling. The process is started by clicking on one of the two buttons. The first annotation is then plotted. You can then indicate for each image to which class it belongs. If all the annotations for one image have been made, a new .csv file is saved in the `annotations_relabel` directory that was created above.%matplotlib inline relabeler = ReLabelDataset(root_path) # create buttons for the 2 classes button_left = ipywidgets.Button(description='Raw') button_right = ipywidgets.Button(description='Ripe') # assign functions to the press of the buttons button_left.on_click(relabeler.left_button_click) button_right.on_click(relabeler.right_button_click) # output window for the plot out = ipywidgets.Output() # widget ipywidgets.VBox([ipywidgets.HBox([button_left, button_right]), out])GITHUB 121 LSDS9https://github.com/maiormarso/DS-Unit-1-Sprint-2-StatisticsLecture Youtube.https://www.youtube.com/watch?v=zhPGoHZo5i4&feature=youtu.be *Data Science Unit 1 Sprint 2 Assignment 1* Apply the t-test to real dataYour assignment is to determine which issues have "statistically significant" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values!Your goals:1. Load and clean the data (or determine the best method to drop observations when running tests)2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.013. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.014. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)Note that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis.Stretch goals:1. Refactor your code into functions so it's easy to rerun with arbitrary variables2. Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)# TODO - during class, but please help! 
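One way to frame each of the goals above as a 2-sample test (a sketch only: the helper name `party_ttest` is invented here for illustration, and it assumes a cleaned dataframe `df` with a `party` column and 0/1/NaN vote columns, as prepared in the cells that follow):

```python
from scipy.stats import ttest_ind

def party_ttest(df, issue):
    """Compare republican vs. democrat support on a single issue."""
    rep_votes = df.loc[df['party'] == 'republican', issue]
    dem_votes = df.loc[df['party'] == 'democrat', issue]
    # missing votes ('?') were converted to NaN during cleaning, so omit them
    return ttest_ind(rep_votes, dem_votes, nan_policy='omit')

# example: party_ttest(df, 'aid-to-contras')
```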
from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel import scipy.stats dir(scipy.stats) # Getting started with the assignment !wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data # Load Data import pandas as pd import numpy as np df = pd.read_csv('house-votes-84.data', header=None, names=['party','handicapped-infants','water-project', 'budget','physician-fee-freeze', 'el-salvador-aid', 'religious-groups','anti-satellite-ban', 'aid-to-contras','mx-missile','immigration', 'synfuels', 'education', 'right-to-sue','crime','duty-free', 'south-africa']) print(df.shape) df.head() df = df.replace({'?':np.NaN, 'n':0, 'y':1}) df.head() df.isnull().sum() rep = df[df.party == "republican"] print(rep.shape) rep.head() dem = df[df.party == "democrat"] print(dem.shape) dem.head() df.describe() rep.describe() dem.describe()2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01# Example 1 sample t-test from scipy.stats import ttest_1samp print(rep['aid-to-contras'].mean()) print(dem['aid-to-contras'].mean()) dem['aid-to-contras'].mean() dem['aid-to-contras'].value_counts() rep['aid-to-contras'].value_counts()The null hypothesis for the aid-to-contras bill is that the bill will pass. Therefore the correct parameter will be 1. The alternative hypothesis is that the aid-to-contras bill would not pass. The confidence level is 95%. The t-statistic is -7.354085178140069 and the p-value is 0.004363402589282088.# For now, 1-sample hypothesis - republican support is 0 ttest_1samp(dem['aid-to-contras'], 1, nan_policy='omit')3. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01# Example 1 sample t-test from scipy.stats import ttest_1samp print(rep['el-salvador-aid'].mean()) print(dem['el-salvador-aid'].mean()) rep['el-salvador-aid'].mean() rep['el-salvador-aid'].value_counts() dem['el-salvador-aid'].value_counts()The null hypothesis for the el-salvador-aid bill is that the bill will pass. Therefore the correct parameter will be 1. The alternative hypothesis is that the el-salvador-aid bill would not pass. The confidence level is 95%. The t-statistic is -2.890 and the p-value is 0.004363402589282088.# For now, 1-sample hypothesis - republican support is 0 ttest_1samp(rep['el-salvador-aid'], 1, nan_policy='omit') # For the last test, we're much closer to the null - fail to reject! # And now, like the homework - 2 sample # 2 sample compares the means of two samples, null is that they're the same # 1 sample compared a mean to a given null (above) ttest_ind(rep['el-salvador-aid'], dem['el-salvador-aid'], nan_policy='omit')4. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)dem.sum(axis = 0, skipna = True) rep.sum(axis = 0, skipna = True) # Example 1 sample t-test from scipy.stats import ttest_1samp print(dem['religious-groups'].mean()) print(rep['religious-groups'].mean()) rep['religious-groups'].mean() rep['religious-groups'].value_counts() dem['religious-groups'].value_counts()The null hypothesis for the religious-groups bill is that the bill will pass.
Therefore the correct parameter will be 1. The alternative hypothesis is that the religious-groups bill would not pass. The confidence level is 95%. The t-statistic is -2.890 and the p-value is 0.004363402589282088.# For now, 1-sample hypothesis - republican support is 0 ttest_1samp(rep['religious-groups'], 1, nan_policy='omit') # For the last test, we're much closer to the null - fail to reject! # And now, like the homework - 2 sample # 2 sample compares the means of two samples, null is that they're the same # 1 sample compared a mean to a given null (above) ttest_ind(rep['religious-groups'], dem['religious-groups'], nan_policy='omit')6. Functions In the previous chapter we developed a small procedure to segment our image of nuclei. If you develop such a routine you are going to re-use it multiple times, so it makes sense to package it into a re-usable unit. We will summarize here how to achieve that in this brief chapter.#importing packages import numpy as np import matplotlib.pyplot as plt from skimage.external.tifffile import TiffFile import course_functions datapath = course_functions.define_data_path() import skimage.morphology as skm import skimage.filters as skf #load the image to process data = TiffFile(datapath+'Data/30567/30567.tif') image = data.pages[3].asarray() plt.imshow(image,cmap = 'gray') plt.show()Let us summarize all the necessary steps within one code block#median filter image_med = skf.rank.median(image,selem=np.ones((2,2))) #local thresholding image_local_threshold = skf.threshold_local(image_med,block_size=51) image_local = image > image_local_threshold #remove tiny features image_local_eroded = skm.binary_erosion(image_local, selem= skm.disk(1)) #label image image_labeled = label(image_local_eroded) #analyze regions our_regions = regionprops(image_labeled) #create a new mask with constraints on the regions to keep newimage = np.zeros(image.shape) #fill in using region coordinates for x in our_regions: if (x.area>200) and (x.eccentricity<0.8): newimage[x.coords[:,0],x.coords[:,1]] = 1/Users/gw18g940/miniconda3/envs/teaching_test/lib/python3.6/site-packages/skimage/filters/rank/generic.py:102: UserWarning: Bitdepth of 14 may result in bad rank filter performance due to large number of bins. "performance due to large number of bins." % bitdepth)We see that we get an error message: we didn't import the label() function from skimage. Let's add it to the code, as well as the regionprops() function, which is also missing.from skimage.measure import label, regionprops #median filter image_med = skf.rank.median(image,selem=np.ones((2,2))) #local thresholding image_local_threshold = skf.threshold_local(image_med,block_size=51) image_local = image > image_local_threshold #remove tiny features image_local_eroded = skm.binary_erosion(image_local, selem= skm.disk(1)) #label image image_labeled = label(image_local_eroded) #analyze regions our_regions = regionprops(image_labeled) #create a new mask with constraints on the regions to keep newimage = np.zeros(image.shape) #fill in using region coordinates for x in our_regions: if (x.area>200):# and (x.eccentricity<0.8): newimage[x.coords[:,0],x.coords[:,1]] = 1 plt.figure(figsize=(10,10)) plt.imshow(newimage, cmap = 'gray') plt.show()We can now make a function out of it. You can choose the "level" of your function depending on your needs.
For example you could pass a filename and a plane index to the function and make it import your data, or you can pass an image directly. In addition to the image, you could pass other arguments if you want to make your function more general. For example, you might not always want to filter objects of the same size or shape, and so you can set those as parameters:from skimage.measure import label, regionprops def detect_nuclei(image, size = 200, shape = 0.8): #median filter image_med = skf.rank.median(image,selem=np.ones((2,2))) #local thresholding image_local_threshold = skf.threshold_local(image_med,block_size=51) image_local = image > image_local_threshold #remove tiny features image_local_eroded = skm.binary_erosion(image_local, selem= skm.disk(1)) #label image image_labeled = label(image_local_eroded) #analyze regions our_regions = regionprops(image_labeled) #create a new mask with constraints on the regions to keep newimage = np.zeros(image.shape) #fill in using region coordinates for x in our_regions: if (x.area>size) and (x.eccentricity<shape): newimage[x.coords[:,0],x.coords[:,1]] = 1 return newimage And now we can test the function (which now also appears in autocompletion):nuclei = detect_nuclei(image, size = 400) plt.imshow(nuclei, cmap = 'gray') plt.show()In order to avoid cluttering your notebooks with function definitions and to be able to reuse your functions across multiple notebooks, I also strongly advise you to create your own module files. Those are .py files that group multiple functions and that can be called from any notebook. Let's create one, call it my_module.py and copy our function in it. Now we can use the function like this:import my_module #or alternatively: from my_module import detect_nuclei my_module.detect_nuclei() nuclei2 = my_module.detect_nuclei(image)/Users/gw18g940/miniconda3/envs/teaching_test/lib/python3.6/site-packages/skimage/filters/rank/generic.py:102: UserWarning: Bitdepth of 14 may result in bad rank filter performance due to large number of bins. "performance due to large number of bins." % bitdepth) /Users/gw18g940/miniconda3/envs/teaching_test/lib/python3.6/site-packages/skimage/measure/_regionprops.py:250: UserWarning: regionprops and image moments (including moments, normalized moments, central moments, and inertia tensor) of 2D images will change from xy coordinates to rc coordinates in version 0.16. See http://scikit-image.org/docs/0.14.x/release_notes_and_installation.html#deprecations for details on how to avoid this message. warn(XY_TO_RC_DEPRECATION_MESSAGE) /Users/gw18g940/miniconda3/envs/teaching_test/lib/python3.6/site-packages/skimage/measure/_regionprops.py:260: UserWarning: regionprops and image moments (including moments, normalized moments, central moments, and inertia tensor) of 2D images will chang[...]We get an error because in that module we use skimage functions that were not imported **in the module itself**. We have them in the notebook, but they are not accessible from there.plt.imshow(nuclei2, cmap = 'gray') plt.show()Step 1: DBLP PARSER Designing a DBLP parser for gathering data from AI conferences.
(https://dblp.org/xml/) Importsimport requests import pprint from bs4 import BeautifulSoup import pandas as pd from pandas import DataFrame1.1 Parse authors names Parsing URLIn this example the URL from the UAI 2019 conference is taken, but any conference URL from DBLP will work.URL = 'https://dblp.org/db/conf/uai/uai2019.html' page = requests.get(URL) soup = BeautifulSoup(page.content, 'html.parser')Extracting Titletitle = soup.title.string title = title.replace("dblp: ", "")Creating a list with authors names and their DBLP corresponding URLsauthor_names = soup.find_all('span', itemprop='author') names_list=[] for names in author_names: name = names['itemprop'].split("."), names.text url = names.a['href'] names_list.append([name[1], url])Creation of the dataframedf = DataFrame( names_list, columns = ['Authors', 'Url']) df['Conference'] = title df = df.drop_duplicates()1.2 Parse authors Affiliationimport time start_time = time.time() afilliation_list=[] count = 0 for url in df.Url: page_auth = requests.get(url) soup_authors = BeautifulSoup(page_auth.content, 'html.parser') author_affilitation = soup_authors.find('div', class_='hide-body') for names in author_affilitation: try: name = names.span['itemprop'].split("."), names.text name= name[1] name = name.replace("affiliation: ", "") afilliation_list.append([name, url]) count+=1 if count%50 == 0: print('Already parsed:',count,'/',len(df.Url), '--- %s minutes ---' %round((time.time() - start_time)/60, 2)) except: afilliation_list.append([ None, url]) count+=1 if count%50 ==0: print('Already parsed:',count,'/',len(df.Url), '--- %s minutes ---' %round((time.time() - start_time)/60, 2)) df_affiliation = DataFrame( afilliation_list, columns = ['Affiliation', 'Url']) df_affiliation[8:13] print('Total number of entries:', len(df_affiliation)) print('Number of Null Affiliations:', len(df_affiliation[df_affiliation['Affiliation'].isnull()]))Total number of entries: 417 Number of Null Affiliations: 346Merge DatAffiliationframesdf_complete = df.merge(df_affiliation, how='left', on='Url')#Your A1C test result (also known as HbA1c or glycated hemoglobin) can be a good general gauge of your diabetes control, # because it provides an average blood glucose level over the past few months. #Unlike daily blood glucose test results, which are reported as mg/dL, A1C is reported as a percentage. #This can make it difficult to understand the relationship between the two. For example, if you check blood #glucose 100 times in a month, and your average result is 190 mg/dL this would lead to an A1C of approximately 8.2%, #which is above the target of 7% or lower recommended by the American Diabetes Association (ADA) for many adults who are not pregnant. #For some people, a tighter goal of 6.5% may be appropriate, and for others, a less stringent goal such as 8% may be better. #Here's how it works: #Some blood sugar (or glucose) naturally attaches itself to A1C cells as they move through your bloodstream. When this happens, the cell is considered "glycated." #Once a cell has been glycated, it stays that way. And since each A1C cell has a lifespan of about 4 months, your A1C sample will include cells that are a few days, #a few weeks and a few months old. As a result, the test covers a span of about 2 to 3 months. #The more sugar in your blood, the higher the percentage of glycated A1C cells you'll have—that percentage is your A1C test result. 
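# As a quick arithmetic check of the example above (190 mg/dL -> about 8.2%),
# the widely used ADAG regression eAG(mg/dL) = 28.7 * A1c - 46.7 can be inverted;
# the helper name below is only for illustration.
def a1c_from_eag_mgdl(eag):
    # A1c (%) estimated from an average glucose given in mg/dL
    return (eag + 46.7) / 28.7

print(round(a1c_from_eag_mgdl(190), 1))  # approximately 8.2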
#The Hemoglobin A1c (HbA1c or simply A1c for short) test is a blood test used to measure the average blood glucose concentration in your body in the past 1-3 months. #For diabetics, this is the standard way of determining how well the diabetes is controlled. An A1c of less than 7% is considered good. #If your A1c level is equal to or higher than 6.5%, you may be diagnosed as diabetic. If this is your result, book an appointment with your doctor immediately! #Prediabetes group (5.7-6.4%) - That's a red flag. You're at a high risk of diabetes, but there is still hope. #Glucose in mmol/L: A1c = (2.59 + average_blood_glucose) / 1.59 #Glucose in mg/dL: A1c = (46.7 + average_blood_glucose) / 28.7 A1c = (46.7 + 110)/28.7 print(A1c) # 110 mg/dL is about 6.1 mmol/L, so the mmol/L formula needs the converted value A1c = (2.59 + 6.1)/1.59 print(A1c)
A1c (%)   eAG (Everyday Average Glucose - mg/dL)
5         97, an average of about (76 - 120)
6         126, an average between (100 - 152)
7         154, an average between (123 - 185)
8         183, an average between (147 - 217)
9         212, an average between (170 - 249)
10        240, an average between (193 - 282)
11        269, an average between (217 - 314)
12        298, an average between (240 - 347)
eAG = 28.7 * A1c - 46.7, i.e. roughly 29 pts per 1%
DIASTOLIC            SYSTOLIC             BLOOD PRESSURE LEVEL
Diastolic 40 - 45    Systolic 70 - 79     ==============> LOW
Diastolic 50 - 59    Systolic 80 - 89     ==============> LOW
Diastolic 60 - 69    Systolic 90 - 99     ==============> LOW
Diastolic 70 - 79    Systolic 100 - 109   ============> IDEAL
Diastolic 80 - 89    Systolic 110 - 119   ============> IDEAL
Diastolic 90 - 99    Systolic 120 - 129   ============> PRE-HIGH
Diastolic 100 - 109  Systolic 130 - 139   ============> PRE-HIGH
Diastolic 110 - 119  Systolic 140 - 149   ============> PRE-HIGH
Diastolic 120 - 129  Systolic 150 - 159   ============> HIGH
Diastolic 130 - 139  Systolic 160 - 169   ============> HIGH
Diastolic 140 - 149  Systolic 170 - 179   ============> HIGH
Diastolic 150 - 159  Systolic 180 - 189   ============> HIGH
Diastolic 160 - 169  Systolic 190 - 000   ============> HIGH
1. Create a Python function that calculates the least common multiple of two numbers.# Calculate the factors of a number def factorize(number): factors = [] for n in range(2, number+1): if (number % n) == 0: factors.append(n) return factors # Get factor repetition count for a number def factors_counts(number): factors = factorize(number) res = number fc = dict() for i, f in enumerate(factors): while res % f == 0: res = res / f fc[f]=fc.get(f, 0) + 1 return fc # Calculate least common multiple def lcm(num1, num2): fc1 = factors_counts(num1) fc2 = factors_counts(num2) common_factors = set(fc1.keys()) | set(fc2.keys()) lcm_value = 1 for cf in common_factors: count = max(fc1.get(cf, 0), fc2.get(cf, 0)) lcm_value *= cf**count return lcm_value # A shorter (and general) way: lcm(a, b) = a * b / gcd(a, b) import math def lcm_short(n1, n2): return n1 * n2 // math.gcd(n1, n2) lcm(13, 22) lcm_short(13, 22)2. Create a Python function that calculates the greatest common divisor of two numbers.# Greatest common divisor based on the greatest common factor def gcd(num1, num2): f1 = factorize(num1) f2 = factorize(num2) common_factors = set(f1) & set(f2) # If there is no common factor greater than 1, the gcd is 1 if len(common_factors) == 0: return 1 else: return max(common_factors) gcd(24, 54)3. Based on random data (any you like), create a pie chart with Matplotlib.%matplotlib inline from matplotlib import pyplot as plt import numpy as np N = 5 x = np.random.randint(1, 100, size=N) plt.pie(x) plt.show()
4. Based on random data, create a scatter plot with Matplotlib. Explain what these plots can be useful for.N = 100 x, y = np.random.rand(2, N) f = plt.scatter(x, y) f.axes.set_title('A scatter plot, perfect to visualize two dimensional data!') plt.show()5. Explain how to combine 4 plots in a single figure (subplot).Matplotlib has the concept of figures and axes. A single picture or image is a figure, but a figure can contain multiple axes. Using subplots one can create a grid of axes and include them in a single figure.# Create a helper function to generate titled scatter plots def scatter(ax, x, y, fig_title): ax.scatter(x, y) ax.set_title(fig_title) # Generate random data N = 100 naxes = 4 data = np.random.rand(naxes, 2, N) # use subplots to generate 4 axes for this figure f, axes = plt.subplots(nrows=naxes, ncols=1) for i in range(4): scatter(axes[i], data[i, 0], data[i, 1], 'Figure %02d' % i) plt.tight_layout() plt.show()
Server
s = time.time() w0, b0 = model_new.layers[0].get_weights() model_ = Sequential() for layer in model_new.layers[1:]: model_.add(layer) model_.build(input_shape = model_new.layers[0].output_shape) listw = [] for d in np.arange(w0.shape[-1]): w_reshape = w0[:,:,:,d].reshape(1,-1) w_plus = [np.random.rand() for i in range(w_reshape.shape[-1])] listw.append([w_reshape[0],w_plus]) w = np.array(listw) k = np.array([[np.random.rand(),np.random.rand()], [np.random.rand(),np.random.rand()]]) kd = np.linalg.inv(k) listKW = [] for i in np.arange(w.shape[0]): listKW.append(k.dot(w[i])) kw = np.array(listKW) print('\nTime: ',time.time() - s) kw.shape
Client
s = time.time() strd, kernel = 1, 3 listXX = [] for _x in np.arange(x_.shape[0]): listX = [] for h in np.arange(0,x_.shape[1],strd): list_width = [] for width in np.arange(0,x_.shape[2],strd): width_reshape = x_[_x,h:h+kernel,width:width+kernel,:].reshape(1,-1) x_plus = [np.random.rand() for i in range(width_reshape.shape[-1])] list_width.append([width_reshape[0], x_plus]) if width+kernel == x_.shape[2]: break listX.append(list_width) if h+kernel == x_.shape[1]: break listXX.append(listX) ex = np.array(listXX) listKWXX = [] for xx in np.arange(ex.shape[0]): listKWX = [] for h in np.arange(ex.shape[1]): listwi = [] for wi in np.arange(ex.shape[2]): lw = [] for ww_ in np.arange(kw.shape[0]): lw.append(kw[ww_].dot(ex[xx,h,wi].T)) listwi.append(lw) listKWX.append(listwi) listKWXX.append(listKWX) print('\nTime: ',time.time() - s) kwx = np.array(listKWXX) kwx.shapeTime: 0.09017586708068848
Server
def Relu(x): return np.maximum(x,0) s = time.time() oy = np.zeros(kwx.shape[0:4]) for xx in np.arange(oy.shape[0]): for height in np.arange(oy.shape[1]): for width in np.arange(oy.shape[2]): for dept in np.arange(oy.shape[3]): oy[xx,height,width,dept] = Relu(kd.dot(kwx[xx,height,width,dept])[0][0]+b0[dept]) print('Model prediction: ',model_.predict(oy).argmax()) print('\nTime: ',time.time() - s) s = time.time() print('Model prediction: ',model_.predict(oy).argmax()) print('\nTime: ',time.time() - s)Build Classification Modelimport pandas as pd cuisines_df = pd.read_csv("../data/cleaned_cuisines.csv") cuisines_df.head() cuisines_label_df = cuisines_df['cuisine'] cuisines_label_df.head() cuisines_feature_df = cuisines_df.drop(['Unnamed: 0', 'cuisine'], axis=1) cuisines_feature_df.head() from sklearn.model_selection import train_test_split, cross_val_score from sklearn.linear_model import LinearRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier,
AdaBoostClassifier from sklearn.metrics import accuracy_score, precision_score, classification_report, confusion_matrix, precision_recall_curve import numpy as np X_train, X_test, y_train, y_test = train_test_split(cuisines_feature_df, cuisines_label_df, test_size= 0.3, random_state= 0) C = 10 classifiers = { 'Linear SVC': SVC(kernel='linear', C = C, probability=True , random_state=0), 'K Neighbours': KNeighborsClassifier(n_neighbors=C), 'SVC': SVC(), 'Random Forest': RandomForestClassifier(n_estimators=100), 'Ada Boost': AdaBoostClassifier(n_estimators=100) } for index, (name, classifier) in enumerate(classifiers.items()): classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print('Accuracy for %s model is %0.1f%%' % (name, accuracy*100)) print('classification Report', classification_report(y_test, y_pred))Accuracy for Linear SVC model is 78.1% classification Report precision recall f1-score support chinese 0.68 0.73 0.70 245 indian 0.84 0.90 0.87 233 japanese 0.77 0.73 0.75 249 korean 0.84 0.74 0.78 222 thai 0.80 0.80 0.80 250 accuracy 0.78 1199 macro avg 0.78 0.78 0.78 1199 weighted avg 0.78 0.78 0.78 1199 Accuracy for K Neighbours model is 73.6% classification Report precision recall f1-score support chinese 0.67 0.67 0.67 245 indian 0.82 0.85 0.83 233 japanese 0.65 0.83 0.73 249 korean 0.90 0.53 0.66 222 thai 0.75 0.80 0.77 250 accuracy 0.74 1199 macr[...]Code for part 1: Data retrieval systemimport json import requests def get_top_stories(top=10): sess = requests.Session() url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' html = sess.get(url) ids = json.loads(html.content.decode('utf-8')) ids = ids[:top] return ids ids = get_top_stories(top=10) ids def get_item_dict(ids): item_dict = {} sess = requests.Session() for item in ids: url = 'https://hacker-news.firebaseio.com/v0/item/{}.json?print=pretty'.format(item) html = sess.get(url) item_data = json.loads(html.content.decode('utf-8')) item_dict[item] = item_data return item_dict item_dict = get_item_dict(ids) item_dict def process_info(item_dict): titles = [] for key in item_dict.keys(): titles.append(item_dict[key].get('title')) item_info = "... ".join([x for x in titles]) return item_info process_info(item_dict) def get_headlines(): top_stories_ids = get_top_stories() item_dict = get_item_dict(top_stories_ids) data = process_info(item_dict) return data get_headlines()GRU Sentiment Classifier Classifying IMDB reviews by their sentiment with a *GRU*. Load dependenciesimport keras from keras.datasets import imdb from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Embedding, Dense, Flatten, Dropout, SpatialDropout1D from keras.layers import GRU from keras.callbacks import ModelCheckpoint import os from sklearn.metrics import roc_auc_score import matplotlib.pyplot as plt %matplotlib inlinec:\users\siddh\appdata\local\programs\python\python35\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. 
from ._conv import register_converters as _register_converters Using TensorFlow backend.Set Hyperparameteroutput_dir = './model_output/gru' epochs = 4 batch_size = 128 n_dim = 64 n_unique_words = 10000 max_review_length = 100 pad_type = trunc_type = 'pre' drop_embed = 0.2 n_gru = 256 dropout_gru = 0.2Load data(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words)Preprocess datax_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0) x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)Design NN Architecturemodel = Sequential() model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length)) model.add(SpatialDropout1D(drop_embed)) model.add(GRU(n_gru, dropout=dropout_gru)) model.add(Dense(1, activation='sigmoid')) model.summary()_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_1 (Embedding) (None, 100, 64) 640000 _________________________________________________________________ spatial_dropout1d_1 (Spatial (None, 100, 64) 0 _________________________________________________________________ gru_1 (GRU) (None, 256) 246528 _________________________________________________________________ dense_1 (Dense) (None, 1) 257 ================================================================= Total params: 886,785 Trainable params: 886,785 Non-trainable params: 0 _________________________________________________________________configure modelmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5") if not os.path.exists(output_dir): os.makedirs(output_dir)Train!model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])Train on 25000 samples, validate on 25000 samples Epoch 1/4 25000/25000 [==============================] - 40s 2ms/step - loss: 0.5011 - acc: 0.7486 - val_loss: 0.3536 - val_acc: 0.8433 Epoch 2/4 25000/25000 [==============================] - 30s 1ms/step - loss: 0.2986 - acc: 0.8739 - val_loss: 0.3411 - val_acc: 0.8517 Epoch 3/4 25000/25000 [==============================] - 30s 1ms/step - loss: 0.2401 - acc: 0.9047 - val_loss: 0.3585 - val_acc: 0.8417 Epoch 4/4 25000/25000 [==============================] - 30s 1ms/step - loss: 0.2036 - acc: 0.9218 - val_loss: 0.3707 - val_acc: 0.8391Evaluatemodel.load_weights(output_dir+'/weights.01.hdf5') y_hat = model.predict_proba(x_valid) y_hat[0] plt.hist(y_hat) _ = plt.axvline(x=0.5, color='orange') pct_auc = roc_auc_score(y_valid, y_hat)*100.0 "{:0.2f}".format(pct_auc)Template (Place Useful Header Here) This template file serves as our "portable library." Whenever you want to run a simulation, just make a copy of this file and adjust it to your needs in order to run an experiment. The structure is as follows:1. Imports at top2. Your Experiment in Middle3. All Functions in function_list hosted together at bottomNote: Be sure to actually run the notebook before working on your own experiments to have access to the functions. Oh, and in the future remove this note and add description of experiment, etc., etc.# these are all the imports needed across all experiments... 
some may be unnecessary for any given test import numpy as np import math import datetime import random import matplotlib.pyplot as plt import matplotlib from qiskit import IBMQ, execute, Aer, QuantumCircuit, transpile, schedule as build_schedule, assemble from qiskit import BasicAer from qiskit.visualization import plot_histogram from qiskit.providers.aer.noise import NoiseModel from qiskit.pulse import Schedule, Play, DriveChannel, SamplePulse, ConstantPulse, Delay from qiskit.pulse import InstructionScheduleMap from qiskit.tools.monitor import job_monitor %config InlineBackend.figure_format = 'svg' # makes the figures look nice matplotlib.rcParams['text.usetex'] = False %matplotlib inlinePlace your experiment runs here!!! The functions I wrote are broken up into three sections, but since this update strategy sucks, just look at function_list to see them organized better.# some useful 'globals' or future commands to use backend_list = ['ibmq_armonk', 'ibmq_ourense'] # inst_map = backend.defaults().instruction_schedule_map def load_backend(name, info_dump=True): """ Loads backend called (name). """ provider = IBMQ.load_account() backend = provider.get_backend(name) now = datetime.datetime.now() if info_dump is True: print("Backend loaded at date/time : ") print(now.strftime("%Y-%m-%d %H:%M:%S")) print("---------------------------------") print("Backend Configuration Information") print("---------------------------------") print(backend.configuration()) print("Backend Properties Information") print(backend.properties()) return backend def load_noisemodel(backend): """ Given a backend, loads in the necessary information to run a noisy simulation emulating noise on actual device. """ # set-up noise model for simulator noise_model = NoiseModel.from_backend(backend) # Get coupling map from backend coupling_map = backend.configuration().coupling_map # Get basis gates from noise model basis_gates = noise_model.basis_gates noise_info = {'noise_model': noise_model, 'coupling_map': coupling_map, 'basis_gates': basis_gates} return noise_info def get_max_runs(backend): """ Given the backend, returns the max # experiments and max # shots can be queued in a single job. """ max_experiments = backend.configuration().max_experiments max_shots = backend.configuration().max_shots max_runs = {'max_experiments': max_experiments, 'max_shots': max_shots} return max_runs def frev(circ, qubits, num_ids=1): """ Appends free evolution to [qubits] over [n] identity gates. Inputs: * qubits -- list, qubits to add identities to * num_ids -- int, number of identities to append Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_ids): ch_depth += 1 circ.barrier(qubits) circ.id(qubits) # create channel tag ch_tag = 'frev_{}'.format(num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def xy4(circ, qubits, num_rep=1, num_ids=0): """ Appends standard xy4 sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. 
Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'xy4_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def rga2x(circ, qubits, num_rep=1, num_ids=0): """ Appends RGA2 sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. Sequence: Xb X NOTE: Xb means X-bar which is pi phase flip on all axes of Y gate. Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Xb gate ch_depth += 1 circ.barrier(qubits) add_xb(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'rga2x_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def rga2y(circ, qubits, num_rep=1, num_ids=0): """ Appends RGA2 sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. Sequence: Yb Y NOTE: Xb means X-bar which is pi phase flip on all axes of Y gate. Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Yb gate ch_depth += 1 circ.barrier(qubits) add_yb(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'rga2y_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def rga2z(circ, qubits, num_rep=1, num_ids=0): """ Appends RGA2z sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. Sequence: Zb Z NOTE: Xb means X-bar which is pi phase flip on all axes of Y gate. 
Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add zb gate ch_depth += 1 circ.barrier(qubits) add_zb(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add z gate ch_depth += 1 circ.barrier(qubits) circ.z(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'rga2z_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def rga4(circ, qubits, num_rep=1, num_ids=0): """ Appends RGA4 sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. NOTE: We choose P2 = X and P1 = Y for no particular reason. NOTE: Xb means X-bar which is pi phase flip on all axes of Y gate. Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Xb gate ch_depth += 1 circ.barrier(qubits) add_xb(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Xb gate ch_depth += 1 circ.barrier(qubits) add_xb(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'rga4_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def rga8c(circ, qubits, num_rep=1, num_ids=0): """ Appends RGA8c sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. NOTE: We choose P2 = X and P1 = Y for no particular reason. NOTE: Xb means X-bar which is pi phase flip on all axes of Y gate. 
That is -- RGA8c: Y X Y X X Y X Y Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Y gate ch_depth += 1 circ.barrier(qubits) add_y(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) add_y(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) circ.y(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'rga8c_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def prep_theta_state(circ, qubits, theta=0): """ Appends u3(theta, 0, 0) gate to [qubits] in order to prepare each qubit in 1 / sqrt(2) * (cos(theta/2)|0> + sin(theta/2)|1>) state. Inputs: * qubits -- list, qubits to append to * theta -- float, superposition theta angle Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 1 circ.u3(theta, 0, 0, qubits) circ.barrier(qubits) # add channel tag ch_tag = 'ptheta_{}'.format(theta) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def decode_theta_state(circ, qubits, theta=0): """ Appends u3(-theta, 0, 0) gate to [qubits] in order to decode qubits in 1 / sqrt(2) * (cos(theta/2)|0> + sin(theta/2)|1>) state back to |0> state. Inputs: * qubits -- list, qubits to append to * theta -- float, superposition theta angle Output: * ch_depth -- int, depth added to circuit """ ch_depth = 1 circ.barrier(qubits) circ.u3(-1*theta, 0, 0, qubits) circ.barrier(qubits) # add channel tag ch_tag = 'dtheta_{}'.format(theta) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def xz4(circ, qubits, num_rep=1, num_ids=0): """ Appends xz4 sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate. 
Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Z gate ch_depth += 1 circ.barrier(qubits) circ.z(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Z gate ch_depth += 1 circ.barrier(qubits) circ.z(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) circ.x(qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'xz4_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} def native_xy4(circ, qubits, num_rep=1, num_ids=0): """ Appends standard xy4 sequence to [qubits] for [num_rep] times with [num_ids] in between each DD pulse gate using native gates. Inputs: * qubits -- list, qubits to append to * num_rep -- int, desired number of times to repeat xy4 sequence * num_ids -- int, number of identities to pad in between DD pulses Output: * {'ch_tag': ch_tag, 'ch_depth': ch_depth) --> ch_tag -- str, channel tag with identifies this evolution --> ch_depth -- int, depth added to circuit with this channel """ ch_depth = 0 for i in range(num_rep): # add Y gate ch_depth += 1 circ.barrier(qubits) add_y(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) add_x(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add Y gate ch_depth += 1 circ.barrier(qubits) add_y(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add X gate ch_depth += 1 circ.barrier(qubits) add_x(circ, qubits) # add [num_ids] I gates ch_depth += frev(circ, qubits, num_ids)['ch_depth'] # add channel tag ch_tag = 'n_xy4_{}r_{}i'.format(num_rep, num_ids) return {'ch_tag': ch_tag, 'ch_depth': ch_depth} # Adding functions which implement all the gates we need in terms of the # IBM native gates. These were done with armonk as of 1 May 2020 using # transpile(circ, backend) for some basic gates. def add_x(circ, qubits): """ Appends x gate to circuit on [qubits] in terms of native u3 gate. """ circ.u3(np.pi, 0, np.pi, qubits) return def add_y(circ, qubits): """ Appends y gate to circuit on [qubits] in terms of native u3 gate. """ circ.u3(np.pi, np.pi/2, np.pi/2, qubits) return def add_z(circ, qubits): """ Appends z gate to circuit on [qubits] in terms of native u1 gate. """ #NOTE: u1(pi) = u3(0, pi, 0) circ.u1(np.pi, qubits) return def add_xb(circ, qubits): """ Appends x bar to circ on [qubits] in terms of native gates. xbar comes from RGAn sequences and is pi rotation of x around all axes. """ #NOTE: u1(pi) = u3(0, pi, 0) = z circ.u1(np.pi, qubits) return def add_yb(circ, qubits): """ Appends y bar to circ on [qubits] in terms of native gates. ybar comes from RGAn sequences and is pi rotation of y around all axes. """ #NOTE: u1(3pi) = u1(pi) = z circ.u1(3*np.pi, qubits) return def add_zb(circ, qubits): """ Appends z bar to circ on [qubits] in terms of native gates. 
zbar comes from RGAx sequences and is pi rotation of z around all axes. """ #NOTE: u3(pi, 0, pi) = x circ.u3(np.pi, 0, np.pi, qubits) return def add_H(circ, qubits): """ Appends hadamard to circ on [qubits] in terms of native u2 gate. """ #NOTE: u2(0, pi) = u3(pi/2, 0, pi) circ.u2(0, np.pi, qubits) return def submit_job(experiments, backend, qobj_id, num_shots='max'): """ Submit [experiments] to [backend] and run [num_shots] for each of the [experiments]. Results get a qobj_id tag labelled with [qobj_id]. """ # set runs to max runs allowed by hardware if set to do so max_runs = get_max_runs(backend) if str(num_shots).lower() == 'max': num_shots = max_runs['max_shots'] # submit the job in the background and output status information program = assemble(experiments, backend=backend, shots=num_shots, qobj_id=qobj_id) job = backend.run(program) print("job id: {}".format(job.job_id())) print(job_monitor(job)) return job.result(timeout=120) def submit_test(experiments, backend, qobj_id, num_shots = 1000): """ Submits [experiments] to simulator with noisemodel of [backend] and runs [num_shots] for each """ # set up noise model of backend noise_info = load_noisemodel(backend) coupling_map = noise_info['coupling_map'] basis_gates = noise_info['basis_gates'] noise_model = noise_info['noise_model'] job = execute(experiments, Aer.get_backend('qasm_simulator'), coupling_map=coupling_map, basis_gates=basis_gates, noise_model=noise_model, shots=num_shots, qobj_id=qobj_id) return job.result() def combine_experiments(results): """ Combines the results over all experiments in a submitted job. """ counts = results.get_counts() # results of all experiments totals = {} # data to combine all results # if only one experiment, qiskit resturns dict which is already what we want if type(counts) is dict: totals = counts # otherwise, need to iterate through experiments and create one single dictionary elif type(counts) is list: for ic in counts: print(ic) # iterate over key/value pairs in results of ith count for key, value in ic.items(): if key in totals: totals[key] += value else: totals[key] = value return totals def save_raw_data(result): """ Saves data from [result] into a raw data file named after the unique job id. """ # save the raw data fname = (result.job_id + ".txt") dict_result = result.to_dict() with open(fname, 'w') as f: print(dict_result, file=f) print("Successfully saved raw data to file: {}".format(fname)) return fname def save_combined_experiments_data(result): """ Saves data from [result] which combines all experiments into one data set named after job id with tag 'comb_sum.' """ # create summary of data through counts and save summary file fname = (result.job_id + "_comb_sum.txt") totals = combine_experiments(result) with open(fname, 'w') as f: print(totals, file=f) print("Successfully saved combined experiments data to file: {}".format(fname)) return fname def save_dict_data(data, fname): """ Saves [data] which is assumed to be a dictionary. """ with open(fname, 'w') as f: print(data, file=f) print("Successfully saved combined experiments data to file: {}".format(fname)) return def read_dict(fname): """ Reads data file formatted as dictionary back into python as a dictionary. NOTE: This uses eval()--which is NOT a secure option, but this is default python so requires no imports and will be necessary since Bruno cannot run on local envrionment. Just don't use this in your own codes too often... 
""" with open(fname, 'r') as f: data_dict = eval(f.read()) return data_dict def hex_to_bin(hexstr): """ Converts a hex string into binary string. """ binstr = "{0:08b}".format(int(hexstr, 16)) return binstr def hex_dict_to_bin_dict(hexdict): """ Converts a dictionary with hexadecimal (str) keys into binary (str) keys. """ return {hex_to_bin(key): hexdict[key] for key in hexdict} def count_0s(istr): """ Counts the number of 0s in [istr] and returns that integer. """ num_0s = 0 for s in istr: if s == '0': num_0s += 1 return num_0s def dict_data_to_n0_array(data_dict): """ Converts a dictionary of the form {'101': 2, '000': 1} --> [101, 101, 000] --> [1, 1, 3] where last list specifies how many zeros each bitstring has. (We don't actually compute second list, but helps to see it this way, I think.) """ # populate data_list with relative frequencies from dict data_list = [] for key, value in data_dict.items(): for i in range(value): # get number of zeros in this string and append to list data_list.append(count_0s(key)) return np.array(data_list) # my old bootstrapping code def bootstrapci(data, n=1000, func=np.mean, ci=.95): """ Generate `n` bootstrap samples, evaluating `func` at each resampling. Then computes error bar with 'ci' confidence interval on data. """ simulations = np.zeros(n) sample_size = len(data) xbar = func(data) for c in range(n): itersample = np.random.choice(data, size=sample_size, replace=True) simulations[c] = func(itersample) diff = np.sort(simulations - xbar) lefterr_idx = int(np.floor(ci*n)) righterr_idx = int(np.ceil((1 - ci)*n)) conf_int = (xbar - diff[lefterr_idx], xbar - diff[righterr_idx]) return conf_int def per_err(mu, errbar): """ Computes percent error with respect to mean. In particular, we calculate difference between (mu - errbar[0]) / mu and (mu - errbar[1]) / mu and takes the larger of the two in abs value then just multiply by 100. """ diff1 = abs(mu - errbar[0]) diff2 = abs(mu - errbar[1]) perdif1 = (diff1 / mu) * 100 perdif2 = (diff2 / mu) * 100 if perdif1 > perdif2: return perdif1 else: return perdif2 def calc_exper_fid(exper): """ Given an single experiment's results from IBMQ, calculates the fidelity and computes bootstrapped error bars. By "fidelity" we mean the total number of zeros--that is, we assume experiment is designed to output 0 at the end. (If you encode state--decode it!) For example, if 3 qubit experiment with 10 shots gives results {'101': 3, '000': 5, '111': 2}, then the total number of zeros is 3(1) + 5(3) + 2(0) = 18, and fid = 18 / (10 * 3). Input ---------------- exper -- "results" of IBMQ experiemnt run, i.e. if you run result = submit_job(...).to_dict(), then this expects results['results'][j], i.e. the results of jth experiment. tol -- float, desired tolerance for data (i.e. 0.05 is 5% abs err mag tolerance) Output ---------------- fid -- tuple, (fidelity, -.95ci, +.95ci, p_err) -(+).95ci is lower(upper) bound of 95% confidence interval via bootstrapping and err_mag is magnitude of error w.r.t. mean. Also prints if abs mag of error is less than input tol. """ # results have hex keys--convert them to binary keys first counts = hex_dict_to_bin_dict(exper['data']['counts']) # get arbitary key of counts dict to counts number of qubits (i.e. 
length of bitstring) key0 = list(counts.keys())[0] n_qubits = len(key0) # obtain the number 0s from each shot of experiment and create a (degenerate) array # That is, if '001' shows up 50 times, populate array with 50 instances of 2 num_0s = dict_data_to_n0_array(counts) # turn num0s to fideltiy by dividing by number of qubits fids = num_0s / n_qubits # calculate mean mean_fid = np.mean(fids) # calculate confidence interval with 1000 bootstrap samples and .95 confidence interval ci = bootstrapci(fids, 1000, np.mean, .95) # calculate percent error p_err = per_err(mean_fid, ci) return (mean_fid, ci[0], ci[1], p_err) def theta_wrangle(result_dict): """ wrangles thata data for real this time """ # get global info about all experiments in this result (i.e. type of experiment run) batch_name = result_dict['qobj_id'] job_id = result_dict['job_id'] # iterate over the experimental results in results dict thetas = [] fids = [] l_ci = [] u_ci = [] p_errs = [] for exper in result_dict['results']: # extract theta value from experiment label label = exper['header']['name'] x = 'theta_' try: theta = label[label.find(x)+len(x):] except ValueError: raise ValueError("theta tag is not present in circuit names") thetas.append(float(theta)) # get statistics on data stats = calc_exper_fid(exper) # append stats to relevant lists fids.append(stats[0]) l_ci.append(stats[1]) u_ci.append(stats[2]) p_errs.append(stats[3]) data = (thetas, fids, l_ci, u_ci, p_errs) # save summary of data to file with name as (job_id)_(sim_tag)_(theta_sweep).txt fname = job_id + "_" + batch_name + "_" + "theta_sweep" with open(fname + '.txt', 'w') as f: header = "theta, fidelity, lower confidence internval, upper ci, percent error\n" f.write(header) for i in range(len(thetas)): line = str(thetas[i]) + "," + str(fids[i]) + "," + str(l_ci[i])\ + "," + str(u_ci[i]) + "," + str(p_errs[i]) + "\n" f.write(line) print("Successfully saved fid_data to file: {}".format(fname + '.txt')) # plot the data with error bars title = job_id + '_' + batch_name + '_theta_sweep' plt = plot_theta_sweep(data, title) return data def plot_theta_sweep(data, title=''): """ plots theta sweep given data as tuple of lists """ # unpack data thetas, fids, l_ci, u_ci, p_errs = data # convert to arrays for easier manipulation thetas = np.array(thetas) fids = np.array(fids) l_ci= np.array(l_ci) u_ci = np.array(u_ci) p_errs = np.array(p_errs) # get difference between upper confidence internval and mean value for errbar formatting fid_up = u_ci - fids fid_low = fids - l_ci # make thetas into more readable format thetas = thetas / np.pi # finally plot it and add labels plt.errorbar(thetas, fids, yerr=(fid_low, fid_up)) plt.xlabel('theta / pi') plt.ylabel('fidelity') plt.title(title) return pltEmbedding generator for textGoal: Generating embedding vectors for the content of messages from Jeopardy game data log using transformers model for sentences. 
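The pipeline below assumes the pickled input maps each team id to a list of message strings and produces one embedding vector per message. A minimal sketch of that transformation (the team ids and messages here are made up for illustration; the model name is the one loaded below):

# Minimal sketch of the intended input/output structure (hypothetical data).
from sentence_transformers import SentenceTransformer
import numpy as np

data = {"team_1": ["What is Jupiter?", "Final answer: Paris"],
        "team_2": ["Who is Newton?"]}

model = SentenceTransformer("roberta-base-nli-mean-tokens")
embeddings = {team_id: [np.array(model.encode(msg)) for msg in contents]
              for team_id, contents in data.items()}
# Each entry is a fixed-length vector (768 dimensions for this RoBERTa-base model).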
Importsfrom __future__ import division, print_function, absolute_import, unicode_literals import io import numpy as np import matplotlib.pyplot as plt from google.colab import files import pandas as pd import pickle as pk from time import time from typing import Text import tensorflow as tf pip install -U sentence-transformers from sentence_transformers import SentenceTransformer model = SentenceTransformer('roberta-base-nli-mean-tokens') # model = SentenceTransformer('bert-base-nli-mean-tokens') # model = SentenceTransformer('bert-large-nli-mean-tokens')100%|██████████| 459M/459M [00:06<00:00, 65.8MB/s]Helper functionsclass Timer(): def __init__(self, message: Text = None): if message: self.message = message else: self.message = 'It took {elapsed_time:.2f} {unit}.' def __enter__(self): self.start = time() return None def __exit__(self, type, value, traceback): elapsed_time = time() - self.start if elapsed_time < 60: unit = 'seconds' elif elapsed_time < 3600: unit = 'minutes' elapsed_time /= 60.0 else: unit = 'hours' elapsed_time /= 3600.0 print( self.message.format(elapsed_time=elapsed_time, unit=unit))Loading the datauploaded = files.upload() data = pk.load(io.BytesIO(uploaded['Teams_contents.pk'])) len(data)Generating embeddings bodywith Timer(): embeddings = {} for team_id, contents in data.items(): print('Team', team_id, '...') embeddings[team_id] = [] for content in contents: emb = np.array(model.encode(content)) embeddings[team_id].append(emb) len(embeddings) with open('content_embeddings_with_roberta_base.pk', 'wb') as handle: pk.dump(embeddings, handle, protocol=pk.HIGHEST_PROTOCOL) !ls files.download('content_embeddings_with_roberta_base.pk')Testing networks with fast sign perturbated samplesNow that the networks have been trained, we are going to test them over perturbated samples. One of the most simple method to generate adversarial perturbations is the so called _Fast Sign Method_ presented in [1]. The idea is quite simple: Compute the gradient of the loss w.r.t. the input for a fixed input, and keep the sign of it:$$ x_{pert} = x + \epsilon sign(\nabla_x L(x,y,\theta))$$In what follows, we are going to test our previously trained network on such perturbated images for different values of $\epsilon$. Bibliography+ [1] ., ., & . (2015). Explaining and Harnessing Adversarial Examples. Iclr 2015, 1–11. We import librairiesimport sys, os sys.path.append(os.getcwd()+'/../src') sys.path.append(os.getcwd()+'/../data') import numpy import matplotlib.pyplot as plt %matplotlib inline from IPython.display import clear_output import architectures import cifar10 import utils import perturbationsWe import the dataX_test, y_test_lab = cifar10.load_cifar(dataset='testing', path=os.getcwd()+'/../data/cifar') X_test = numpy.reshape(X_test, [X_test.shape[0], -1]) X_test = X_test/255. X_test = X_test.astype(numpy.float32) y_test = utils.labels_to_categoricals(y_test_lab)We load networksstab_net = architectures.CifarNet(path_to_logs=os.getcwd()+'/Logs') stab_net.load('Networks/StabilityTuned1') class_net = architectures.CifarNet(path_to_logs=os.getcwd()+'/Logs') class_net.load('Networks/RegularTuned') class_acc = class_net.test(X_test, y_test) print("Classicly trained network accuracy: %f"%class_acc) stab_acc = stab_net.test(X_test, y_test) print("Stability trained network accuracy: %f"%stab_acc)Classicly trained network accuracy: 0.787700 Stability trained network accuracy: 0.788500Gradient Computation We compute the gradient of the loss w.r.t. 
input for each input samplestab_gradient = perturbations.compute_fs_grad_sym(stab_net, X_test, y_test) numpy.save('Gradients/stabgrad',stab_gradient) class_gradient = perturbations.compute_fs_grad_sym(class_net, X_test, y_test) numpy.save('Gradients/classgrad',class_gradient)We load gradient from diskstab_gradient = numpy.load('Gradients/stabgrad.npy') class_gradient = numpy.load('Gradients/classgrad.npy')How does perturbed images looks for $\epsilon=0.04$X_stab = X_test + 0.04*numpy.sign(stab_gradient) X_stab /= X_stab.max() i = 10 plt.imshow(numpy.reshape(X_stab[i], [32,32,3])) plt.show() plt.imshow(numpy.reshape(X_test[i], [32,32,3]))Accuracy Computation for fixed perturbationWe compute accuracy of each for $\epsilon=0.001$eps = 0.001 X_stab = X_test + eps*numpy.sign(stab_gradient) X_stab /= X_stab.max() X_class = X_test + eps*numpy.sign(class_gradient) X_class /= X_class.max() class_acc = class_net.test(X_class, y_test) print("Classicly trained network accuracy: %f"%class_acc) stab_acc = stab_net.test(X_stab, y_test) print("Stability trained network accuracy: %f"%stab_acc)Classicly trained network accuracy: 0.757200 Stability trained network accuracy: 0.756400Performances are slightly lower for stabilized network... Accuracy curve for varying $\epsilon$We compute accuracies for different values of $\epsilon$epsilons = numpy.linspace(0.0001, 0.1, 20) accuracies = numpy.zeros([2,epsilons.shape[0]]) for epsiter in range(0,epsilons.shape[0]): X_stab = X_test.copy() + epsilons[epsiter]*numpy.sign(stab_gradient) X_stab /= X_stab.max() X_class = X_test.copy() + epsilons[epsiter]*numpy.sign(class_gradient) X_class /= X_class.max() accuracies[0,epsiter] = class_net.test(X_class, y_test) accuracies[1,epsiter] = stab_net.test(X_stab, y_test) plt.plot(epsilons,accuracies[0], label='Classicly Trained') plt.plot(epsilons,accuracies[1], label='Stability Trained') plt.legend() plt.savefig('Figures/accuracy_curv.pdf')Again, stabilized network gives the same performances as classic network Top-n Accuracies curve for fixed $\epsilon$We compute the __top-n__ accuracies for $n\in [1,10]$%%time top_ns = numpy.arange(1,11) accuracies = numpy.zeros([2, top_ns.shape[0]]) eps = 0.01 X_stab = X_test.copy() + eps*numpy.sign(stab_gradient) X_stab /= X_stab.max() X_class = X_test.copy() + eps*numpy.sign(class_gradient) X_class /= X_class.max() for itr in range(0,top_ns.shape[0]): accuracies[0,itr] = class_net.test(X_class, y_test, top=top_ns[itr]) accuracies[1,itr] = stab_net.test(X_stab, y_test, top=top_ns[itr]) plt.plot(top_ns,accuracies[0], label='Classicly Trained') plt.plot(top_ns,accuracies[1], label='Stability Trained') plt.legend(loc=4) plt.savefig('Figures/top_n.pdf')CPU times: user 5min 12s, sys: 59.6 s, total: 6min 11s Wall time: 3min 44sGetting IDs of Enabled Usersuser_list=UserGroup(connection=conn, id='UserGroup ID').list_members() enabled_users=[u["id"] for u in user_list if u["enabled"]] print(enabled_users) subs_mngr = SubscriptionManager(conn, project_name='MicroStrategy Tutorial') for sub in subs_mngr.list_subscriptions(to_dictionary=False): print(sub.id, sub.owner['id'], sub.owner['name']) #print(sub.__dict__) for sub in subs_mngr.list_subscriptions(to_dictionary=False): if sub.owner['id'] not in enabled_users: print(sub.id, sub.owner['id'], sub.owner['name']) sub.delete(force=True)Create an orphan subscriptionfrom mstrio.distribution_services import EmailSubscription, Content from datetime import datetime project_id = 'Insert Project ID' recipient_ids = ["Insert Recipient ID"] content_id = 
'Insert Content ID' schedule_id = 'Insert Schedule ID' owner_id = 'Insert Owner ID' subscription_name = 'REST_API_'+datetime.now().strftime("%Y-%m-%d__%H-%M") subject_txt='Email Subject' message_txt="Message Text" EmailSubscription.create(connection=conn, name=subscription_name, owner_id=owner_id, project_id=project_id, send_now = True, contents=[Content(id=content_id, type='report', name='Report 1', personalization=Content.Properties(format_type='EXCEL'))], schedules_ids=[schedule_id], recipients=recipient_ids, email_subject=subject_txt, email_message=message_txt, email_send_content_as="data") for sub in subs_mngr.list_subscriptions(to_dictionary=False, owner={'id':'Insert Owner ID'}): print(sub.id, sub.owner['id'], sub.owner['name'])Getting started with anndata **Authors:** [](https://twitter.com/adamgayoso), [](https://twitter.com/falexwolf) **Note**This tutorial is based on a blog posts by [Adam in 2021](https://adamgayoso.com/posts/ten_min_to_adata/) and [Alex in 2017](https://falexwolf.me/2017/introducing-anndata/). In this tutorial, we introduce basic properties of the central object, [AnnData](http://anndata.readthedocs.io/en/latest/anndata.AnnData.html) ("Annotated Data").`AnnData` is specifically designed for matrix-like data. By this we mean that we have $n$ observations, each of which can be represented as $d$-dimensional vectors, where each dimension corresponds to a variable or feature. Both the rows and columns of this $n \times d$ matrix are special in the sense that they are indexed.For instance, in scRNA-seq data, each row corresponds to a cell with a barcode, and each column corresponds to a gene with a gene id. Furthermore, for each cell and each gene we might have additional metadata, like (1) donor information for each cell, or (2) alternative gene symbols for each gene. Finally, we might have other unstructured metadata like color palletes to use for plotting. Without going into every fancy Python-based data structure, we think that still today no other alternative really exists that:* Handles sparsity* Handles unstructured data* Handles observation- and feature-level metadata* Is user-friendlyimport numpy as np import pandas as pd import anndata as ad from scipy.sparse import csr_matrix print(ad.__version__)0.8.0Initializing AnnData Let's start by building a basic AnnData object with some sparse count information, perhaps representing gene expression counts.counts = csr_matrix(np.random.poisson(1, size=(100, 2000)), dtype=np.float32) adata = ad.AnnData(counts) adataWe see that AnnData provides a representation with summary stastics of the data The initial data we passed are accessible as a sparse matrix using `adata.X`.adata.XNow, we provide the index to both the `obs` and `var` axes using `.obs_names` (resp. `.var_names`).adata.obs_names = [f"Cell_{i:d}" for i in range(adata.n_obs)] adata.var_names = [f"Gene_{i:d}" for i in range(adata.n_vars)] print(adata.obs_names[:10])Index(['Cell_0', 'Cell_1', 'Cell_2', 'Cell_3', 'Cell_4', 'Cell_5', 'Cell_6', 'Cell_7', 'Cell_8', 'Cell_9'], dtype='object')Subsetting AnnData These index values can be used to subset the AnnData, which provides a view of the AnnData object. We can imagine this to be useful to subset the AnnData to particular cell types or gene modules of interest. The rules for subsetting AnnData are quite similar to that of a Pandas DataFrame. 
You can use values in the `obs/var_names`, boolean masks, or cell index integers.adata[["Cell_1", "Cell_10"], ["Gene_5", "Gene_1900"]]Adding aligned metadata Observation/Variable level So we have the core of our object and now we'd like to add metadata at both the observation and variable levels. This is pretty simple with AnnData, both `adata.obs` and `adata.var` are Pandas DataFrames.ct = np.random.choice(["B", "T", "Monocyte"], size=(adata.n_obs,)) adata.obs["cell_type"] = pd.Categorical(ct) # Categoricals are preferred for efficiency adata.obsWe can also see now that the AnnData representation has been updated:adataSubsetting using metadata We can also subset the AnnData using these randomly generated cell types:bdata = adata[adata.obs.cell_type == "B"] bdataObservation/variable-level matrices We might also have metadata at either level that has many dimensions to it, such as a UMAP embedding of the data. For this type of metadata, AnnData has the `.obsm/.varm` attributes. We use keys to identify the different matrices we insert. The restriction of `.obsm/.varm` are that `.obsm` matrices must length equal to the number of observations as `.n_obs` and `.varm` matrices must length equal to `.n_vars`. They can each independently have different number of dimensions.Let's start with a randomly generated matrix that we can interpret as a UMAP embedding of the data we'd like to store, as well as some random gene-level metadata:adata.obsm["X_umap"] = np.random.normal(0, 1, size=(adata.n_obs, 2)) adata.varm["gene_stuff"] = np.random.normal(0, 1, size=(adata.n_vars, 5)) adata.obsmAgain, the AnnData representation is updated.adataA few more notes about `.obsm/.varm`1. The "array-like" metadata can originate from a Pandas DataFrame, scipy sparse matrix, or numpy dense array.2. When using scanpy, their values (columns) are not easily plotted, where instead items from `.obs` are easily plotted on, e.g., UMAP plots. Unstructured metadata AnnData has `.uns`, which allows for any unstructured metadata. This can be anything, like a list or a dictionary with some general information that was useful in the analysis of our data.adata.uns["random"] = [1, 2, 3] adata.unsLayers Finally, we may have different forms of our original core data, perhaps one that is normalized and one that is not. These can be stored in different layers in AnnData. For example, let's log transform the original data and store it in a layer:adata.layers["log_transformed"] = np.log1p(adata.X) adataConversion to DataFrames We can also ask AnnData to return us a DataFrame from one of the layers:adata.to_df(layer="log_transformed")We see that the `.obs_names/.var_names` are used in the creation of this Pandas object. Writing the results to disk `AnnData` comes with its own persistent HDF5-based file format: `h5ad`. If string columns with small number of categories aren't yet categoricals, `AnnData` will auto-transform to categoricals.adata.write('my_results.h5ad', compression="gzip") !h5ls 'my_results.h5ad'X Group layers Group obs Group obsm Group obsp Group uns Group var Group varm Group varp GroupWrapping up the introduction AnnData has become the standard for single-cell analysis in Python and for good reason -- it's straightforward to use and faciliatates more reproducible analyses with it's key-based storage. It's even becoming easier to convert to the popular R-based formats for single-cell analysis.Keep reading on to better understand "views", on-disk backing, and other details. 
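As a quick sanity check, the `h5ad` file written above can be read straight back into memory with `read_h5ad`; this is only a minimal sketch (the later section uses backed mode instead of an in-memory read):

# Round-trip check: load the file written above back into memory.
adata_roundtrip = ad.read_h5ad('my_results.h5ad')
print(adata_roundtrip)                      # same n_obs × n_vars and annotations as before
print(list(adata_roundtrip.layers.keys()))  # ['log_transformed']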
Views and copies For the fun of it, let's look at another metadata use case. Imagine that the observations come from instruments characterizing 10 readouts in a multi-year study with samples taken from different subjects at different sites. We'd typically get that information in some format and then store it in a DataFrame:obs_meta = pd.DataFrame({ 'time_yr': np.random.choice([0, 2, 4, 8], adata.n_obs), 'subject_id': np.random.choice(['subject 1', 'subject 2', 'subject 4', 'subject 8'], adata.n_obs), 'instrument_type': np.random.choice(['type a', 'type b'], adata.n_obs), 'site': np.random.choice(['site x', 'site y'], adata.n_obs), }, index=adata.obs.index, # these are the same IDs of observations as above! )This is how we join the readout data with the metadata. Of course, the first argument of the following call for `X` could also just be a DataFrame.adata = ad.AnnData(adata.X, obs=obs_meta, var=adata.var)Now we again have a single data container that keeps track of everything.print(adata)AnnData object with n_obs × n_vars = 100 × 2000 obs: 'time_yr', 'subject_id', 'instrument_type', 'site'Subsetting the joint data matrix can be important to focus on subsets of variables or observations, or to define train-test splits for a machine learning model. **Note**Similar to numpy arrays, AnnData objects can either hold actual data or reference another `AnnData` object. In the later case, they are referred to as "view".Subsetting AnnData objects always returns views, which has two advantages:- no new memory is allocated- it is possible to modify the underlying AnnData objectYou can get an actual AnnData object from a view by calling `.copy()` on the view. Usually, this is not necessary, as any modification of elements of a view (calling `.[]` on an attribute of the view) internally calls `.copy()` and makes the view an AnnData object that holds actual data. See the example below.adataGet access to the first 5 rows for two variables. **Note**Indexing into AnnData will assume that integer arguments to `[]` behave like `.iloc` in pandas, whereas string arguments behave like `.loc`. `AnnData` always assumes string indices.adata[:5, ['Gene_1', 'Gene_3']]This is a view! If we want an `AnnData` that holds the data in memory, let's call `.copy()`adata_subset = adata[:5, ['Gene_1', 'Gene_3']].copy()For a view, we can also set the first 3 elements of a column.print(adata[:3, 'Gene_1'].X.toarray().tolist()) adata[:3, 'Gene_1'].X = [0, 0, 0] print(adata[:3, 'Gene_1'].X.toarray().tolist())[[1.0], [2.0], [1.0]] [[0.0], [0.0], [0.0]]If you try to access parts of a view of an AnnData, the content will be auto-copied and a data-storing object will be generated.adata_subset = adata[:3, ['Gene_1', 'Gene_2']] adata_subset adata_subset.obs['foo'] = range(3)/var/folders/bd/43q20k0n6z15tdfzxvd22r7c0000gn/T/ipykernel_25768/2955902014.py:1: ImplicitModificationWarning: Trying to modify attribute `.obs` of view, initializing view as actual. 
adata_subset.obs['foo'] = range(3)Now `adata_subset` stores the actual data and is no longer just a reference to `adata`.adata_subsetEvidently, you can use all of pandas to slice with sequences or boolean indices.adata[adata.obs.time_yr.isin([2, 4])].obs.head()Partial reading of large data If a single `.h5ad` is very large, you can partially read it into memory by using backed mode:adata = ad.read('my_results.h5ad', backed='r') adata.isbackedIf you do this, you'll need to remember that the `AnnData` object has an open connection to the file used for reading:adata.filenameAs we're using it in read-only mode, we can't damage anything. To proceed with this tutorial, we still need to explicitly close it:adata.file.close()Visión General del Datasetmov_df.columns mov_df.dtypes mov_df.describe() ## Selección de variables mov_df = mov_df[['OBJECTID', 'TID', 'INICIO', 'FIN', 'HORA', 'DISTANCE', 'TYPE', 'VEL_PROMEDIO', 'DIA_SEMANA', 'MES', 'NAME_FROM', 'NAME_TO']]Análisis Unidimensional de las Variables## Conteo de la ocurrencia de una variable y un valor # Conteo de la movilidad en cada mes mov_df_sorted = mov_df.sort_values('MES') mov_df_sorted['MES'].hist(bins=15, xrot=45, grid=True) ##plt.xticks(rotation=45) mov_df['DIA_SEMANA'].value_counts(normalize=True) mov_df['NAME_FROM'].value_counts() mov_df['NAME_TO'].value_counts() mov_dfAnálisis Multidimensional de las Variables Velocidad promedio versus la trayectoria realizada.La trayectoria se va a definir como la concatenación entre NAME_FROM y NAME_TO.mov_df['TRAYEC'] = mov_df['NAME_FROM'] + ' - ' +mov_df['NAME_TO'] mov_df['TRAYEC'].value_counts()Mediana de la velocidad promedio en cada trayecto. VEL_PROMEDIO que es más común en cada trayecto:medianVel_Tray = mov_df.groupby('TRAYEC').median()['VEL_PROMEDIO'] medianVel_TrayAnálisis de Textoimport nltk from nltk.corpus import stopwords print(stopwords.words('spanish')) list_lite_NAME_TO = mov_df['NAME_TO'].value_counts().sort_values(ascending=False).index[0:10] list_lite_NAME_TO mov_df_filter_lite_NAME_TO = mov_df[mov_df['NAME_TO'].isin(list_lite_NAME_TO)] mov_df_filter_lite_NAME_TO textos_destino = '' for row in mov_df_filter_lite_NAME_TO['NAME_TO']: textos_destino = textos_destino + ' ' + row ## to check the ModuleNotFoundError: No module named 'wordcloud' ## install: ## /anaconda3/bin/python -m pip install wordcloud import sys print(sys.executable) from wordcloud import WordCloud import matplotlib.pyplot as plt wc = WordCloud(background_color= 'white') wc.generate(textos_destino) plt.axis("off") plt.imshow(wc, interpolation='bilinear') plt.show()Comparison of *revrand*'s algorithms on the SARCOS datasetIn this notebook we test how the GLM in *revrand* performs on the inverse dynamics experiment conducted in Gaussian Processes for Machine Learning, Chapter 8, page 182. In this experiment there are 21 dimensions, and 44,484 training examples. 
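The scores reported at the end are the standardized mean squared error (SMSE) and mean standardized log loss (MSLL) from `revrand.metrics`. As a reading aid (these are the usual GPML definitions, not a restatement of the package's documentation), with predictive mean $\hat{y}_i$ and variance $\sigma_i^2$ on $n$ test points:

$$\mathrm{SMSE} = \frac{\frac{1}{n}\sum_{i=1}^{n}(y_i - \hat{y}_i)^2}{\operatorname{Var}(y_{\mathrm{test}})}, \qquad \mathrm{MSLL} = \frac{1}{n}\sum_{i=1}^{n}\left[\frac{1}{2}\log\!\left(2\pi\sigma_i^2\right) + \frac{(y_i - \hat{y}_i)^2}{2\sigma_i^2}\right] - \mathrm{NLL}_{\mathrm{trivial}},$$

where $\mathrm{NLL}_{\mathrm{trivial}}$ is the same average negative log likelihood under a Gaussian fitted to the training targets. Lower is better for both, and MSLL is negative whenever the model beats that trivial baseline.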
All GP's are using square exponential covariance functions, with a separate lengthscale for each dimension.import logging import numpy as np from scipy.stats import gamma from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import WhiteKernel, RBF from revrand import GeneralizedLinearModel, StandardLinearModel, Parameter, Positive from revrand.basis_functions import RandomRBF, OrthogonalRBF from revrand.likelihoods import Gaussian from revrand.metrics import smse, msll from revrand.utils.datasets import fetch_gpml_sarcos_data from revrand.optimize import Adam, AdaDelta from plotting import fancy_yyplot import matplotlib.pyplot as pl %matplotlib inline logging.basicConfig(level=logging.INFO)SettingsALG = 'SLM' random_state = 100 if ALG == 'GLM': lenscale = gamma(2, scale=50) regularizer = gamma(2, scale=10) var = gamma(2, scale=50) nbases = 8192 nsamples = 10 batch_size = 10 maxiter = int(1e6) updater = Adam() elif ALG == 'SLM': lenscale = gamma(1, scale=50) regularizer = gamma(2, scale=10) var = gamma(2, scale=5) nbases = 512 m = 10000 elif ALG == 'GP': m = 1024 n_restarts=1 else: raise ValueError("Invalid algorithm")Load the datagpml_sarcos = fetch_gpml_sarcos_data() X_train = gpml_sarcos.train.data y_train = gpml_sarcos.train.targets X_test = gpml_sarcos.test.data y_test = gpml_sarcos.test.targets Ntrain, D = X_train.shape print("Training data shape = {}".format(X_train.shape)) print("Testing data shape = {}".format(X_test.shape))Training data shape = (44484, 21) Testing data shape = (4449, 21)Transform targets and inputsAs per GPML p23# Targets ymean = y_train.mean() y_train -= ymean y_test -= ymean # Inputs Xscaler = StandardScaler() Xscaler.fit(X_train) X_train = Xscaler.transform(X_train) X_test = Xscaler.transform(X_test)Initialise the algorithmsregularizer_init = Parameter(regularizer, Positive()) lenscale_init = Parameter(lenscale, Positive(), shape=(D,)) base = RandomRBF(nbases=nbases, Xdim=D, lenscale=lenscale_init, random_state=random_state, regularizer=regularizer_init ) var_init = Parameter(var, Positive()) if ALG == 'GLM': llhood = Gaussian(var=var_init) alg = GeneralizedLinearModel(llhood, base, updater=updater, batch_size=batch_size, maxiter=maxiter, nsamples=nsamples, random_state=random_state ) elif ALG == 'GP': kern = 3**2 * RBF(length_scale=np.ones(D), length_scale_bounds=(1e-3, 1e7)) \ + WhiteKernel(noise_level=1) alg = GaussianProcessRegressor(kernel=kern, n_restarts_optimizer=n_restarts) elif ALG == 'SLM': alg = StandardLinearModel( basis=base, var=var_init, random_state=random_state ) else: raise ValueError("Invalid algorithm")Train the algorithmsrnd = np.random.RandomState(random_state) if ALG == 'GLM': alg.fit(X_train, y_train) else: t_ind = rnd.choice(Ntrain, size=m, replace=False) alg.fit(X_train[t_ind], y_train[t_ind])INFO:revrand.optimize.decorators:Evaluating random starts... INFO:revrand.slm:ELBO = -72302.84401763417, var = 14.464154967282827, reg = 9.565635279198858, hypers = [ 46.29238006 5.5548293 24.05876563 1.85789705 110.55117079 197.95797654 3.09068458 110.61252022 43.00751326 67.83283423 49.73747613 43.59481938 1.03254508 11.78779884 39.33827622 73.29181345 14.43047373 16.83631177 95.66080436 184.45696161 108.07741274]. 
INFO:revrand.slm:ELBO = -57240.75298080301, var = 18.612476164774026, reg = 2.9819127522585234, hypers = [ 9.80567081 13.57037514 2.2949871 35.20347357 23.60047833 44.92320388 49.70475963 7.69255458 135.78494349 146.29154616 46.10244614 24.53205872 22.56404034 11.42949761 16.201058 14.15369407 9.53430219 169.97448376 157.34241188 45.56188653 65.7081283 ]. INFO:revrand.slm:ELBO = -95131.86137545099, var = 4.681210350736981, reg = 13.474065265834824, hypers = [ 4.6309996 37.542546[...]Predict and scoreif ALG == 'GLM': Ey, Vf = alg.predict_moments(X_test) Vy = Vf + alg.like_hypers_ Sy = np.sqrt(Vy) elif ALG == 'GP': Ey, Sy = alg.predict(X_test, return_std=True) Vy = Sy**2 else: Ey, Vy = alg.predict_moments(X_test) Sy = np.sqrt(Vy) print("SMSE = {}".format(smse(y_test, Ey))) print("MSLL = {}".format(msll(y_test, Ey, Vy, y_train))) # YY plot pl.figure(figsize=(15, 10)) fancy_yyplot(y_test, Ey, Ey - 2 * Sy, Ey + 2 * Sy, "Joint torque")SMSE = 0.020127672349979957 MSLL = -1.973052228872672Numerical variable description: DistributionNumerical variable description with a plot for one or several columns in a dataframe.%matplotlib inline import seaborn as sns sns.set_theme(style="whitegrid") import matplotlib.pyplot as plt import pandas as pd import numpy as np import mathfunctions## plot distribution of a column def dist(df:pd.DataFrame, column:str, ax:"matplotlib axis")->"matplotlib axis": # validation assert column in df.columns.tolist(), f"column '{column}' is not available." # plot ax = sns.stripplot(x = column, data = df, color = "red", alpha = .35, ax = ax) ax = sns.violinplot(x = column, data = df, scale="count", inner="quartile", scale_hue=False, bw=.2, ax = ax) ax = sns.boxplot(x = column, data = df, showfliers=True, showbox=False, ax = ax) # return axis return ax # plot selected columns def plot(df:pd.DataFrame, columns:"list or str", plot_function:"function", num_plots_per_row:int = 3): # if columns is only one string if isinstance(columns, str): # figsize figsize = (10, 10) # number of plots in rows / columns nrs = ncs = 1 # if is a list elif isinstance(columns, list): # number of columns to be ploted ncolumns = len(columns) # number of plots in rows / columns nrs = math.ceil(ncolumns / num_plots_per_row) ncs = num_plots_per_row if ncolumns >= num_plots_per_row else ncolumns # figsize figsize = (ncs*5, nrs*5) else: raise # create figure and axis fig, ax = plt.subplots(ncols = ncs, nrows = nrs, figsize = figsize) # if axis is an array if isinstance(ax, np.ndarray): # reshape ax = ax.ravel() # loop of axis for ii, c in enumerate(columns): # plot in cells _ = plot_function(df, c, ax[ii]) # if axis is only one else: # plots only one _ = plot_function(df, columns[0] if len(columns) == 1 else columns, ax) # display plot plt.show()data""" tips """ # load data tips = sns.load_dataset("tips") # get numerical columns numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] cols_num_tips = tips.select_dtypes(include=numerics).columns.tolist() """ iris """ # load data iris = sns.load_dataset("iris") # get numerical columns numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] cols_num_iris = iris.select_dtypes(include=numerics).columns.tolist()plots# plot only one column fig, ax = plt.subplots(ncols = 1, nrows = 1, figsize = (5, 5)) ax = dist(tips, cols_num_tips[0], ax) plt.show() # plot only one column fig, ax = plt.subplots(ncols = 1, nrows = 1, figsize = (5, 5)) ax = dist(iris, cols_num_iris[0], ax) plt.show() # plot several columns columns = cols_num_tips[:] df = 
tips.copy() plot_function = dist plot(df, columns, plot_function, num_plots_per_row = 3) # plot several columns columns = cols_num_iris[:] df = iris.copy() plot_function = dist plot(df, columns, plot_function, num_plots_per_row = 3)TensorNetworks in Neural Networks.Here, we have a small toy example of how to use a TN inside of a fully connected neural network.First off, let's install tensornetwork!pip install tensornetwork import numpy as np import matplotlib.pyplot as plt import tensorflow as tf tf.enable_v2_behavior() # Import tensornetwork import tensornetwork as tn # Set the backend to tesorflow # (default is numpy) tn.set_default_backend("tensorflow")Collecting tensornetwork [?25l Downloading https://files.pythonhosted.org/packages/d5/84/4421ac1add2011e50e8d85dc1a8446f5eeae8ad404cb4df6d4d598a61383/tensornetwork-0.2.1-py3-none-any.whl (232kB)  |█▍ | 10kB 20.6MB/s eta 0:00:01  |██▉ | 20kB 3.1MB/s eta 0:00:01  |████▎ | 30kB 4.5MB/s eta 0:00:01  |█████▋ | 40kB 3.0MB/s eta 0:00:01  |███████ | 51kB 3.7MB/s eta 0:00:01  |████████▌ | 61kB 4.3MB/s eta 0:00:01  |█████████▉ | 71kB 5.0MB/s eta 0:00:01  |███████████▎ | 81kB 5.6MB/s eta 0:00:01  |████████████▊ | 92kB 6.3MB/s eta 0:00:01  |██████████████▏ | 102kB 4.9MB/s eta 0:00:01  |███████████████▌ | 112kB 4.9MB/s eta 0:00:01  |█████████████████ | 122kB 4.[...]TensorNetwork layer definitionHere, we define the TensorNetwork layer we wish to use to replace the fully connected layer. Here, we simply use a 2 node Matrix Product Operator network to replace the normal dense weight matrix.We TensorNetwork's NCon API to keep the code short.class TNLayer(tf.keras.layers.Layer): def __init__(self): super(TNLayer, self).__init__() # Create the variables for the layer. self.a_var = tf.Variable(tf.random.normal( shape=(32, 32, 2), stddev=1.0/32.0), name="a", trainable=True) self.b_var = tf.Variable(tf.random.normal(shape=(32, 32, 2), stddev=1.0/32.0), name="b", trainable=True) self.bias = tf.Variable(tf.zeros(shape=(32, 32)), name="bias", trainable=True) def call(self, inputs): # Define the contraction. # We break it out so we can parallelize a batch using # tf.vectorized_map (see below). def f(input_vec, a_var, b_var, bias_var): # Reshape to a matrix instead of a vector. input_vec = tf.reshape(input_vec, (32,32)) # Now we create the network. a = tn.Node(a_var) b = tn.Node(b_var) x_node = tn.Node(input_vec) a[1] ^ x_node[0] b[1] ^ x_node[1] a[2] ^ b[2] # The TN should now look like this # | | # a --- b # \ / # x # Now we begin the contraction. c = a @ x_node result = (c @ b).tensor # To make the code shorter, we also could've used Ncon. # The above few lines of code is the same as this: # result = tn.ncon([x, a_var, b_var], [[1, 2], [-1, 1, 3], [-2, 2, 3]]) # Finally, add bias. return result + bias_var # To deal with a batch of items, we can use the tf.vectorized_map # function. # https://www.tensorflow.org/api_docs/python/tf/vectorized_map result = tf.vectorized_map( lambda vec: f(vec, self.a_var, self.b_var, self.bias), inputs) return tf.nn.relu(tf.reshape(result, (-1, 1024)))Smaller modelThese two models are effectively the same, but notice how the TN layer has nearly 10x fewer parameters.Dense = tf.keras.layers.Dense fc_model = tf.keras.Sequential( [ tf.keras.Input(shape=(2,)), Dense(1024, activation=tf.nn.relu), Dense(1024, activation=tf.nn.relu), Dense(1, activation=None)]) fc_model.summary() tn_model = tf.keras.Sequential( [ tf.keras.Input(shape=(2,)), Dense(1024, activation=tf.nn.relu), # Here, we replace the dense layer with our MPS. 
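# Parameter check: the two (32, 32, 2) cores contribute 2 * 32 * 32 * 2 = 4096 weights and the (32, 32) bias adds 1024, giving the 5120 trainable parameters shown in the summary below (vs. 1024 * 1024 + 1024 ≈ 1.05M for the Dense(1024) layer being replaced).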
TNLayer(), Dense(1, activation=None)]) tn_model.summary()Model: "sequential_8" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_20 (Dense) (None, 1024) 3072 _________________________________________________________________ tn_layer_4 (TNLayer) (None, 1024) 5120 _________________________________________________________________ dense_21 (Dense) (None, 1) 1025 ================================================================= Total params: 9,217 Trainable params: 9,217 Non-trainable params: 0 _________________________________________________________________Training a modelYou can train the TN model just as you would a normal neural network model! Here, we give an example of how to do it in Keras.X = np.concatenate([np.random.randn(20, 2) + np.array([3, 3]), np.random.randn(20, 2) + np.array([-3, -3]), np.random.randn(20, 2) + np.array([-3, 3]), np.random.randn(20, 2) + np.array([3, -3]),]) Y = np.concatenate([np.ones((40)), -np.ones((40))]) tn_model.compile(optimizer="adam", loss="mean_squared_error") tn_model.fit(X, Y, epochs=300, verbose=1) # Plotting code, feel free to ignore. h = 1.0 x_min, x_max = X[:, 0].min() - 5, X[:, 0].max() + 5 y_min, y_max = X[:, 1].min() - 5, X[:, 1].max() + 5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # here "model" is your model's prediction (classification) function Z = tn_model.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z) plt.axis('off') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)VS Fully Connectedfc_model.compile(optimizer="adam", loss="mean_squared_error") fc_model.fit(X, Y, epochs=300, verbose=0) # Plotting code, feel free to ignore. 
h = 1.0 x_min, x_max = X[:, 0].min() - 5, X[:, 0].max() + 5 y_min, y_max = X[:, 1].min() - 5, X[:, 1].max() + 5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # here "model" is your model's prediction (classification) function Z = fc_model.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z) plt.axis('off') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)ECM-EB Exampleimport pandas as pd import datetime from pathlib import Path from tadpole_algorithms.models import ECMEB train_set_path = Path("data/TADPOLE_D1_D2.csv") test_set_path = Path("data/tadpole_test_set.csv") train_df = pd.read_csv(train_set_path) test_set_df = pd.read_csv(test_set_path) test_set_df = test_set_df.fillna(0) model = ECMEB(confidence_intervals=False) model.train(train_df) forecast_df = model.predict(test_set_df) from tadpole_algorithms.evaluation import evaluate_forecast eval_set_df = pd.read_csv('data/TADPOLE_D4_corr.csv') evaluate_forecast(eval_set_df, forecast_df)[[ 0 86 0] [ 0 92 0] [ 0 32 0]]Market Datags_quant allows for easy access to market data.from gs_quant.data import Dataset from datetime import date, timedelta from gs_quant.session import Environment, GsSession # external users should substitute their client id and secret; please skip this step if using internal jupyterhub GsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('read_product_data',)) # Get IRSWAP Data ds_rate = Dataset('SWAPRATES_STANDARD') # Get a swap rate data set ds_rate.get_coverage() # Lets see what's available start_date = date(2019, 1, 1) end_date = date.today() - timedelta(days=1) data = ds_rate.get_data(start_date, end_date, assetId='MABXTJXXN8WJR7R8', tenor='10y') print(data.head()) ds_rate_vol = Dataset('SWAPTIONVOL_STANDARD') # Get a swaption vol data set ds_rate_vol.get_coverage() # Lets see what's available data = ds_rate_vol.get_data(start_date, end_date, assetId='MAFYB8Z4R1377A19') print(list(data)) print(data.head())ETL (Extract Transform Load)ETL은 추출(Extract), 변환(Transform), 적재(Load)를 뜻한다. 컴퓨팅에서 데이터베이스 이용의 한 과정으로 특히 데이터 웨어하우스에서 다음을 말한다. 이는 데이터를 다루는 IT쪽 용어이다.- 동일 기종 또는 타기종의 데이터 소스로부터 데이터를 추출.- 조회 또는 분석을 목적으로 데이터를 적절한 포맷이나 구조로 데이터를 저장하기 위해 데이터를 변환.- 변환 데이터를 적재. 일단 데이터를 불러서 와서 하나씩 해보자.import pandas as pd df = pd.read_csv("ranging_stock_df_python3.csv") df1. 추출(Extract)필요한데이터 칼럼을 추출한다고 이해하도록 하자.필요한 데이터는 종목명, 종목코드, 시장구분이다 이것을 이용해 나중에 날짜를 넣어 주식데이터를 불러올것이다. 링크는 아래 링크를 참고하자.- [주식 정보 수집을 위한 사전준비 및 연습](https://uikang.tistory.com/88)- [[python]외부변수 설정](https://uikang.tistory.com/93)df_extract = df[["종목명","종목코드","시장구분"]] df_extract2. 변환(Transform)이제 중복된것을 .drop_duplicates()함수를 이용해 하나만 남겨서 변환하자. 또한 인덱스 순서가 엉망인것을 볼 수 있다. 인덱스 재배열을 하여 변환하자.df_transform = df_extract[['종목명','종목코드','시장구분']].drop_duplicates() df_transform df_transform = df_transform.reset_index(drop = True) df_transform3. 적재(Load)이제 변환이 끝났으니 DB로 적재해보도록 하자.import sqlite3 con = sqlite3.connect("ETL.db") df_transform.to_sql("ETL_table",con, index=False, if_exists="replace") cur = con.cursor() cur.execute("SELECT * FROM ETL_table") df_load = pd.read_sql("SELECT * FROM ETL_table", con) df_load con.commit() con.close()4. 
확인con = sqlite3.connect("ETL.db") cur = con.cursor() cur.execute("SELECT * FROM ETL_table") df_col = pd.read_sql("SELECT * FROM ETL_table", con) df_col con.close()Data augmentation for detectron2 models#export from drone_detector.imports import * from drone_detector.utils import * import detectron2 from detectron2.data import transforms as T # export def build_aug_transforms(cfg:detectron2.config.CfgNode, flip_horiz:bool=True, flip_vert:bool=False, max_rotate:int=10, brightness_limits:Tuple[int,int]=(0.8,1.4), contrast_limits:Tuple[int,int]=(0.8,1.4), saturation_limits:Tuple[int,int]=(0.8,1.4), p_lighting:float=0.75 ) -> detectron2.data.transforms.AugmentationList: "Build a list of detectron2 augmentations" augs = [] augs.append(T.ResizeShortestEdge(cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING)) if flip_horiz: augs.append(T.RandomFlip(prob=0.5, horizontal=True, vertical=False)) if flip_vert: augs.append(T.RandomFlip(prob=0.5, horizontal=False, vertical=True)) if max_rotate: augs.append(T.RandomRotation(angle=[-max_rotate, max_rotate], expand=False)) if brightness_limits: augs.append(T.RandomApply(prob=p_lighting, tfm_or_aug=T.RandomBrightness(*brightness_limits))) if contrast_limits: augs.append(T.RandomApply(prob=p_lighting, tfm_or_aug=T.RandomContrast(*contrast_limits))) if saturation_limits: augs.append(T.RandomApply(prob=p_lighting, tfm_or_aug=T.RandomSaturation(*saturation_limits))) return augsscikit-mobility tutorials 5 - Validation- Individual measures Radius of gyration k-radius of gyration Entropy Random entropy Uncorrelatedimport skmob as sm from skmob.utils import plot from skmob.utils import constants import pandas as pd import numpy as np"Geospatial Analysis exercise - proximity analysis"> "Geospatial Analysis exercise - proximity analysis"- toc:true- branch: master- badges: true- comments: true- author: jaeeon- categories: [jupyter, python] **Introduction**You are part of a crisis response team, and you want to identify how hospitals have been responding to crash collisions in New York City.Before you get started, run the code cell below to set everything up.import math import geopandas as gpd import pandas as pd from shapely.geometry import MultiPolygon import folium from folium import Choropleth, Marker from folium.plugins import HeatMap, MarkerCluster from learntools.core import binder binder.bind(globals()) from learntools.geospatial.ex5 import *You'll use the `embed_map()` function to visualize your maps.지도를 시각화하기 위해 `embed_map()` 함수를 사용할 것입니다.def embed_map(m, file_name): from IPython.display import IFrame m.save(file_name) return IFrame(file_name, width='100%', height='500px')Exercises 1) Visualize the collision data.Run the code cell below to load a GeoDataFrame `collisions` tracking major motor vehicle collisions in 2013-2018.아래 코드 셀을 실행하여 2013-2018년의 주요 자동차 충돌을 추적하는 GeoDataFrame`collisions`을 로드합니다.collisions = gpd.read_file("../input/geospatial-learn-course-data/NYPD_Motor_Vehicle_Collisions/NYPD_Motor_Vehicle_Collisions/NYPD_Motor_Vehicle_Collisions.shp") collisions.head()Use the "LATITUDE" and "LONGITUDE" columns to create an interactive map to visualize the collision data. What type of map do you think is most effective?"LATITUDE" 및 "LONGITUDE" 열을 사용하여 충돌 데이터를 시각화하는 대화형 맵을 만듭니다. 
어떤 유형의 지도가 가장 효과적이라고 생각하십니까?m_1 = folium.Map(location=[40.7, -74], zoom_start=11) # Your code here: Visualize the collision data HeatMap(data=collisions[['LATITUDE', 'LONGITUDE']], radius=10).add_to(m_1) # Uncomment to see a hint q_1.hint() # Show the map embed_map(m_1, "q_1.html") # Get credit for your work after you have created a map q_1.check() # Uncomment to see our solution (your code may look different!) #q_1.solution()2) Understand hospital coverage.Run the next code cell to load the hospital data.다음 코드 셀을 실행하여 병원 데이터를 로드합니다.hospitals = gpd.read_file("../input/geospatial-learn-course-data/nyu_2451_34494/nyu_2451_34494/nyu_2451_34494.shp") hospitals.head()Use the "latitude" and "longitude" columns to visualize the hospital locations."위도" 및 "경도" 열을 사용하여 병원 위치를 시각화합니다.m_2 = folium.Map(location=[40.7, -74], zoom_start=11) # Your code here: Visualize the hospital locations for idx, row in hospitals.iterrows(): Marker([row['latitude'], row['longitude']], popup=row['name']).add_to(m_2) # Uncomment to see a hint q_2.hint() # Show the map embed_map(m_2, "q_2.html") # Get credit for your work after you have created a map q_2.check() # Uncomment to see our solution (your code may look different!) #q_2.solution()3) When was the closest hospital more than 10 kilometers away?Create a DataFrame `outside_range` containing all rows from `collisions` with crashes that occurred more than 10 kilometers from the closest hospital.Note that both `hospitals` and `collisions` have EPSG 2263 as the coordinate reference system, and EPSG 2263 has units of meters.------------------------------**3) 10km 이상 떨어진 가장 가까운 병원은 언제였습니까?**가장 가까운 병원에서 10km 이상 떨어진 곳에서 발생한 충돌이 있는 `collisions` 의 모든 행을 포함하는 DataFrame `outside_range`를 만듭니다.`hospitals`과 `collisions` 모두 좌표 참조 시스템으로 EPSG 2263을 사용하고 EPSG 2263은 미터 단위를 사용합니다.# Your code here hos_df_buffer = gpd.GeoDataFrame(geometry=hospitals.geometry).buffer(10000) print(hos_df_buffer) hos_union = hos_df_buffer.geometry.unary_union hos_union # 가장 가까운 병원에서 10km 이상 떨어진 곳에서 발생한 충돌이 있는 collisions 의 모든 행을 포함 outside_range = collisions.loc[~collisions["geometry"].apply(lambda x: hos_union.contains(x))] outside_range # Check your answer q_3.check() # Lines below will give you a hint or solution code q_3.hint() #q_3.solution()The next code cell calculates the percentage of collisions that occurred more than 10 kilometers away from the closest hospital.다음 코드 셀은 가장 가까운 병원에서 10km 이상 떨어진 곳에서 발생한 충돌의 비율을 계산합니다.percentage = round(100*len(outside_range)/len(collisions), 2) print("Percentage of collisions more than 10 km away from the closest hospital: {}%".format(percentage))4) Make a recommender.When collisions occur in distant locations, it becomes even more vital that injured persons are transported to the nearest available hospital.With this in mind, you decide to create a recommender that:- takes the location of the crash (in EPSG 2263) as input,- finds the closest hospital (where distance calculations are done in EPSG 2263), and - returns the name of the closest hospital. 
--------------------------멀리 떨어진 곳에서 충돌이 발생하면 부상자를 가까운 병원으로 이송하는 것이 더욱 중요해집니다.이를 염두에 두고 다음과 같은 추천자를 만들기로 결정합니다.- 충돌 위치(EPSG 2263에서)를 입력으로 사용합니다.- 가장 가까운 병원을 찾습니다(거리 계산은 EPSG 2263에서 수행됨).- 가장 가까운 병원의 이름을 반환합니다.def best_hospital(collision_location): # Your code here idx_min = hospitals.geometry.distance(collision_location).idxmin() my_hospital = hospitals.iloc[idx_min] name = my_hospital["name"] return name # Test your function: this should suggest CALVARY HOSPITAL INC print(best_hospital(outside_range.geometry.iloc[0])) # Check your answer q_4.check() # Lines below will give you a hint or solution code q_4.hint() #q_4.solution()5) Which hospital is under the highest demand?Considering only collisions in the `outside_range` DataFrame, which hospital is most recommended? Your answer should be a Python string that exactly matches the name of the hospital returned by the function you created in **4)**.-------------------------**5) 수요가 가장 많은 병원은?**`outside_range` DataFrame에서 충돌만 고려한다면 어느 병원을 가장 추천하는가?**4)**에서 생성한 함수가 반환한 병원 이름과 정확히 일치하는 Python 문자열이어야 합니다.print(best_hospital(outside_range.geometry.iloc[0])) #> CALVARY HOSPITAL INC # Your code here highest_demand = outside_range.geometry.apply(best_hospital).value_counts().idxmax highest_demand # Check your answer q_5.check() # Lines below will give you a hint or solution code q_5.hint() #q_5.solution()6) Where should the city construct new hospitals?Run the next code cell (without changes) to visualize hospital locations, in addition to collisions that occurred more than 10 kilometers away from the closest hospital. 다음 코드 셀(변경 없이)을 실행하여 가장 가까운 병원에서 10km 이상 떨어진 곳에서 발생한 충돌 외에도 병원 위치를 시각화합니다.m_6 = folium.Map(location=[40.7, -74], zoom_start=11) coverage = gpd.GeoDataFrame(geometry=hospitals.geometry).buffer(10000) folium.GeoJson(coverage.geometry.to_crs(epsg=4326)).add_to(m_6) HeatMap(data=outside_range[['LATITUDE', 'LONGITUDE']], radius=9).add_to(m_6) folium.LatLngPopup().add_to(m_6) embed_map(m_6, 'm_6.html')Click anywhere on the map to see a pop-up with the corresponding location in latitude and longitude.The city of New York reaches out to you for help with deciding locations for two brand new hospitals. They specifically want your help with identifying locations to bring the calculated percentage from step **3)** to less than ten percent. Using the map (and without worrying about zoning laws or what potential buildings would have to be removed in order to build the hospitals), can you identify two locations that would help the city accomplish this goal? Put the proposed latitude and longitude for hospital 1 in `lat_1` and `long_1`, respectively. (Likewise for hospital 2.)Then, run the rest of the cell as-is to see the effect of the new hospitals. Your answer will be marked correct, if the two new hospitals bring the percentage to less than ten percent.-----------------------------지도의 아무 곳이나 클릭하면 해당 위치의 위도 및 경도 팝업이 표시됩니다.New York 시는 두 곳의 새로운 병원을 지을 위치를 결정하는 데 도움을 드리기 위해 연락을 드립니다. 그들은 특히 **3)** 단계에서 계산된 백분율을 10% 미만으로 만들기 위해 위치 식별에 대한 귀하의 도움을 원합니다. 지도를 사용하여(지역 설정법이나 병원을 건설하기 위해 제거해야 할 잠재적 건물에 대해 걱정하지 않고) 도시가 이 목표를 달성하는 데 도움이 될 두 위치를 식별할 수 있습니까?병원 1에 대해 제안된 위도와 경도를 각각 `lat_1`과 `long_1`에 넣습니다. (병원2도 마찬가지)그런 다음 나머지 셀을 그대로 실행하여 새 병원의 효과를 확인하십시오. 
두 개의 새로운 병원에서 백분율을 10% 미만으로 낮추면 답이 정답으로 표시됩니다.# Your answer here: proposed location of hospital 1 lat_1 = 40.6714 long_1 = -73.8492 # Your answer here: proposed location of hospital 2 lat_2 = 40.6702 long_2 = -73.7612 # Do not modify the code below this line try: new_df = pd.DataFrame( {'Latitude': [lat_1, lat_2], 'Longitude': [long_1, long_2]}) new_gdf = gpd.GeoDataFrame(new_df, geometry=gpd.points_from_xy(new_df.Longitude, new_df.Latitude)) new_gdf.crs = {'init' :'epsg:4326'} new_gdf = new_gdf.to_crs(epsg=2263) # get new percentage new_coverage = gpd.GeoDataFrame(geometry=new_gdf.geometry).buffer(10000) new_my_union = new_coverage.geometry.unary_union new_outside_range = outside_range.loc[~outside_range["geometry"].apply(lambda x: new_my_union.contains(x))] new_percentage = round(100*len(new_outside_range)/len(collisions), 2) print("(NEW) Percentage of collisions more than 10 km away from the closest hospital: {}%".format(new_percentage)) # Did you help the city to meet its goal? q_6.check() # make the map m = folium.Map(location=[40.7, -74], zoom_start=11) folium.GeoJson(coverage.geometry.to_crs(epsg=4326)).add_to(m) folium.GeoJson(new_coverage.geometry.to_crs(epsg=4326)).add_to(m) for idx, row in new_gdf.iterrows(): Marker([row['Latitude'], row['Longitude']]).add_to(m) HeatMap(data=new_outside_range[['LATITUDE', 'LONGITUDE']], radius=9).add_to(m) folium.LatLngPopup().add_to(m) display(embed_map(m, 'q_6.html')) except: q_6.hint() # Uncomment to see one potential answer q_6.solution()Bonus: Temperature Analysis Iimport pandas as pd from datetime import datetime as dt # "tobs" is "temperature observations" df = pd.read_csv(r'C:\Users\nnoar\OneDrive\Desktop\sqlaclhemy-challenge\Resources\hawaii_measurements.csv') df.head() # Convert the date column format from string to datetime df['date'] = pd.to_datetime(df['date'],format='%Y-%m-%d') df.info() # Set the date column as the DataFrame index df.set_index('date') # Drop the date column ### Why would I do this if I need to filter by month???Compare June and December data across all yearsfrom scipy import stats # Filter data for desired months june = df.loc[df['date'].dt.month == 6 ] december = df.loc[df['date'].dt.month == 12] # Identify the average temperature for June avg_june = june['tobs'].mean() print(f'The average temperature in June is {avg_june} degrees') # Identify the average temperature for December avg_dec = december['tobs'].mean() print(f'The average temperature in December is {avg_dec} degrees ') # Create collections of temperature data june_temps = june['tobs'] dec_temps = december['tobs'] all_temps = df['tobs'] # Run paired t-test stats.ttest_ind(june_temps,all_temps) stats.ttest_ind(dec_temps,all_temps)Analysis##We see from these that the average temperature on a day in June is closer to the average day overall than the average temperature on a day in Deecember in Hawaii.Lambda School Data Science*Unit 2, Sprint 1, Module 3*--- Ridge Regression AssignmentWe're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.But not just for condos in Tribeca...- [x] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.- [x] Do train/test split. Use data from January — March 2019 to train. 
Use data from April 2019 to test.- [x] Do one-hot encoding of categorical features.- [ ] Do feature selection with `SelectKBest`.- [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set)- [ ] Get mean absolute error for the test set.- [ ] As always, commit your notebook to your fork of the GitHub repo.The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal. Stretch GoalsDon't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.- [ ] Add your own stretch goal(s) !- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).- [ ] Learn more about feature selection: - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance) - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html) - [mlxtend](http://rasbt.github.io/mlxtend/) library - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection) - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).%%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/' !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # Ignore this Numpy warning when using Plotly Express: # FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead. import warnings warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy') import pandas as pd # breaks local envviroment it's not being used so i went ahead and commented it out #import pandas_profiling # Read New York City property sales data df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv') # Change column names: replace spaces with underscores df.columns = [col.replace(' ', '_') for col in df] # SALE_PRICE was read as strings. 
# Remove symbols, convert to integer df['SALE_PRICE'] = ( df['SALE_PRICE'] .str.replace('$','') .str.replace('-','') .str.replace(',','') .astype(int) ) # BOROUGH is a numeric column, but arguably should be a categorical feature, # so convert it from a number to a string df['BOROUGH'] = df['BOROUGH'].astype(str) # Reduce cardinality for NEIGHBORHOOD feature # Get a list of the top 10 neighborhoods top10 = df['NEIGHBORHOOD'].value_counts()[:10].index # At locations where the neighborhood is NOT in the top 10, # replace the neighborhood with 'OTHER' df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER' # make a copy of that data so that I don't mess anything up copy=df.copy() # make a mask for building class category condition=copy['BUILDING_CLASS_CATEGORY']=='01 ONE FAMILY DWELLINGS' copy=copy[condition] # make a mask for building sales price condition=(copy['SALE_PRICE']<2000000) | (copy['SALE_PRICE']>100000) copy=copy[condition] # make a new column to sort date copy['dt']=pd.to_datetime(copy['SALE_DATE'],infer_datetime_format=True) # make train test split train=copy[copy['dt'].dt.month<=3] test=copy[copy['dt'].dt.month==4] # check the train set to see if it includes what i want train['dt'].value_counts().sort_index() # check the test set to see if it includes what i need test['dt'].value_counts() # take a look at diffrent categories that would be good to onehot encode train.describe(include='object') # set features based on the cardinality of the data in them target='SALE_PRICE' ignore=['dt','BUILDING_CLASS_CATEGORY','BUILDING_CLASS_AT_PRESENT','ADDRESS','APARTMENT_NUMBER','SALE_DATE','BUILDING_CLASS_AT_TIME_OF_SALE'] features=train.columns.drop([target]+ignore) # print shape features # make cv split X_train=train[features] y_train=train[target] X_test=test[features] y_test=test[target] # use one hot encoding to encode some of my catigorical variables import category_encoders as ce encode=ce.OneHotEncoder(use_cat_names=True) X_train = encode.fit_transform(X_train) X_test = encode.transform(X_test) # use selectKbest to select my best features, or features with highest explained variance from sklearn.feature_selection import SelectKBest,f_regression # note that im using 5 features her to reduce the dimentions of my feature matrix by a factor of 2/3 # in the example for lecture it was about 1/2 selector=SelectKBest(score_func=f_regression,k=5) X_train_selected=selector.fit_transform(X_train,y_train) X_test_selected=selector.transform(X_test,y_test) X_test["y"]=y_test X_train["y"]=y_train X_test.dropna(inplace=True) X_train.dropna(inplace=True) y_test=X_test['y'] y_train=X_train['y'] X_test.drop('y',axis=1,inplace=True) X_train.drop('y',axis=1,inplace=True)The Lemke Howson algorithmThe vertex and support enumeration algorithms are all algorithms that use an exhaustive search. For large games, this can take a long time and/or have a high computational cost. The following algorithm gives an approach to create a path through vertices in both best response polytopes to find a pair that is fully labelled.--- The Lemke Howson algorithm[Video](https://youtu.be/HekHAuWR_30?list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)For a nondegenerate 2 player game $(A, B)\in{\mathbb{R}^{m\times n}_{>0}}^2$ the following algorithm returns a nash equilibrium:1. Start at the artificial equilibrium: $(0, 0)$2. Choose a label to drop.3. Remove this label from the corresponding vertex by traversing an edge of the corresponding polytope to another vertex. 4. 
The new vertex will now have a duplicate label in the other polytope. Remove this label from the vertex of the other polytope and traverse an edge of that polytope to another vertex.5. Repeat step 4 until the pair of vertices is fully labelled.---As an example let us consider the matching pennies game:$$A = \begin{pmatrix} 1 & -1\\ -1& 1\end{pmatrix}\qquadB = \begin{pmatrix} -1 & 1\\ 1& -1\end{pmatrix}$$First let us add 2 to all utilities:$$A = \begin{pmatrix} 3 & 1\\ 1 & 3\end{pmatrix}\qquadB = \begin{pmatrix} 1 & 3\\ 3 & 1\end{pmatrix}$$The vertices for $\mathcal{P}$ are:1. $a=(0, 0)$ has labels $\{0, 1\}$ 2. $b=(1/3, 0)$ has labels $\{1, 3\}$3. $c=(1/4, 1/4)$ has labels $\{2, 3\}$4. $d=(0, 1/3)$ has labels $\{0, 2\}$The vertics and labels for $\mathcal{Q}$ are:1. $w=(0, 0)$ has labels $\{2, 3\}$2. $x=(1/3, 0)$ has labels $\{0, 3\}$3. $y=(1/4, 1/4)$ has labels $\{0, 1\}$4. $z=(0, 1/3)$ has labels $\{1, 2\}$Let us apply the algorithm:- $(a, w)$ have labels: $\{0, 1\}, \{2, 3\}$. Drop 0 (arbitrary decision) in $\mathcal{P}$.- $\to (b, w)$ have labels: $\{1, 3\}, \{2, 3\}$. In $\mathcal{Q}$ drop 3.- $\to (b, z)$ have labels: $\{1, 3\}, \{1, 2\}$. In $\mathcal{P}$ drop 1.- $\to (c, z)$ have labels: $\{2, 3\}, \{1, 2\}$. In $\mathcal{Q}$ drop 2.- $\to (c, y)$ have labels: $\{2, 3\}, \{0, 1\}$. Fully labeled vertex pair.We now return the strategy pair by normalising these vertices:$$((1/2, 1/2), (1/2, 1/2))$$This is also implemented in `Nashpy`:import numpy as np import nashpy as nash A = np.array([[1, -1], [-1, 1]]) matching_pennies = nash.Game(A) matching_pennies.lemke_howson(initial_dropped_label=0)You can also iterate over all possible starting labels:for eq in matching_pennies.lemke_howson_enumeration(): print(eq)(array([0.5, 0.5]), array([0.5, 0.5])) (array([0.5, 0.5]), array([0.5, 0.5])) (array([0.5, 0.5]), array([0.5, 0.5])) (array([0.5, 0.5]), array([0.5, 0.5]))Read Filesimport pandas as pd import numpy as np from glob import glob import os, psutil from tqdm.notebook import tqdm fns = glob('/media/robmulla/moardata/reddit_place2/*.parquet') process = psutil.Process(os.getpid()) print(process.memory_info().rss) fns.sort() dfs = [] print(process.memory_info().rss) for f in tqdm(fns): df = pd.read_parquet(f) df['pixel_color'] = df['pixel_color'].astype('category') df['x'] = df['x'].astype('int32') df['y'] = df['y'].astype('int32') dfs.append(df) print('About to concat') print(process.memory_info().rss) # Does this double our memory use dfs = pd.concat(dfs) print('Done with concat') print(process.memory_info().rss) dfs.to_parquet('/media/robmulla/moardata/reddit_place2/combined_v1.parquet') dfs.head()Lab 0: Getting familiar with the camera The main purpose of Lab 0 is to familiarize yourselves with the camera. 
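A brief aside on the Reddit r/place concatenation above, before the camera lab continues: the `# Does this double our memory use` comment can be answered empirically. The sketch below is a minimal illustration, not the original notebook's code; it assumes a list of DataFrames like the one built in that cell and the `psutil` `process` handle defined there. Note also that `pd.concat` silently falls back to `object` dtype for `pixel_color` if the category sets differ between files, which costs far more memory than keeping the `category` dtype.

```python
import pandas as pd

def concat_with_memory_report(frames, process):
    """Concatenate a list of DataFrames, printing resident memory before and after."""
    before = process.memory_info().rss
    combined = pd.concat(frames, ignore_index=True)   # peak usage is roughly old frames + the combined copy
    after = process.memory_info().rss
    print(f"RSS before: {before:,} bytes, after: {after:,} bytes, delta: {after - before:,}")
    print("Combined frame itself:", combined.memory_usage(deep=True).sum(), "bytes")
    return combined

# combined = concat_with_memory_report(dfs, process)   # hypothetical call, before dfs is reassigned
```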
You should be able to capture an image, establish a basic understanding of camera parameters and adjust them in live image display to obatin the best image possible Import basic Python modulesimport os import sys # Add the path which contains wrappers for II harwares sys.path.append(r'.\Hardware_src') # Make the change according to your path # Python modules for figure visualization import numpy as np import matplotlib.pyplot as plt from helper import check_minmax from IPython.display import clear_output from IPython.display import display, HTMLCamera object instantiationfrom camera import Camera # Instantiate Camera object c = Camera() c.open() c.initialize_memory()Check camera propertiesc.get_properties()Capture your first imageimg = c.capture().copy() # Note: it's always a good habit to use np.copy() when assigning values to another variables # to avoid they share the same address # Visualize your captured image ######################### ######## YOUR CODE#######Think about the following questions:+ How does your image look like? In-focus or out-of-focus? Is there any saturation in image readout?+ Based on what we have discussed, what causes saturation? How do you control the detector pixel readout? Change the imaging settings# Set the frame rate c.set_framerate(5) # Check the framerate c.get_framerate() # Set the pixel clock c.set_pixel_clock(7) # Check the pixel clock c.get_pixel_clock() # Set exposure c.set_exposure(50) # Check current exposure c.get_exposure()Live image display Feel free to customize the example code for live image display# Live image display for fine-tuning the camera parameters fig, ax = plt.subplots(figsize=(7, 7)) plt.close(fig) # live image display with exception handling try: while True: ax.imshow(c.capture().copy(), clim=[0, 1023], cmap='gray') clear_output(wait=True) display(fig) except KeyboardInterrupt: # interrupt python kernel (press i twice in command mode) to stop image capture print('Live image display has stopped') finally: print('Statement here will be executed even if an exception occurs in the try block')Seting up the imaging Region-of-Interest (ROI) You don't always have to capture the full-field image, feel free to narow down to a specific ROI and focus on your target of interest **Attention**: Keep in mind that raw measurement is **the transpose of your camera view** due to different memory order in camera and in Python. It's a little bit confusing, but always remember it when setting up ROI# Set the imaging ROI (in raw measurement view) roi_shape = [120,120] # ROI size MUST BE MULTIPLE OF 4 roi_pos = [1000,500] # The location of top-left corner of AOI on axis 0 and 1; # Full size is (1280,1024) c.set_roi(roi_shape, roi_pos)Always visualize the captured image to check if the ROI is what you desired######################################## ############ YOUR CODE #################Close the camera# Close the camera c.close()Procesamiento Digital de Señales Primeras pruebas hacia el análisis espectral Comenzamos a tomar contacto con la transformada discreta de Fourier (DFT) y su implementación eficiente la FFT. 
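One note for the "Visualize your captured image" cell of the camera lab above: a minimal sketch could look like the following. It assumes the capture is a NumPy array and, based on the `clim=[0, 1023]` used later in the live display, a 10-bit readout; adjust `max_count` if your sensor differs.

```python
import numpy as np
import matplotlib.pyplot as plt

def show_capture(img, max_count=1023):
    """Display a raw capture in grayscale and report how many pixels are saturated."""
    img = np.asarray(img)
    plt.figure(figsize=(6, 6))
    plt.imshow(img, cmap='gray', vmin=0, vmax=max_count)
    plt.colorbar(label='pixel readout')
    plt.title('Captured image')
    saturated = 100 * np.mean(img >= max_count)
    print(f"{saturated:.2f}% of pixels at maximum readout (saturated)")
    plt.show()

# show_capture(img)
```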
Aprovechamos la oportunidad para presentar una aplicación de los notebooks de Jupyter y su potencial para presentar resultados de forma ordenada y elegante.# Módulos para Jupyter import warnings warnings.filterwarnings('ignore') import numpy as np import matplotlib as mpl #%% Inicialización de librerías # Setup inline graphics: Esto lo hacemos para que el tamaño de la salida, # sea un poco más adecuada al tamaño del documento mpl.rcParams['figure.figsize'] = (14,7) # Módulos para mi script propiamente dicho import numpy as np import matplotlib.pyplot as plt from scipy.fftpack import fft from pdsmodulos import print_markdown, print_subtitle, print_latexPodemos intercalar bloques de texto y código casi sin restricciones. En este caso el código de inicialización lo dejamos resuelto en el bloque anterior.nn = 1000 fs = 1000 tt = np.arange(0.0, nn/fs, 1/fs) ff = np.arange(0.0, fs, nn/fs) # ahora podemos simular que los canales están desconectados, # o que una señal de ruido blanco, normalmente distribuido ingresa al ADC. canales_ADC = 1 a0 = 1 # Volt f0 = nn/4 * fs/nn # dd = np.sin(2*np.pi*f0*tt) dd = np.random.uniform(-np.sqrt(12)/2, +np.sqrt(12)/2, size = [nn,canales_ADC]) # dd = np.random.normal(0, 1.0, size = [N,canales_ADC]) DD = fft( dd, axis = 0 ) bfrec = ff <= fs/2 plt.figure() plt.plot( ff[bfrec], np.abs(DD[bfrec]) ) plt.ylabel('Módulo [¿Unidades?]') plt.xlabel('Frecuencia [Hz]') plt.figure() plt.plot( ff[bfrec], np.abs(DD[bfrec])**2 ) plt.ylabel('Densidad de Potencia [¿Unidades?]') plt.xlabel('Frecuencia [Hz]') plt.show()Teorema de ParsevalPara practicar te dejo las siguientes consignas:1. Editá este notebook y agregá una breve explicación de cómo aplicarías el teorema de Parseval a las señales que te presento más arriba en este mismo notebook.2. Escribí la ecuación del teorema en Latex, podés copiarla de la bibliografía.$ \sum\limits_{n=0}^{N-1} ?? = \frac{1}{N} \sum\limits_{k=0}^{N-1} ?? $3. En un bloque de código, verificá que dicho teorema se cumple, con alguna experimentación con señales que vos generes.# Algo que podría resultarte útil alguna vez es generar Markdown en tu propio código, tal vez # para presentar una tabla, resultado, etc. Acá te dejo unos ejemplos print_subtitle('Teorema de Parseval (generado dinámicamente desde código)') print_markdown('Te dejo unas funciones que te pueden servir si alguna vez quisieras generar Markdown desde tus scripts.') # ojo que la "r" antes del string es IMPORTANTE! print_latex(r'\sigma^2 = \frac{s+2}{p+1}') ## Escribí tu respuesta a partir de aquí ...Gift Recommender Engine: Evaluation Use celebrities Tweets to see their interests. 
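Relating to the Parseval exercise in the notebook above: with the `scipy.fftpack.fft` convention used there, Parseval's theorem reads $\sum_{n=0}^{N-1} |x[n]|^2 = \frac{1}{N}\sum_{k=0}^{N-1} |X[k]|^2$, and it can be checked numerically. The sketch below uses an arbitrary test signal; it is an illustration, not the exercise's official answer.

```python
import numpy as np
from scipy.fftpack import fft

N, fs = 1000, 1000
tt = np.arange(N) / fs
x = np.sin(2 * np.pi * 250 * tt) + np.random.normal(0, 1.0, N)   # arbitrary test signal

X = fft(x)
energy_time = np.sum(np.abs(x) ** 2)          # energy in the time domain
energy_freq = np.sum(np.abs(X) ** 2) / N      # energy in the frequency domain, scaled by 1/N

print(energy_time, energy_freq)
print("Parseval holds:", np.allclose(energy_time, energy_freq))
```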
Import Librariesimport numpy as np import pandas as pd df = pd.read_csv('datasets/twitter-profiles/report_barackobama/tweets.csv') df.head()Importing Modelsimport pickle # Naive Bayes Model filename = open('models/nb_baseline2.sav', 'rb') nb = pickle.load(filename) # Support Vector Classifier Model filename = open('models/linear_svc_baseline2.sav', 'rb') ovr_svc = pickle.load(filename) # Import Vectorizer filename = open('models/tfidf_vectorizer2.sav', 'rb') tfidf_model = pickle.load(filename) # Import Reference Dictionary filename = open('models/reference-dict.pickle', 'rb') ref = pickle.load(filename) import re import string import nltk import spacy from nltk.probability import FreqDist stopwords = nltk.corpus.stopwords.words('english') stopwords.extend(['im', "oh", "i'm", "lol", "gonna", 'ill']) nlp = spacy.load('en_core_web_sm') def spacy_lemmatize(text): if type(text) == list: doc = nlp(u"{}".format(' '.join(text))) else: doc = nlp(u"{}".format(text)) lemmatized = list() for token in doc: lemmatized.append(token.lemma_) return lemmatized def deEmojify(text): regrex_pattern = re.compile(pattern = "[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) "]+", flags = re.UNICODE) return regrex_pattern.sub(r'',text) def preprocess(text): text=re.sub(r'http\S+', '',text) text = re.sub('@[^\s]+','',text) text = re.sub('</?[a-z]+>', '', text) text = text.replace('&', '&') text = re.sub(r"[^\w\s]", "", text) text = deEmojify(text) text = text.split() #split into list #text = [re.sub(r'^https?:\/\/.*[\r\n]*', '', s, flags=re.MULTILINE) for s in text] #remove any links #text = [re.sub('@[^\s]+','', s) for s in text] #remove @ text = [s.lower() for s in text] #convert every character into lowercase #text = [re.sub(rf"[{string.punctuation}]", " ", s) for s in text] #remove punctuations text = [re.sub(r'[0-9]', ' ', s) for s in text] #remove all digits text = ' '.join(text) #resplits text = [s for s in text.split() if len(s) >= 2] #removes words with one word length text = [s for s in text if s not in stopwords] #remove all stopwords text = ' '.join(spacy_lemmatize(text)) #lemmatize text using spacy and join into a string text = ' '.join([s for s in text.split() if len(s) > 2]) return text df['clean-tweets'] = df['Tweet Content'].map(preprocess) tweets = df[['Tweet Content', 'clean-tweets']].rename(columns={'Tweet Content': 'tweet'}) all_words = ' '.join([char for char in tweets['clean-tweets'].to_list()]).split() tweet_length = tweets['tweet'].apply(lambda x: len(x.split())).to_list() clean_tweet_length = tweets['clean-tweets'].apply(lambda x: len(x.split())).to_list() tweets['tweet-len'] = tweet_length tweets['clean-len'] = clean_tweet_length tweets = tweets[tweets['clean-len'] >= 5] from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer analyzer = SentimentIntensityAnalyzer() tweets['vader-sentiment'] = tweets['tweet'].apply(lambda x: analyzer.polarity_scores(x)) tweets['vader-pos'] = tweets['vader-sentiment'].apply(lambda x: x['pos']) tweets['vader-neu'] = tweets['vader-sentiment'].apply(lambda x: x['neu']) tweets['vader-neg'] = tweets['vader-sentiment'].apply(lambda x: x['neg']) tweets['vader-compound'] = tweets['vader-sentiment'].apply(lambda x: x['compound']) tweets_filtered = tweets[tweets['vader-compound'] >= 0.6] tweets_filtered.shape clean_tweets = tweets_filtered['clean-tweets'].to_list() nb_topic_pred = list() svc_topic_pred = list() for tweet in 
clean_tweets: nb_topic = ref[nb.predict(tfidf_model.transform([tweet]))[0]] nb_topic_pred.append(nb_topic) svc_topic = ref[ovr_svc.predict(tfidf_model.transform([tweet]))[0]] svc_topic_pred.append(svc_topic) nb_series = pd.Series(nb_topic_pred).value_counts()[:3] nb_series svc_series = pd.Series(svc_topic_pred).value_counts()[:3] svc_series # TRAIN LDA ON AMAZON DATASET --> USE LDA MODEL ON TWEETS TO IDENTIFY KEYWORDS --> INPUT TO CLASSIFIERPython LibrariesWe will be using a several different libraries throughout this course. If you've successfully completed the [installation instructions](https://github.com/cs109/content/wiki/Installing-Python), all of the following statements should run.#IPython is what you are using now to run the notebook import jupyter #print ("Jupyter version: %6.6s (need at least 1.0)" % jupyter.__version__) # Numpy is a library for working with Arrays import numpy as np print ("Numpy version: %6.6s (need at least 1.7.1)" % np.__version__) # SciPy implements many different numerical algorithms import scipy as sp print ("SciPy version: %6.6s (need at least 0.12.0)" % sp.__version__) # Pandas makes working with data tables easier import pandas as pd print ("Pandas version: %6.6s (need at least 0.11.0)" % pd.__version__) # Module for plotting import matplotlib print ("Mapltolib version: %6.6s (need at least 1.2.1)" % matplotlib.__version__) # SciKit Learn implements several Machine Learning algorithms import sklearn print ("Scikit-Learn version: %6.6s (need at least 0.13.1)" % sklearn.__version__) # Requests is a library for getting data from the Web import requests print ("requests version: %6.6s (need at least 1.2.3)" % requests.__version__) # Networkx is a library for working with networks import networkx as nx print ("NetworkX version: %6.6s (need at least 1.7)" % nx.__version__) #BeautifulSoup is a library to parse HTML and XML documents import beautifulSoup print ("BeautifulSoup version:%6.6s (need at least 3.2)" % BeautifulSoup.__version__) #MrJob is a library to run map reduce jobs on Amazon's computers import mrjob print ("Mr Job version: %6.6s (need at least 0.4)" % mrjob.__version__) #Pattern has lots of tools for working with data from the internet import pattern print ("Pattern version: %6.6s (need at least 2.6)" % pattern.__version__)Numpy version: 1.12.1 (need at least 1.7.1) SciPy version: 0.19.0 (need at least 0.12.0) Pandas version: 0.20.1 (need at least 0.11.0) Mapltolib version: 2.0.2 (need at least 1.2.1) Scikit-Learn version: 0.18.1 (need at least 0.13.1) requests version: 2.14.2 (need at least 1.2.3) NetworkX version: 1.11 (need at least 1.7)If any of these libraries are missing or out of date, you will need to [install them](https://github.com/cs109/content/wiki/Installing-Pythoninstalling-additional-libraries) and restart IPython Hello matplotlib The notebook integrates nicely with Matplotlib, the primary plotting package for python. This should embed a figure of a sine wave:#this line prepares IPython for working with matplotlib %matplotlib inline # this actually imports matplotlib import matplotlib.pyplot as plt x = np.linspace(0, 10, 30) #array of 30 points from 0 to 10 y = np.sin(x) z = y + np.random.normal(size=30) * .2 plt.plot(x, y, 'ro-', label='A sine wave') plt.plot(x, z, 'b-', label='Noisy sine') plt.legend(loc = 'lower right') plt.xlabel("X axis") plt.ylabel("Y axis")If that last cell complained about the `%matplotlib` line, you need to update IPython to v1.0, and restart the notebook. 
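One correction to the library-check cell above: `import beautifulSoup` raises an ImportError, and `BeautifulSoup.__version__` is never defined. Assuming the modern `bs4` packaging of BeautifulSoup 4 is what is installed, a working check looks like this:

```python
# BeautifulSoup 4 is distributed as the bs4 package
import bs4
print("BeautifulSoup version:%6.6s (need at least 3.2)" % bs4.__version__)
```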
See the [installation page](https://github.com/cs109/content/wiki/Installing-Python) Hello NumpyThe Numpy array processing library is the basis of nearly all numerical computing in Python. Here's a 30 second crash course. For more details, consult Chapter 4 of Python for Data Analysis, or the [Numpy User's Guide](http://docs.scipy.org/doc/numpy-dev/user/index.html)print ("Make a 3 row x 4 column array of random numbers") x = np.random.random((3, 4)) print (x) print print ("Add 1 to every element") x = x + 1 print (x) print print ("Get the element at row 1, column 2") print (x[1, 2]) print # The colon syntax is called "slicing" the array. print ("Get the first row") print (x[0, :]) print print ("Get every 2nd column of the first row") print (x[0, ::2]) printMake a 3 row x 4 column array of random numbers [[ 0.80596117 0.58085638 0.2137176 0.55817447] [ 0.71444621 0.56043767 0.33289552 0.11722417] [ 0.71265339 0.78697654 0.81669209 0.65359971]] Add 1 to every element [[ 1.80596117 1.58085638 1.2137176 1.55817447] [ 1.71444621 1.56043767 1.33289552 1.11722417] [ 1.71265339 1.78697654 1.81669209 1.65359971]] Get the element at row 1, column 2 1.33289552396 Get the first row [ 1.80596117 1.58085638 1.2137176 1.55817447] Get every 2nd column of the first row [ 1.80596117 1.2137176 ]Print the maximum, minimum, and mean of the array. This does **not** require writing a loop. In the code cell below, type `x.m`, to find built-in operations for common array statistics like this#your code hereCall the `x.max` function again, but use the `axis` keyword to print the maximum of each row in x.#your code hereHere's a way to quickly simulate 500 coin "fair" coin tosses (where the probabily of getting Heads is 50%, or 0.5)x = np.random.binomial(500, .5) print "number of heads:", xRepeat this simulation 500 times, and use the [plt.hist() function](http://matplotlib.org/api/pyplot_api.htmlmatplotlib.pyplot.hist) to plot a histogram of the number of Heads (1s) in each simulation#your code hereThe Monty Hall ProblemHere's a fun and perhaps surprising statistical riddle, and a good way to get some practice writing python functionsIn a gameshow, contestants try to guess which of 3 closed doors contain a cash prize (goats are behind the other two doors). Of course, the odds of choosing the correct door are 1 in 3. As a twist, the host of the show occasionally opens a door after a contestant makes his or her choice. This door is always one of the two the contestant did not pick, and is also always one of the goat doors (note that it is always possible to do this, since there are two goat doors). At this point, the contestant has the option of keeping his or her original choice, or swtiching to the other unopened door. The question is: is there any benefit to switching doors? The answer surprises many people who haven't heard the question before.We can answer the problem by running simulations in Python. We'll do it in several parts.First, write a function called `simulate_prizedoor`. 
This function will simulate the location of the prize in many games -- see the detailed specification below:""" Function -------- simulate_prizedoor Generate a random array of 0s, 1s, and 2s, representing hiding a prize between door 0, door 1, and door 2 Parameters ---------- nsim : int The number of simulations to run Returns ------- sims : array Random array of 0s, 1s, and 2s Example ------- >>> print simulate_prizedoor(3) array([0, 0, 2]) """ def simulate_prizedoor(nsim): #compute here return answer #your code hereNext, write a function that simulates the contestant's guesses for `nsim` simulations. Call this function `simulate_guess`. The specs:""" Function -------- simulate_guess Return any strategy for guessing which door a prize is behind. This could be a random strategy, one that always guesses 2, whatever. Parameters ---------- nsim : int The number of simulations to generate guesses for Returns ------- guesses : array An array of guesses. Each guess is a 0, 1, or 2 Example ------- >>> print simulate_guess(5) array([0, 0, 0, 0, 0]) """ #your code hereNext, write a function, `goat_door`, to simulate randomly revealing one of the goat doors that a contestant didn't pick.""" Function -------- goat_door Simulate the opening of a "goat door" that doesn't contain the prize, and is different from the contestants guess Parameters ---------- prizedoors : array The door that the prize is behind in each simulation guesses : array THe door that the contestant guessed in each simulation Returns ------- goats : array The goat door that is opened for each simulation. Each item is 0, 1, or 2, and is different from both prizedoors and guesses Examples -------- >>> print goat_door(np.array([0, 1, 2]), np.array([1, 1, 1])) >>> array([2, 2, 0]) """ #your code hereWrite a function, `switch_guess`, that represents the strategy of always switching a guess after the goat door is opened.""" Function -------- switch_guess The strategy that always switches a guess after the goat door is opened Parameters ---------- guesses : array Array of original guesses, for each simulation goatdoors : array Array of revealed goat doors for each simulation Returns ------- The new door after switching. Should be different from both guesses and goatdoors Examples -------- >>> print switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1])) >>> array([2, 0, 0]) """ #your code hereLast function: write a `win_percentage` function that takes an array of `guesses` and `prizedoors`, and returns the percent of correct guesses""" Function -------- win_percentage Calculate the percent of times that a simulation of guesses is correct Parameters ----------- guesses : array Guesses for each simulation prizedoors : array Location of prize for each simulation Returns -------- percentage : number between 0 and 100 The win percentage Examples --------- >>> print win_percentage(np.array([0, 1, 2]), np.array([0, 0, 0])) 33.333 """ #your code hereNow, put it together. Simulate 10000 games where contestant keeps his original guess, and 10000 games where the contestant switches his door after a goat door is revealed. Compute the percentage of time the contestant wins under either strategy. 
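Putting the pieces together: below is one possible, illustrative implementation of the functions specified above plus the 10,000-game comparison. It is a sketch consistent with the docstrings, not the course's official solution.

```python
import numpy as np

def simulate_prizedoor(nsim):
    """Random array of 0s, 1s and 2s: the door hiding the prize in each game."""
    return np.random.randint(0, 3, size=nsim)

def simulate_guess(nsim):
    """A simple strategy: always guess door 0."""
    return np.zeros(nsim, dtype=int)

def goat_door(prizedoors, guesses):
    """For each game, open a door that is neither the prize door nor the guess."""
    doors = np.array([0, 1, 2])
    return np.array([np.random.choice(doors[(doors != p) & (doors != g)])
                     for p, g in zip(prizedoors, guesses)])

def switch_guess(guesses, goatdoors):
    """Switch to the door that is neither the original guess nor the opened goat door."""
    return 3 - guesses - goatdoors   # the three door labels 0, 1, 2 sum to 3

def win_percentage(guesses, prizedoors):
    """Percent of games in which the guess matches the prize door."""
    return 100 * np.mean(guesses == prizedoors)

nsim = 10000
prizedoors = simulate_prizedoor(nsim)
guesses = simulate_guess(nsim)
goats = goat_door(prizedoors, guesses)

print("Win rate when keeping the original guess: %.1f%%" % win_percentage(guesses, prizedoors))
print("Win rate when switching:                  %.1f%%" % win_percentage(switch_guess(guesses, goats), prizedoors))
```

Keeping the original guess wins about a third of the time, while switching wins about two thirds of the time.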
Is one strategy better than the other?#your code heredownload the necessary libraries!pip install tokenizers !pip install transformersCollecting tokenizers Downloading tokenizers-0.11.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (6.8 MB)  |████████████████████████████████| 6.8 MB 4.8 MB/s [?25hInstalling collected packages: tokenizers Successfully installed tokenizers-0.11.2 Collecting transformers Downloading transformers-4.15.0-py3-none-any.whl (3.4 MB)  |████████████████████████████████| 3.4 MB 5.4 MB/s [?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.4.2) Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.62.3) Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0) Collecting pyyaml>=5.1 Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)  |████████████████████████████████| 596 kB 71.6 MB/s [?25hCollecting huggingface-hu[...]import librariesimport tensorflow as tf import os import tokenizers from tokenizers.models import BPE from tokenizers import Tokenizer from tokenizers.decoders import ByteLevel as ByteLevelDecoder from tokenizers.normalizers import Sequence from tokenizers.pre_tokenizers import ByteLevel from tokenizers.trainers import BpeTrainer from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer from pathlib import Pathclass with all the tokenization steps to process all the input filesclass BPE_token(object): def __init__(self): self.tokenizer = Tokenizer(BPE()) # self.tokenizer.normalizer = Sequence([ # NFKC() # ]) self.tokenizer.pre_tokenizer = ByteLevel() self.tokenizer.decoder = ByteLevelDecoder() def bpe_train(self, paths): trainer = BpeTrainer(vocab_size=25000, show_progress=True, inital_alphabet=ByteLevel.alphabet(), special_tokens=[ "", "", "", "", "" ]) self.tokenizer.train(trainer = trainer, files = paths) def save_tokenizer(self, location, prefix=None): if not os.path.exists(location): os.makedirs(location) self.tokenizer.model.save(location, prefix)read all the input files# the folder 'text' contains all the files paths = [str(x) for x in Path("./gdrive/MyDrive/project/data/").glob("**/*processed.txt")] pathsperform tokenizationtokenizer = BPE_token() # train the tokenizer model tokenizer.bpe_train(paths) # saving the tokenized data in our specified folder save_path = 'gdrive/MyDrive/project/model' tokenizer.save_tokenizer(save_path)load and config GPT-2# loading tokenizer from the saved model path tokenizer = GPT2Tokenizer.from_pretrained(save_path) tokenizer.add_special_tokens({ "eos_token": "", "bos_token": "", "unk_token": "", "pad_token": "", "mask_token": "" }) # creating the configurations from which the model can be made config = GPT2Config( vocab_size=tokenizer.vocab_size, bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id ) # creating the model model = TFGPT2LMHeadModel(config)file gdrive/MyDrive/project/model/config.json not foundmerge all the fairytales in one stringsingle_string = '' for filename in paths: with open(filename, "r", encoding='utf-8') as f: x = f.read() single_string += x + tokenizer.eos_token string_tokenized = tokenizer.encode(single_string)create the input dataset for the modelexamples = [] block_size = 100 BATCH_SIZE = 12 BUFFER_SIZE = 1000 for i in range(0, len(string_tokenized) - block_size + 1, block_size): 
examples.append(string_tokenized[i:i + block_size]) inputs, labels = [], [] for ex in examples: inputs.append(ex[:-1]) labels.append(ex[1:]) dataset = tf.data.Dataset.from_tensor_slices((inputs, labels)) dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)define the training hyperparameters# defining our optimizer # clipnorm: # Gradient norm scaling involves changing the derivatives of the loss function # to have a given vector norm when the L2 vector norm (sum of the squared values) # of the gradient vector exceeds a threshold value. For example, we could specify # a norm of 1.0, meaning that if the vector norm for a gradient exceeds 1.0, then # the values in the vector will be rescaled so that the norm of the vector equals 1.0. # epsilon: avoid zero division optimizer = tf.keras.optimizers.Adam(learning_rate=6e-5, epsilon=1e-08, clipnorm=1.0) # definining our loss function loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # defining our metric which we want to observe metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy') # compiling the model model.compile(optimizer=optimizer, loss=[loss, *[None] * model.config.n_layer], metrics=[metric])train the modelnum_epoch = 35 checkpoint_filepath = 'gdrive/MyDrive/project/model/checkpoint' model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_filepath) # Model weights are saved at the end of every epoch, if it's the best seen # so far. history = model.fit(dataset, epochs=num_epoch, callbacks=[model_checkpoint_callback]) # The model weights (that are considered the best) are loaded into the model. # model.load_weights(checkpoint_filepath) #save_path = 'gdrive/MyDrive/project/model' model.save_pretrained(save_directory = save_path, save_config = True)load weights and continue the training processloaded_model = model.load_weights(checkpoint_filepath) num_epoch = 5 checkpoint_filepath = 'gdrive/MyDrive/project/model/checkpoint' history2 = model.fit(dataset, epochs=num_epoch, callbacks=[model_checkpoint_callback]) save_path = 'gdrive/MyDrive/project/model' model.save_pretrained(save_directory = save_path, save_config = True)generate short storiestext = "Ο νεαρός βοσκός ήταν πολύ στεναχωρημένος" # encoding the input text input_ids = tokenizer.encode(text, return_tensors='tf') # getting out output beam_output = model.generate( input_ids, max_length = 500, num_beams = 10, #temperature = 0.1, no_repeat_ngram_size=1, num_return_sequences=10, repetition_penalty=1.5, skip_special_tokens = True, clean_up_tokenization = True, early_stopping = True ) print(tokenizer.decode(beam_output[0]))Setting `pad_token_id` to 2 (first `eos_token_id`) to generate sequencePart 5: Application using the Discretized Misfit calculationBy now, you should have looked through [Part 1](IntroductionToMetric.ipynb), [Part 2](IntroductionToResidual.ipynb) of the introductory notebook series, and [Part 3](OtherIO_options.ipynb) of the introductory notebook series. These introduced the umami `Metric` and `Residual` classes. You should have also worked through [Part 4](ExampleApplication.ipynb), which provides an example application of umami. ScopeSimilar to [Part 4](ExampleApplication.ipynb), this application will use umami alongside the [terrainbento](https://terrainbento.readthedocs.io/en/latest/) package. As in the prior example, we will define a "synthetic truth" model evaluation with a specific set of input parameters, and then do a grid search letting some of those parameters vary. 
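As a hedged alternative to the beam-search generation above: beam search with many beams often produces repetitive text, and sampling-based decoding (top-k / nucleus sampling) is a common alternative. The parameter values below are illustrative starting points, not tuned settings; `text`, `model` and `tokenizer` are the objects already defined in this notebook.

```python
input_ids = tokenizer.encode(text, return_tensors='tf')

sample_output = model.generate(
    input_ids,
    do_sample=True,          # sample instead of beam search
    max_length=500,
    top_k=50,                # keep only the 50 most likely next tokens
    top_p=0.95,              # nucleus sampling: keep tokens covering 95% of the probability mass
    temperature=0.9,
    repetition_penalty=1.5,
    no_repeat_ngram_size=2,
)
print(tokenizer.decode(sample_output[0], skip_special_tokens=True))
```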
In this way we will explore which statistics for model-data comparison do best at identifying the "true" parameters. This application focuses on the least intuitive of the umami calculations: the [`discretized_misfit`](https://umami.readthedocs.io/en/latest/umami.calculations.residual.discretized_misfit.html). If you have comments or questions about the notebooks, the best place to get help is through [GitHub Issues](https://github.com/TerrainBento/umami/issues).To begin, we import necessary modules.import warnings warnings.filterwarnings('ignore') from io import StringIO from itertools import product from tqdm import tqdm import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt %matplotlib inline from plotnine import * import holoviews as hv hv.notebook_extension('matplotlib') from landlab import imshow_grid from terrainbento import Basic from umami import Metric, ResidualWe begin by defining an input string that defines the terrainbento model.It evolves topography using stream power and linear diffusion. In our application it has a boundary condition of uniform uplift across the core of the model grid. It thus has the following governing equation:$\frac{\partial \eta}{\partial t} = U - KQ^{1/2}S + D\nabla^2 \eta$where $K$ and $D$ are constants, $Q$ is discharge, $S$ is local slope, $U$ is the uplift rate, and $\eta$ is the topography. See the [model Basic documentation](https://terrainbento.readthedocs.io/en/latest/source/terrainbento.derived_models.model_basic.html) for additional information. In this input file we also indicate that the model will run with timesteps of 500 yr and the model grid will have a shape of (50, 80), with grid cell spacing of 100 m. For this application, the model initial condition is for core nodes to have random noise added.A few places in the input file have curly braces around a name. * Two inputs parameters have curly brackets around them: `{duration}` and `{water_erodibility}`. These inputs will be modified using [`str.format`](https://docs.python.org/3/library/stdtypes.htmlstr.format) to set the "truth" model run and to vary the parameters in a grid search numerical experiment. * We also format the `{name}` of output files in order to prevent Windows file permissions errors.spec_string = """ # Create the Clock. clock: start: 0 step: 500 stop: {duration} # Create the Grid grid: RasterModelGrid: - [100, 120] - xy_spacing: 50 - fields: node: topographic__elevation: random: where: CORE_NODE # Set up Boundary Handlers boundary_handlers: NotCoreNodeBaselevelHandler: modify_core_nodes: True lowering_rate: -{lowering_rate} # Parameters that control output. output_interval: 1e3 save_first_timestep: True output_prefix: disc_resid.{name}. fields: - topographic__elevation # Parameters that control process and rates. 
water_erodibility: {water_erodibility} m_sp: 0.5 n_sp: 1.0 regolith_transport_parameter: 0.1 """Next we instantiate the "truth" model and run it.truth_duration = 3e4 truth_water_erodibility = 0.0005 lowering_rate = 100 / truth_duration truth_params = StringIO( spec_string.format(duration=truth_duration, water_erodibility=truth_water_erodibility, lowering_rate=lowering_rate, name="truth")) np.random.seed(42) truth = Basic.from_file(truth_params) truth.run()The [holoviews](https://holoviews.org) package provides capabilities to visualize the model run.ds = truth.to_xarray_dataset(time_unit='years', space_unit='meters') hvds_topo = hv.Dataset(ds.topographic__elevation) topo = hvds_topo.to(hv.Image, ['x', 'y'], label='Truth').options(interpolation='bilinear', cmap='viridis', colorbar=True) topoAs the center nodes are uplifted, a series of ridges and drainage basins form. This model has not yet reached "topographic steady state", in which $\frac{\partial \eta}{\partial t}>\epsilon$ (and $\epsilon$ is small) everywhere. Before moving on, we close the xarray dataset and remove the output netcdf files.ds.close() truth.remove_output_netcdfs()Step 2: Define the basis for model-data comparisonThe discretized_misfit takes the following parameters: Parameters ---------- model_grid : Landlab model grid data_grid : Landlab model grid name : str misfit_field : str field_1 : str field_2 : str field_1_percentile_edges : list field_2_percentile_edges : list This calculation first classifies each grid cell in the landscape into categories based on `field_1`, `field_2` and the percentile edges for each (using the data grid). This results in a set of categories, which may or may not be contiguous in space. For each category, the sum of squared residuals is calculated based on the misfit_field.Since this calculation returns one value for each category, rather than one value in total, a `name` must be provided. This is a string that will be formatted with the values for `{field_1_level}` and `{field_2_level}`. The output is an ordered dictionary with name as the keys, and the sum of squares misfit as the values.The following is the input file (as string) needed to specify a `discretized_misfit` in which the domain is discretized based on `channel__chi_index` (three percentile levels defined by `[0, 30, 60, 100]`), and `topographic__elevation` (two percentile levels defined by `[0, 50, 100]`). Within each of the six category domains, a misfit is made based on the field `topographic__elevation`. Below we will show a plot indicating where each category is located, but first we will specify the numerical experiment.residual_string = """ dm: _func: discretized_misfit name: chi_{field_1_level}.z_{field_2_level} misfit_field: topographic__elevation field_1: channel__chi_index field_2: topographic__elevation field_1_percentile_edges: - 0 - 30 - 60 - 100 field_2_percentile_edges: - 0 - 50 - 100 """Step 3: Create and run the grid search experimentWe will use a grid search to highlight how the misfit values in the `discretized_residual` calculated by umami vary across parameter space. We consider values for `duration` between $10^{3}$ and $10^{5}$ and values for $K$ (`water_erodibility`) between $10^{-4}$ and $10^{-2}$.With a resolution of 10, we evaluate $10^2=100$ simulations. 
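To make the category construction above more concrete, here is a minimal NumPy illustration (not umami's implementation) of how percentile edges such as `[0, 30, 60, 100]` split a field into levels; `discretized_misfit` combines two such discretizations (e.g. 3 chi levels × 2 elevation levels) into the six categories plotted later in this notebook.

```python
import numpy as np

def percentile_categories(values, percentile_edges):
    """Return an integer level (1..n_bins) for each value, split at the given percentile edges."""
    interior = np.percentile(values, percentile_edges[1:-1])   # e.g. the 30th and 60th percentiles
    return np.digitize(values, interior) + 1

rng = np.random.default_rng(42)
chi_like = rng.random(12)                                      # stand-in for channel__chi_index values
print(percentile_categories(chi_like, [0, 30, 60, 100]))       # each entry is 1, 2 or 3
```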
Feel free to change the resolution value, though note that it will impact the run time of this notebook.resolution = 10 durations = np.logspace(3, 5, num=resolution) water_erodibilitys = np.logspace(-4, -2, num=resolution)We evaluate each pair of duration and water erodability and save the model output as a dictionary. With the line np.random.seed(42)commented out, each evaluation uses a different random seed. Feel free to uncomment this line to see how the results change if the *exact same* random seed is used for each model integration.out = {} for i, (duration, water_erodibility) in enumerate(tqdm(product(durations, water_erodibilitys))): lowering_rate = 100 / duration test_params = StringIO( spec_string.format(duration=duration, water_erodibility=water_erodibility, lowering_rate=lowering_rate, name=i)) #np.random.seed(42) test = Basic.from_file(test_params) test.run() test.remove_output_netcdfs() residual = Residual(test.grid, truth.grid, chi_finder_kwds={"min_drainage_area": 1000}) residual.add_from_file(StringIO(residual_string)) residual.calculate() values = {name: residual.value(name) for name in residual.names} out[(duration, water_erodibility)] = valuesBefore looking at the results, let's inspect the residual class. The property `residual.names` has a name for each of the six categories. The temporary strings `{field_1_level}` and `{field_2_level}` have been replaced with actual levels.residual.namesThe property `residual.values` has one value for each of the six categories.residual.valuesWe can plot the category using the `residual.category` property. Here each category gets its own panel. For example, the leftmost column represents the cells with `channel__chi_index` values in the lower 30%. The upper left panel is those that have the lower half of elevation values.fig, axes = plt.subplots(2, 3, dpi=150) for i, name in enumerate(residual.names): col, row = np.unravel_index(i, (3,2)) plt.sca(axes[row, col]) imshow_grid(truth.grid, residual.category==(i+1), cmap="cividis", allow_colorbar=False) plt.title(name) plt.xticks([]) plt.yticks([]) plt.xlabel(None) plt.ylabel(None) plt.tight_layout()Next we compile the output into a pandas dataframe and inspect.df = pd.DataFrame.from_dict(out, orient="index") df.index.names = ["duration", "water_erodibility"] df.head()Similar to the earlier notebook, we melt this dataframe in order to plot it. We also rename the column "value" to "sum_of_squared_residuals", as this is what `discretized_misfit` calculates.df_melt = df.reset_index().melt(id_vars=["duration", "water_erodibility"]) df_melt = df_melt.rename(columns={"value": "sum_of_squared_residuals"}) df_melt.head()We also use the [string.split](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.split.html) pandas function in order to turn our variable name into two columnsvariable_split = df_melt["variable"].str.split(".", n=1, expand=True) df_melt["chi"] = variable_split[0] df_melt["elev"] = variable_split[1] df_melt.head()Finally we plot the squared residual as a function:p1 = (ggplot(df_melt, aes(x="duration", y="water_erodibility", fill="sum_of_squared_residuals")) + geom_tile() + geom_point(aes(x=truth_duration, y=truth_water_erodibility)) + scale_fill_continuous(limits=[1, 100], trans="log10") + facet_grid("elev~chi") + theme_bw() + scale_x_log10() + scale_y_log10() + coord_equal()) print(p1)Objects Introduction You will read more than once that C++ is an **object oriented** programming language. 
That is, C++ can deal with **objects** and **classes**: a **class** describes some data together with the operations you can perform on that data, and an **object** is an instance of a class, i.e. data and operations bundled within the same structure. As an example, imagine we have a store and we want to write something to check the stock. In our store we sell several things, but each thing has several properties in common, with different values:

- Name
- Price
- Number of items available

Dealing with objects in the store would be annoying without classes. If we have a **class** called `item`, we can store information about each type of item, such as its price and how many items we have left. Actually, during this workshop you have already used classes without knowing it, such as `vector` or `string`. In C++, defining a class is as simple as:

```c++
class Item {
 public:
  // Mutators or Setters
  void SetName(std::string name);
  void SetPrice(double price);
  void SetNumItems(int num_items);

  // Accessors or Getters
  std::string GetName();
  double GetPrice();
  int GetNumItems();

 private:
  // Class members
  std::string name_;
  double price_;
  int num_items_;
};
```

Let's analyze this step by step.

```c++
class Item {
```

This first part sets the name of the class. The name of a class should be capitalized, without spaces or underscores, and if it is composed of several words, capitalize only the first letter of each word.

```c++
 public:
```

The `public:` keyword means that once you create an object of the class, you will be able to access everything that comes after it. An example would be the `size()` member function of a vector or a string: you create the `vector` and then you can call its `size()` function.

```c++
  // Mutators or Setters
  void SetName(std::string name);
  void SetPrice(double price);
  void SetNumItems(int num_items);
```

The **mutators** or **setters** are member functions (subroutines) of the class that allow you to modify variables of the class. In this example we have 3 different functions that set three different variables. If a function is a setter, you usually start its name with `Set...`, but there can be other types of functions that access the data, such as a print or a calculation.

```c++
  // Accessors or Getters
  std::string GetName();
  double GetPrice();
  int GetNumItems();
```

The **accessors** or **getters** are functions that allow you to obtain the content of variables of the class. Usually, you start their names with `Get...`.

```c++
 private:
```

The `private:` keyword makes everything that comes after it inaccessible from the outside. **It is usually good practice to make all the member variables private and only allow their modification via setters and getters**. Sometimes one might also add private helper functions to the class: not only variables can be private, but functions and subroutines can be private too.

```c++
  // Class members
  std::string name_;
  double price_;
  int num_items_;
};
```

Finally, we have the **class member variables**. These variables should be private, and they contain the information of the class. Note that all the variables that belong to the class end with `_`. This is a common practice to differentiate variables that belong to the class from variables defined and declared inside the class functions. It is not mandatory, but it is strongly recommended to improve the readability of the code. In the class previously defined, we have declared the member functions, but we have not defined them. Usually, we put the class declaration in the header file and the class definition in the source file.
The header file should contain all the information about the class, its member functions and its member variables, properly commented and documented. Let's look at the following example, which creates the Item class and uses it in the main function:

!gedit src/item_v1.h !gedit src/item_v1.cpp !gedit src/main_item_v1.cpp !g++ -o item_v1.exe src/main_item_v1.cpp src/item_v1.cpp !./item_v1.exe

Examples As an exercise, add a member function called `PrintItem()` that prints all the information of a given item. Modify the main function accordingly.

!gedit src/item_v2.h !gedit src/item_v2.cpp !gedit src/main_item_v2.cpp !g++ -o item_v2.exe src/main_item_v2.cpp src/item_v2.cpp !g++ -o item_v2_sol.exe src/main_item_v2_sol.cpp src/item_v2_sol.cpp print("Your answer:") !./item_v2.exe print("\nSolution:") !./item_v2_sol.exe

Constructors When we declare a class, none of its member variables are initialized unless we specify it. What actually happens is that the code calls the **default constructor**, which declares all the variables in the class but does nothing else. It is common practice **and strongly recommended** to always write a **constructor** for the class, even if it is an empty function. Important points about constructors:

- The constructor is called automatically when a variable of that class type is defined, and can be used to initialize data members.
- It has the same name as the class.
- It does not have a return statement.
- A constructor without any arguments is called the **default constructor**, and it should initialize all data members.
- If no constructor is explicitly defined, the compiler will define one without any arguments and without any statements.

Let's add a constructor to the class we previously created:

!gedit src/item_v3.h !gedit src/item_v3.cpp !gedit src/main_item_v3.cpp !g++ -o item_v3.exe src/main_item_v3.cpp src/item_v3.cpp !./item_v3.exe

Constructor Overloading We saw in the last lesson that C++ is able to overload functions, i.e., have two functions with the same name but different arguments. The same happens with the constructor. It is not mandatory, but it is sometimes nice to give the user the chance to initialize the class members directly from the constructor. As an example, in our Item class we have created a default constructor that initializes all the member variables to a value. Something similar happens with classes you have already used, like `vector` or `string`:

```c++
std::string s;              // Declares a string s, and initializes it to "" with size 0.
std::string s("something"); // Creates a string s, but when creating it, it also fills it with "something".
```

We want to do something similar with our class. In this case, we want to be able to initialize the class with values for the price, the name and the number of items. Thus, we will overload the constructor to also offer that option.

!gedit src/item_v4.h !gedit src/item_v4.cpp !gedit src/main_item_v4.cpp !g++ -o item_v4.exe src/main_item_v4.cpp src/item_v4.cpp !./item_v4.exe

Destructors When we create an object we need a constructor, either the default constructor or an overloaded one. In the constructor, or in some modifier functions, we might perform memory allocations which, unless we free them, will produce memory errors. In this lesson it is not crucial to talk about them, but it is always good practice to add **destructors** to your classes.
- The destructor is always defined by the class name preceded by `~`.
- It should free all the memory and make sure there is not a single bit still allocated after the call to the destructor.

In our Item class, since we don't need any statement in the destructor, we would not need to write it explicitly. However, **it is good practice to ALWAYS write a destructor, even if it is an empty function**. Let's add the destructor to our Item class. In the constructors and destructors there is a print statement that will help us see when each of them is called.

!gedit src/item_v5.h !gedit src/item_v5.cpp !gedit src/main_item_v5.cpp !g++ -o item_v5.exe src/main_item_v5.cpp src/item_v5.cpp !./item_v5.exe

Operator Overloading Sometimes we need to "add", "multiply", or compare objects of a class. The standard operators `+`,`-`,`*`,`==`,... are defined for the built-in variable types, but not for user-defined classes. A solution is to write the actual definition of the operator, known as **operator overloading**. The following code contains a class that has two double numbers for feet and inches. We can define an addition operator that will add the two variables:

!gedit src/operator_overload_01.cpp !g++ -o operator_overload_01 src/operator_overload_01.cpp !./operator_overload_01

As an exercise, overload the operators `` so the main function passes the assertions. Do not modify the main function!

!gedit src/operator_overload_02.cpp !g++ -o operator_overload_02_sol src/operator_overload_02_sol.cpp !g++ -o operator_overload_02 src/operator_overload_02.cpp print("Your answer:") !./operator_overload_02 print("\nSolution:") !./operator_overload_02_sol

Problems Problem 1 Create a molecule class and, without modifying the `main.cpp` file (just uncomment the statements once your class is ready), make it compile and pass the assertions. The molecule class should have the following public member functions:

- Default constructor clearing the vectors, and setting the number of atoms to -1
- Overloaded constructor that creates the molecule passing as arguments **ONLY** the XYZ vector (arg1) and the atom names vector (arg2)
- Destructor
- Function that sets/gets the xyz coordinates (given a vector of double) (Set/GetXyx)
- Function that sets/gets the atom names (given a vector of strings) (Set/GetAtNames)
- Function that sets/gets the number of atoms in the molecule (Set/GetNumAts)

And the following private member variables:

- A double vector with the coordinates
- A string vector with the atom names
- An integer with the number of atoms

Edit the `molecule.cpp` and `molecule.h` files to implement your class, and then uncomment all the commented statements in `main.cpp`.

!gedit src/problem_01/molecule.cpp !gedit src/problem_01/molecule.h !gedit src/problem_01/main.cpp !g++ -std=c++11 -o main.exe src/problem_01/main.cpp src/problem_01/molecule.cpp !g++ -std=c++11 -o main_sol.exe src/problem_01_sol/main_sol.cpp src/problem_01_sol/molecule_sol.cpp print("Your answer:") !./main.exe print("\nSolution") !./main_sol.exe{"549541":{"success":true,"data":{"type":"episode","name":"Robotpencil Presents: Speed Designing: Iteration is King","steam_appid":549541,"required_age":0,"is_free":false,"controller_support":"full","detailed_description":"","about_the_game":"","short_description":"Designing something you don't know
\r\nIteration is king","fullgame":{"appid":"549530","name":"Robotpencil Presents: Speed Designing"},"supported_languages":"English*<\/strong>
*<\/strong>languages with full audio support","header_image":"https:\/\/steamcdn-a.akamaihd.net\/steam\/apps\/549541\/header.jpg?t=1479148176","website":null,"pc_requirements":[],"mac_requirements":[],"linux_requirements":[],"publishers":[""],"package_groups":[],"platforms":{"windows":true,"mac":false,"linux":false},"categories":[{"id":28,"description":"Full controller support"},{"id":33,"description":"Native Steam Controller Support"}],"release_date":{"coming_soon":false,"date":"Nov 14, 2016"},"support_info":{"url":"","email":""},"background":""}}}{ "knxUltimateSceneController": { "helplink" : "  
Aiuto per configurazione", "title": "Controller scena", "properties": { "node-input-server": "Gateway", "node-input-topic": "Richiama Scena", "node-input-dpt": "Datapoint", "node-input-topicTrigger": "Trigger", "node-input-topicSave": "Salva Scena", "node-input-name": "Nome nodo", "node-input-outputtopic": "Topic" }, "placeholder": { "leaveempty": "Lascia vuoto per usare l'indirizzo di gruppo", "valueexample": "Es: false, true, altrimenti qualsiasi valore" }, "other": { "sceneConfig": "Configurazione Scena", "add": "Permi Aggiungi, per aggiungere un dispositivo alla scena" }, "advanced": { "notify-DPT3007": "Hai selezionato un DIM relativo. Per favore clicca qui per vedere un esempio e capire come gestire il relativo payload.", "notify-DPT18001": "Hai selezionato un datapoint scena. Per favore clicca qui per vedere un esempio per capire come gestire il relatico payload." ,"notify-DPT232600": "Hai selezionato un datapoint RGB. Per favore clicca qui per vedere un esempio per capire come gestire il relatico payload." ,"notify-DPT251600":"Hai selezionato un datapoint RGBW. Per favore clicca qui per vedere un esempio per capire come gestire il relatico payload." } } }elifesciences-publications/scihub-browser-data version https://git-lfs.github.com/spec/v1 oid sha256:9fb09eb405bfc27aa597ab5392cc04c257b8a77f2bccbbdf58943788c6596f6e size 433 htcondor/htcondor.org0 {"id": 5316, "title": "Ticket #5316: clean up debugging output on V8_5-CERN-Cred-branch", "description": "
\nTo get the code completed quickly, I didn't spend much time thinking about optimal debug messages, levels, etc. Before merging V8_5-CERN-Cred-branch back to master, that all needs to be tidied up.
", "remarks": "
\n
", "derived_tickets": "", "attachments": " ", "check_ins": "\n\n\n\n\n\n\n
2016-Mar-09 16:12\n\u00a0 \nCheck-in [47687]: Tidy up some debug messages. #5316 (By zmiller )
2015-Dec-08 16:00\n\u00a0 \nCheck-in [46558]: clean up debug messages and move to a less-noisy debug level. #5316 (By zmiller )
", "type": "enhance", "last_change": "2016-Apr-26 20:58", "status": "resolved", "created": "2015-Oct-15 13:46", "fixed_version": "2015-Oct-15 13:46", "broken_version": "", "priority": "1", "subsystem": "Security", "assigned_to": "zmiller", "derived_from": "#5272", "creator": "zmiller", "rust": "", "customer_group": "cern", "visibility": "public", "notify": ", ", "due_date": "20151210"}1-10 {"FileAPI.js":","FileAPI.min.js":","ng-file-upload-all.js":","ng-file-upload-all.min.js":"sha512-RCn8A999Vvm2ZeZM25bykqz0x4zR1WftkiSeia6F/1AhFBD7pZK7dAsbjG/iHsv510L38f14IyPLGJbNo7LsPw==","ng-file-upload-shim.js":"sha512-Zdgt5oN0SP3IVwgD5jS0XtVJxIJLij9vRpPNrq4fWyHlLhFsrowoF8btvfYF4794Sk0VDyoqHISMVh0wx65dUg==","ng-file-upload-shim.min.js":"sha512-DIxSdKD+9cI4fHy9KvusLK+bQZBc7gO+4S4+t8fheD5w14is0KW1xsmKE33ag3ExSHUcp0nLcMzRfGoO1vdz9Q==","ng-file-upload.js":"sha512-C3UlpvGXya3opy55eZ2snITRI3muqIHUzw5/D8iUX/LfLyHOO+x01NpGKAvaYfMyCH341VCo4Zu10Qcv/ZkG5Q==","ng-file-upload.min.js":"}jasonmb626/react-native-media-player { "name": "example", "version": "0.0.1", "private": true, "scripts": { "start": "node node_modules/react-native/local-cli/cli.js start" }, "dependencies": { "react": "15.3.2", "react-native": "0.35.0", "react-native-extra-dimensions-android": "github:belinchung/react-native-extra-dimensions-android", "react-native-fs": "^1.2.0", "react-native-media-player": "*", "react-native-sk-toast": "^1.0.1", "underscore": "^1.8.3" }, "devDependencies": { "babel-eslint": "^4.1.8", "eslint": "^1.10.3", "eslint-plugin-react": "^3.16.1" } } keys/STR009_en.json {"code": "STR009", "lang": "en", "description": "\nTurnover tax on supplies and services\n\nExplanatory notes for the following statistics:\n73311 Turnover tax statistics (advance returns)\n73321 Turnover tax statistics (assessments)\n\nDefinition:\nApplying the tax rates to the assessment basis for supplies\nand services provides the turnover tax on supplies and\nservices. There is a special regulation for turnover\nachieved by agricultural or forestry holdings in accordance\nwith Section 24 (1) of the Turnover Tax Law.\n\nTurnover is measured as follows:\n- for supplies and other services, it is generally measured\nby the remuneration (Section 10 (1) of the Turnover Tax\nLaw),\n- for own consumption as defined in Section 3 (1b) of the\nTurnover Tax Law, it is measured by the purchase price\nplus incidental expenses or, in the absence of a purchase\nprice, by the cost price (Section 10 (4) of the Turnover\nTax Law),\n- for travel services as defined in Section 25 (1) of the\nTurnover Tax Law, it is measured by the difference between\nthe amount spent by the customer and the amount spent by\nthe entrepreneur on intermediate consumption (Section 25\n(3) of the Turnover Tax Law - margin scheme),\n- for turnover regarding movable tangible property, it is\nmeasured - under certain conditions - by the amount by\nwhich the selling price exceeds the purchase price\n(Section 25a (3) of the Turnover Tax Law - margin scheme).\n\nThe turnover tax which, in accordance with Section 10 (4),\nsecond sentence of the Turnover Tax Law, is not part of the\nassessment basis should always be calculated on the basis of\nthe remuneration agreed upon (accrual accounting) (Section\n16 (1) of the Turnover Tax Law). 
Tax calculation by\nremuneration received (cash accounting) is limited to\nenterprises with a total turnover of not more than 500,000\neuros in the previous year (from 1 September 2009), to those\nnot obliged to keep accounts and to those belonging to the\nliberal professions (Section 20 of the Turnover Tax Law).\n\nSince 1 July 2007, the turnover tax has amounted to 19\npercent of the assessment basis for any taxable turnover\n(Section 12 (1) of the Turnover Tax Law); for a number of\nturnover types it is reduced to 7 percent (Section 12 (2) of\nthe Turnover Tax Law), among other things, for the supply,\nimportation, intra-Community acquisition and letting of the\nitems listed in the Annex to the Turnover Tax Law (e.g.\nagricultural and forestry products, food, printed books,\nnewspapers, pictures and other products of the printing\nindustry, manuscripts, typescripts and plans, specific\nappliances for sick people, art objects); the reduced tax\nrate also applies to specific services in the cultural area\nand to short-distance passenger transport in accordance with\nSection 12 (2), number 10 of the Turnover Tax Law.\n\n\n\u00a9 Statistisches Bundesamt, Wiesbaden 2015", "name": "Turnover tax on supplies and services", "type": "variable"}templates/nanopublications.json {% set qr = this.graph.query(''' select distinct ?np ?about ?quoted ?contributor ?created ?modified (coalesce(?modified, ?created) as ?updated) ?derived_from ?generation_type ?content ?reply_of ?work where { { graph ?np { ?np sio:isAbout ?e.}} UNION {graph ?assertion { ?e ?p ?o.}} ?np a np:Nanopublication; np:hasProvenance ?provenance; np:hasPublicationInfo ?pubinfo; np:hasAssertion ?assertion. graph ?pubinfo { ?np frbr:realizationOf ?work. } optional { ?np sio:isAbout ?about } optional { ?assertion prov:wasQuotedFrom ?quoted } optional { ?assertion dc:contributor ?contributor } optional { graph ?pubinfo { ?assertion dc:created ?created. } } optional { graph ?pubinfo { ?assertion dc:modified ?modified. } } optional { ?assertion prov:wasDerivedFrom ?derived_from. } optional { ?assertion prov:wasGeneratedBy ?generating_event. ?generating_event a ?generation_type. } optional { ?assertion sioc:content ?content. } optional { ?assertion sioc:reply_of ?reply_of. 
} }''', initNs=dict(np=ns.np, sio=ns.sio, prov=ns.prov, dc=ns.dc, frbr=ns.frbr, sioc=ns.sioc), initBindings={"e":this.identifier}) %} [ {% for row in qr %} {{row.asdict()|tojson}}{% if not loop.last %},{% endif %} {% endfor %} ] fredatgithub/SqlQueryStress { "profiles": { "SqlQueryStressCLI": { "commandName": "Project", "commandLineArgs": "-s sample.json -t 8 -i script.sql" } } }{ "commentStamp" : "GastonDallOglio 9/12/2011 20:38", "super" : "JQWidgetExample", "category" : "JQWidgetBox-MapQuery-Dev-Examples", "classinstvars" : [ ], "pools" : [ ], "classvars" : [ ], "instvars" : [ "idMap", "idLayerManager", "idOverviewMap", "idFeatureInfo", "idLayerControl", "idZoomButtons", "idZoomSlider", "idPopup", "idMousePosition" ], "name" : "JQMapQueryExample", "type" : "normal" }Deivis/FleaMarket { "extends": "airbnb", "parserOptions": { "ecmaVersion": 6, "sourceType": "module", "ecmaFeatures": { "jsx": true } }, "rules": { "import/no-extraneous-dependencies": "off", "react/jsx-filename-extension": "off", "no-debugger": "warn", "import/prefer-default-export": "warn" }, "env": { "node": true, "browser": true, "es6": true, "jest": true }, "plugins": [ "react", "jsx-a11y", "import" ] } {"nom":"Yvernaumont","circ":"1ère circonscription","dpt":"Ardennes","inscrits":91,"abs":42,"votants":49,"blancs":6,"nuls":0,"exp":43,"res":[{"nuance":"LR","nom":"","voix":34},{"nuance":"REM","nom":"","voix":9}]}kerlos/goodwork { "private": true, "scripts": { "dev": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=development node_modules/webpack/bin/webpack.js --progress --hide-modules --config=node_modules/laravel-mix/setup/webpack.config.js", "build": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=development node_modules/webpack/bin/webpack.js --progress --hide-modules --config=node_modules/laravel-mix/setup/webpack.config.js", "watch": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=development node_modules/webpack/bin/webpack.js --watch --progress --hide-modules --config=node_modules/laravel-mix/setup/webpack.config.js", "observe": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=development node_modules/webpack/bin/webpack.js --watch --progress --hide-modules --config=node_modules/laravel-mix/setup/webpack.config.js", "hot": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=development node_modules/webpack-dev-server/bin/webpack-dev-server.js --inline --hot --config=node_modules/laravel-mix/setup/webpack.config.js", "production": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=production node_modules/webpack/bin/webpack.js --progress --hide-modules --config=node_modules/laravel-mix/setup/webpack.config.js", "watch-prod": "node node_modules/cross-env/dist/bin/cross-env.js NODE_ENV=production node_modules/webpack/bin/webpack.js --watch --progress --hide-modules --config=node_modules/laravel-mix/setup/webpack.config.js", "test": "jest", "watch:test": "jest --watch --bail" }, "dependencies": { "laravel-echo-server": "^1.3.6" }, "devDependencies": { "@fortawesome/fontawesome-svg-core": "^1.2.6", "@fortawesome/free-solid-svg-icons": "^5.4.1", "@fortawesome/vue-fontawesome": "^0.1.1", "axios": "^0.15.2", "babel-jest": "^22.4.3", "cross-env": "^3.2.4", "glob-all": "^3.1.0", "jest": "^22.4.3", "jquery": "^3.3.1", "jsdom": "^11.8.0", "jsdom-global": "^3.0.2", "laravel-echo": "^1.3.5", "laravel-mix": "^2.0.0", "luxon": "^1.0.0", "purgecss-webpack-plugin": "^1.3.0", "quill": "^1.3.6", "tailwindcss": "^0.6.5", "vue": "^2.5.13", "vue-jest": "^2.5.0", 
"vue-test-utils": "^1.0.0-beta.11", "vuejs-datepicker": "^0.9.25", "webpack": "^3.10.0" }, "jest": { "moduleFileExtensions": [ "js", "json", "vue" ], "moduleNameMapper": { "^vue$": "vue/dist/vue.common.js" }, "moduleDirectories": [ "node_modules", "/resources/assets/js/" ], "setupTestFrameworkScriptFile": "/tests/JS/setup.js", "transform": { ".*\\.(vue)$": "/node_modules/vue-jest", "^.+\\.js$": "/node_modules/babel-jest" }, "roots": [ "/resources/assets/js/", "/tests/JS/" ] }, "standard": { "globals": [ "Vue", "axios", "luxon", "Laravel", "Echo", "describe", "test", "expect", "jest", "it" ] } } [{"team_id": 68, "team": "Boston", "id": "383", "name": "", "year": "Sophomore", "hometown": "Milwaukee, WI", "high_school": "Divine Savior Holy Angels", "previous_school": null, "height": "6'2\"", "position": "F/C", "jersey": "1", "url": "/sports/womens-basketball/roster/ashley-carr/383", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "384", "name": "", "year": "Sophomore", "hometown": "Lafayette Hill, PA", "high_school": "Mt. St. Joseph Academy", "previous_school": null, "height": "5'8\"", "position": "G", "jersey": "3", "url": "/sports/womens-basketball/roster/courtney-jones/384", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "385", "name": "", "year": "Junior", "hometown": "Hornell, NY", "high_school": "Hornell", "previous_school": null, "height": "5'7\"", "position": "G", "jersey": "10", "url": "/sports/womens-basketball/roster/alison-argentieri/385", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "386", "name": "", "year": "Sophomore", "hometown": "Madrid, Spain", "high_school": "Westhill (NY", "previous_school": null, "height": "6'5\"", "position": "C", "jersey": "11", "url": "/sports/womens-basketball/roster/amparo-lopez/386", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "387", "name": "", "year": "Sophomore", "hometown": "Springfield, MA", "high_school": "Longmeadow", "previous_school": null, "height": "6'2\"", "position": "F", "jersey": "12", "url": "/sports/womens-basketball/roster/marisa-moseley/387", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "388", "name": "", "year": "Sophomore", "hometown": "Port Murray, NJ", "high_school": "Warren Hills", "previous_school": null, "height": "5'11\"", "position": "G", "jersey": "15", "url": "/sports/womens-basketball/roster/katie-terhune/388", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "389", "name": "", "year": "Senior", "hometown": "Blue Bell, PA", "high_school": "Germantown Academy", "previous_school": null, "height": "6'2\"", "position": "F/C", "jersey": "20", "url": "/sports/womens-basketball/roster/anne-nelson/389", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "390", "name": "", "year": "Senior", "hometown": "Boston, MA", "high_school": "Boston Latin", "previous_school": null, "height": "5'7\"", "position": "G", "jersey": "22", "url": "/sports/womens-basketball/roster/annie-tomasini/390", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "391", "name": "", "year": "Sophomore", "hometown": "Oyster Bay, NY", "high_school": "St. 
Dominic", "previous_school": null, "height": "5'8\"", "position": "G", "jersey": "23", "url": "/sports/womens-basketball/roster/lashaunda-mitchell/391", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "392", "name": "", "year": "Senior", "hometown": "Malone, NY", "high_school": "Franklin Academy", "previous_school": null, "height": "6'0\"", "position": "G/F", "jersey": "30", "url": "/sports/womens-basketball/roster/dia-dufault/392", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "393", "name": "", "year": "Freshman", "hometown": "Delafield, WI", "high_school": "Kettle Moraine", "previous_school": null, "height": "6'1\"", "position": "F", "jersey": "32", "url": "/sports/womens-basketball/roster/adrienne-norris/393", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "394", "name": "", "year": "Senior", "hometown": "Madrid, Spain", "high_school": "Patracino de San Jose", "previous_school": null, "height": "5'9\"", "position": "G", "jersey": "33", "url": "/sports/womens-basketball/roster/pilar-verde/394", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "395", "name": "", "year": "Freshman", "hometown": "Eau Claire, WI", "high_school": "Eau Claire North", "previous_school": null, "height": "6'0\"", "position": "F", "jersey": "42", "url": "/sports/womens-basketball/roster/larissa-parr/395", "season": "2001-02"}, {"team_id": 68, "team": "Boston", "id": "396", "name": "", "year": "Junior", "hometown": "Glendale, AZ", "high_school": "Cactus", "previous_school": null, "height": "6'2\"", "position": "F", "jersey": "54", "url": "/sports/womens-basketball/roster/rachel-werner/396", "season": "2001-02"}]{ "@metadata": { "authors": [ "Max20091" ] }, "index.wantHelp": "Bạn cần trợ giúp?", "index.wantHelpLong": "Có vẻ như đây là lần đầu tiên bạn đã sử dụng NFC Ring ControlTalk app. Bạn muốn một số trợ giúp?", "index.whatDoYouWantToDoToday": "Bạn muốn làm gì ngày hôm nay?", "index.create": "Tạo", "index.createDescription": "Viết một hành động mới để chiếc vòng của bạn", "index.createDescriptionMore": "Sử dụng trang này để viết nội dung mới hoặc hành động của bạn vào NFC Ring. Bạn sẽ cần phải biết những gì bạn muốn viết chẳng hạn như URL và bạn sẽ cần đến NFC Ring của bạn.", "index.read": "Đọc", "index.readDescription": "Xem những gì trên Ring của bạn", "index.readDescriptionMore": "Bạn muốn kiểm tra những gì trong Ring của bạn? Sử dụng tùy chọn này.", "index.settings": "Cài đặt", "index.settingsDescription": "Cấu hình ứng dụng của bạn", "index.settingsDescriptionMore": "Cấu hình ứng dụng của bạn.", "index.feedback": "Phản hồi", "index.feedbackDescription": "Thích hoặc muốn thay đổi cái gì đó? Hãy cho chúng tôi biết", "index.feedbackDescriptionMore": "Chúng tôi nghe nếu bạn thích hay ghét gì đó. Xin vui lòng cho chúng tôi biết!", "context.recentActions": "Xem hành động vừa mới làm", "context.history": "Lịch sử", "action.whatDoYouWantYourRingToDo": "Bạn muốn Ring của bạn làm gì?", "option.next": "Tiếp theo", "option.dataWontFit": "Chú ý: Số dữ liệu có thể không đủ để chứa trong NFC Ring của bạn", "writeRing.finish": "Hoàn thành", "writeRing.noNFC": "Chức năng NFC không hoạt động. 
NFC có được kích hoạt trên thiết bị của bạn?", "writeRing.woohoo": "Woohoo!", "writeRing.ready": "NFC Ring của bạn đã sẵn sàng", "writeRing.fail": "Không thể ghi vào NFC Ring của bạn", "readRing.contents": "nội dung trong Ring:", "settings.previousActions": "Hành động trước đó của tôi", "settings.previousActionsDescription": "Xóa lịch sử trước đó của tôi", "settings.version": "Phiên bản", "settings.phoneModel": "Model điện thoại", "settings.changeLanguage": "Thay đổi ngôn ngữ của App", "help.helpTips": "Trợ giúp", "help.volume": "bấm nút tăng âm lượng", "help.on": "Đảm bảo rằng màn hình điện thoại và điện thoại đang bật (thiết bị thường đi ngủ)", "help.slowly": "Di chuyển Ring của bạn chậm xung quanh phía sau của điện thoại", "help.another": "Hãy thử với một thiết bị khác có NFC nếu bạn có.", "help.gotIt": "OK, tôi đã nhận nó", "actions.twitter.name": "Twitter", "actions.twitter.description": "Liên kết với một người dùng Twitter", "actions.twitter.optionText": "Tên Twitter user name là gì?", "actions.facebook.name": "Facebook", "actions.facebook.description": "Liên kết đến một trang Facebook", "actions.facebook.optionText": "URL trang Facebook của bạn là gì?", "actions.link.name": "Liên kết", "actions.link.description": "Liên kết với một địa chỉ web", "actions.link.optionText": "Tên URL của trang web là gì?", "actions.youtube.name": "YouTube", "actions.youtube.description": "Liên kết đến một video hoặc kênh", "actions.youtube.optionText": "YouTube Video hoặc kênh YouTube của bạn là gì?", "actions.text.name": "Văn bản", "actions.text.description": "Viết văn bản tùy ý chẳng hạn như một khóa Bitcoin công khai.", "actions.text.optionText": "Văn bản của bạn là gì?", "actions.etherpad.name": "Etherpad", "actions.etherpad.description": "Liên kết đến một Etherpad", "actions.etherpad.optionText": "URL pad của bạn là gì?", "sweetSpot.pressTheScreen": "Nhấn màn hình vào vị trí Ring của bạn đã đăng ký.", "sweetSpot.holdRingToPhoneByDot": "Giữ NFC Ring của Bạn vào mặt sau điện thoại của bạn ở vị trí được chỉ định bởi các dấu chấm màu.", "sweetSpot.looksGood": "Có vẻ tốt! Có phải dấu chấm đỏ gần nơi Ring hoạt động trên điện thoại của bạn?", "sweetSpot.showHelp": "Hiện trợ giúp", "sweetSpot.done": "Đã xong. Cảm ơn bạn!", "sweetSpot.areYouSureActions": "Bạn có chắc bạn muốn xóa của bạn hành động trước đó không?" 
} { "Opel": ["Agila", "Astra", "Corsa", "Vectra"], "Škoda": ["Fabia", "Octavia", "Superb", "Yeti"], "Toyota": ["Auris", "Avensis", "Corolla", "Prius"], "Mercedes": ["A-Class", "B-Class", "C-Class", "E-Class"] }Leejunghoon1165/CardAndDungeon0 {"m_ScrollY":0.0,"m_ExpandedPrefabGameObjectFileIDs":[957587765832077697],"m_LastClickedFileID":957587765832077697}Zissan/product-workspace { "name": "product-workspace", "version": "1.0.0", "repository": "https://github.com/Zissan/product-workspace.git", "author": "", "license": "MIT", "private": true, "workspaces": [ "packages/*" ] } {"source":{"type":"local","path":"/var/folders/y1/vf772b01257__8hmsbf6vtd80000gn/T/com.blackberry.utils/package"}}{ "name_en_US": "Average Blur", "name_pt_BR": "Média Blur", "description_en_US": "Define a blur using the image average", "description_pt_BR": "Define um blur para imagem utilizando a média", "author": "VISNode team", "helpUrl": "https://raw.githubusercontent.com/Jouwee/VISNode/master/src/main/resources/visnode/pdi/process/AverageBlurProcess.md", "codeUrl": "https://raw.githubusercontent.com/GrupoTorax/PDI/master/src/main/java/org/paim/pdi/AverageBlurProcess.java", "scriptUrl": "https://raw.githubusercontent.com/Jouwee/VISNode/master/src/main/resources/visnode/pdi/process/AverageBlurProcess.js", "projectUrl": "https://raw.githubusercontent.com/Jouwee/VISNode/master/src/main/resources/visnode/pdi/process/AverageBlurProcess.vnp" } { "value" : "Topic", "concept" : "http:\/\/webconcepts.info\/concepts\/http-header\/", "id" : "http:\/\/webconcepts.info\/concepts\/http-header\/Topic", "details" : [ { "description" : "A push message topic is a string carried in a Topic header field. A topic is used to correlate push messages sent to the same subscription and does not convey any other semantics.", "documentation" : "http:\/\/tools.ietf.org\/html\/rfc8030#section-5.4", "specification" : "http:\/\/webconcepts.info\/specs\/IETF\/RFC\/8030", "spec-name" : "RFC 8030" } ] }{"leaflet.freedraw-src.js":"sha256-bnbc1T3BjV48fP2tJe4CFlLNQ4yvJ7HXheHeMUBKnXg=","leaflet.freedraw.js":"}10-100 {"title": "None Of My Business-Cher Lloyd", "author": "", "musicType": "流行", "musicNo": "0010169"}0 { "title":"What is node core versus userland", "date": "Fri Aug 26 2011 03:08:50 GMT-0700 (PST)", "tags": ["npm", "core", "userland", "terminology"], "author": "", "difficulty": 1 } 0 {"URL": "https://www.wired.com/1999/11/beauty-rest", "heading": "beauty rest", "subheading": "scientists studying sleep habits conclude that if you're one of those early-to-bed, early-to-rise types, you're probably a grouch. or at least grouchier and more stressed out than your friends who sleep in. it's hormonal, apparently. the results, published in new scientist, found that people who have to get up early have higher levels of cortisol, the body's main stress hormone. so the next time one of them flips you the bird be a little understanding, fer cryin' out loud.", "author": "wired staff", "category": "culture", "type": "article", "timestamp": "11.04.1999 03:00 AM", "text": "scientists studying sleep habits conclude that if you're one of those early-to-bed, early-to-rise types, you're probably a grouch. or at least grouchier and more stressed out than your friends who sleep in. it's hormonal, apparently. the results, published in new scientist, found that people who have to get up early have higher levels of cortisol, the body's main stress hormone. 
so the next time one of them flips you the bird be a little understanding, fer cryin' out loud."}[ { "op": "test", "path": "/key", "value": "le_burn_out_agile" }, { "op": "replace", "path": "/description", "value": "En 2019, le monde de l’informatique n’a que l’agilité à la bouche. Ca groome à tous les coins de rue, ça s’excite en daily, ça colle des post-it et ça empile les serious game en rétro. Bien sûr, ça gère la transformation numérique (désolé, même au second degré, je ne peux pas employer “digitale”) et au final plus personne ne comprend rien. \n\nDes chefs de projet subitement propulsés ScrumMaster qui ne savent pas ce qu’ils doivent faire. Des PO qui découvrent la priorisation. Des développeurs qui doivent composer avec des specs floues et changeantes sans vision globale du projet. Pour compléter le tableau, la subite apparition de coachs agile à peine sortis de l’école qui n’ont jamais vu un projet de leur vie et qui viennent expliquer des concepts qu’ils ne maîtrisent qu’à moitié.\n\nEn 2019 le monde de l’informatique rêve secrètement de cycles en V, de stabilité, de spécifications générales et de dossier d’architecture.\n\nEn 2019 le monde de l’informatique est au bord du burn-out agile.\n\n## Plan du talk \n\n- Petit historique : l’évolution des pratiques sur les 20 dernières années \n- Pourquoi ce bordel agile ? \n- Finalement l’agilité c’est quoi ? \n- Est-ce qu’il faut revenir au cycle en V ?" } ] { "id": 8130, "citation_title": "Cross-Country Technology Diffusion: The Case of Computers", "citation_author": [ "", "" ], "citation_publication_date": "2001-02-01", "issue_date": "2001-02-01", "revision_date": "None", "topics": [ "Macroeconomics", "Macroeconomic Models", "Development and Growth", "Innovation and R&D" ], "program": [ "International Trade and Investment", "Productivity, Innovation, and Entrepreneurship" ], "projects": null, "working_groups": null, "abstract": "\n\nWe use data on imports of computer equipment for a large sample of countries between 1 970 and 1990 to investigate the determinants of computer-technology adoption. We find strong evidence that computer adoption is associated with higher levels of human capital and with manufacturing trade openness vis-a-vis the OECD. We also find evidence that computer adoption is enhanced by high investment rates, good property rights protection, and a small share of agriculture in GDP. Finally, there is some evidence that adoption is reduced by a large share of government in GDP, and increased by a large share of manufacturing. After controlling for the above-mentioned variables, we do not find an independent role for the English- (or European-) language skills of the population.\n\n", "acknowledgement": "\n" }{ "citations" : [ { "textCitation" : "[See cncnpi on Metamath](http://us.metamath.org/mpegif/cncnpi.html)" } ], "names" : [ "cncnpi" ], "language" : "METAMATH_SET_MM", "lookupTerms" : [ "#T_cX", "#T_wceq", "#T_cuni", "#T_cJ", "#T_cF", "#T_wcel", "#T_cJ", "#T_ccn", "#T_cK", "#T_wa", "#T_cA", "#T_wcel", "#T_cX", "#T_wi", "#T_cF", "#T_wcel", "#T_cJ", "#T_ccnp", "#T_cK", "#T_cfv", "#T_cA" ], "metaLanguage" : "METAMATH", "remarks" : " A continuous function is continuous at all points. One direction of Theorem 7.2(g) of [Munkres] p. 107. (Contributed by , 20-Nov-2006.) (Proof shortened by , 21-Aug-2015.) ", "statement" : "cnsscnp.1 $e |- X = U. J $.\ncncnpi $p |- ( ( F e. ( J Cn K ) /\\ A e. X ) -> F e. ( ( J CnP K ) ` A ) ) $." 
}{ "title": "Nasutoceratops: 'Big-nose, horn-face' dinosaur described", "text": "An unusual new species of dinosaur, unearthed from the deserts of Utah, has been described by scientists.\n\nThe 5m-long (15ft) beast is a member of the triceratops family, but with a huge nose and exceptionally long horns, palaeontologists say it is unlike anything they have seen before.\n\nIt has been named accordingly as Nasutoceratops titusi, which means big-nose, horn-face.\n\nThe research is published in the Proceedings of the Royal Society B.\n\nDr , from the University of Utah and Natural History Museum of Utah, told BBC News: \"This dinosaur just completely blew us away.\n\n\"We would never have predicted it would look like this - it is just so outside of the norm for this group of dinosaurs.\"\n\nFearsome vegetarian?\n\nThe creature was first discovered in 2006 the Grand Staircase-Escalante Monument area of Utah.\n\nHowever, it has taken several years to prepare and then study the fossil in detail.\n\nThe rocks it was found in date to about 75-million-years old, so the beast would have roamed the Earth during the Late Cretaceous period.\n\n\"The horns are by far the absolute largest of any member of its group of dinosaurs - they curve sideways and forwards,\" explained Dr Loewen.\n\n\"In addition it has the biggest nose of its group too.\"\n\nHe added that it also had a scalloped frill at the back of its head.\n\nNasutoceratops was also hefty, weighing about 2.5 tonnes, and with its unusual looks it would have cut a fearsome figure.\n\nHowever this species, like all members of the triceratops family is a herbivore. It would have been more concerned with feasting on plants in its tropical, swampy surrounds than terrorising other dinosaurs.\n\n'Treasure trove'\n\nNasutoceratops is one of a number of species that have been discovered in this area of North America.\n\nThe desert where it was found would have once formed part of a continent called Laramidia, which has been described as a treasure trove for fossils.\n\nOther plant-eating species, including two other kinds of horned dinosaurs and duck-billed hadrosaurs, were found close to Nasutoceratops titusi, suggesting that the creatures were able to co-exist.\n\nDr Loewen said: \"All of these animals are upwards of three tonnes... 
You have an environment where you have all of these large herbivores competing for food.\n\n\"We aren't really sure how you can support all of these animals, but you do find them all in the rock at the same time.\"\n\nHe added that other unusual new species were also emerging from the site.", "image": "http://news.bbcimg.co.uk/media/images/68772000/jpg/_68772158_dino.jpg" } { "name": "metasyntactical/inmemory-logger", "description": "PSR-3 compatible InMemory Logger with ability to fetch logged entries from memory.", "minimum-stability": "stable", "license": "MIT", "authors": [ { "name": "", "email": "" } ], "require": { "php": "~7.2|~7.3|~7.4", "psr/log": "^1.0", "beberlei/assert": "^2.4|^3.0", "regex-guard/regex-guard": "^1.1" }, "require-dev": { "phpspec/phpspec": "^2.4", "henrikbjorn/phpspec-code-coverage": "^2.0" }, "provide": { "psr/log-implementation": "1.0.0" }, "config": { "bin-dir": "bin" }, "autoload": { "psr-4": { "MetaSyntactical\\Log\\InMemoryLogger\\": "src/" } } } Assignment3/Code_Problem1_Task2_and_3/newsData/halifax_1625417510864.json [{"publishedAt":"2021-06-04T12:30:07Z","author":null,"urlToImage":"https://toronto.citynews.ca/wp-content/blogs.dir/sites/10/2021/06/680-NEWS-thumbnail-1-1024x576.png","description":"In December 2020, Rogers Sports & Media announced a similar rebrand of 1310 NEWS in Ottawa, which became CityNews Ottawa & The Valley.","source":{"name":"Citynews.ca","id":null},"title":"Rogers rebrands 680 NEWS, other Canadian radio stations as CityNews - CityNews Toronto","url":"https://toronto.citynews.ca/2021/06/04/rogers-media-rebrands-680-news-canadian-radio-stations-as-citynews/","content":"Rogers Sports & Media announced Friday that its news radio stations \u2013 including Toronto\u2019s 680 NEWS \u2013 will be rebranded under CityNews to \u201cbecome a powerhouse local news offering in each market.\u201d\r\u2026 [+2236 chars]"},{"publishedAt":"2021-06-23T16:46:36Z","author":" Banking correspondent","urlToImage":"https://i.guim.co.uk/img/media/29005cba95288f6083229dea5b070786a28ba4b1/0_137_4000_2399/master/4000.jpg?width=1200&height=630&quality=85&auto=format&fit=crop&overlay-align=bottom%2Cleft&overlay-width=100p&overlay-base64=L2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc&enable=upscale&s=81f1f8eaa1ee467853b38a93682eda43","description":"Unions criticise lender as decision to shut 29 Lloyds and 15 Halifax branches takes closures to 100 this yearLloyds Banking Group is shutting a further 44 branches, sparking criticism from trade unions which say the lender is denying vulnerable consumers and \u2026","source":{"name":"The Guardian","id":null},"title":"Lloyds Banking Group to close 44 more branches","url":"https://amp.theguardian.com/business/2021/jun/23/lloyds-banking-group-to-close-44-more-branches","content":"Lloyds Banking Group is shutting a further 44 Lloyds and Halifax branches, sparking criticism from trade unions which say the lender is denying vulnerable consumers and small businesses of essential \u2026 [+3141 chars]"},{"publishedAt":"2021-06-07T01:53:00Z","author":"","urlToImage":"https://s1.reutersmedia.net/resources_v2/images/rcom-default.png?w=800","description":"Copper prices rose on Monday, as traders lapped up the latest U.S. monthly jobs report that signalled signs of recovery and calmed concerns of an early policy tightening.","source":{"name":"Reuters","id":"reuters"},"title":"METALS-Copper climbs as U.S. 
jobs data calms fears of policy tightening - Reuters","url":"https://www.reuters.com/article/global-metals-idUSL2N2NP02F","content":"By Reuters Staff\r\nHANOI, June 7 (Reuters) - Copper prices rose on Monday, as traders lapped up the latest U.S. monthly jobs report that signalled signs of recovery and calmed concerns of an early pol\u2026 [+1550 chars]"},{"publishedAt":"2021-06-07T12:08:00Z","author":"Reuters Staff","urlToImage":"https://static.reuters.com/resources/r/?m=02&d=20210607&t=2&i=1564773412&r=LYNXNPEH560PP&w=800","description":"British consumer sentiment rose last month to its highest level since April 2016, bolstered by expectations of greater job security and rising house prices, polling company YouGov said on Monday.","source":{"name":"Reuters","id":"reuters"},"title":"UK consumer sentiment rises to 5-year high as lockdown eases - YouGov - Reuters","url":"https://www.reuters.com/article/uk-britain-economy-consumersentiment-idUSKCN2DJ1BW","content":"By Reuters Staff\r\nFILE PHOTO: Shoppers carrying Primark bags walk through the city centre amid the outbreak of the coronavirus disease (COVID-19) in Chester, Britain, December 8, 2020. Picture taken \u2026 [+1607 chars]"},{"publishedAt":"2021-06-23T11:31:02Z","author":" - Personal finance correspondent, BBC News","urlToImage":"https://s.yimg.com/uu/api/res/1.2/UxiVFuiQw0lflyliKckkwA--~B/aD01NDk7dz05NzY7YXBwaWQ9eXRhY2h5b24-/https://media.zenfs.com/en/bbc_us_articles_995/e769bb41b5bea7afbae6455d988201bf","description":"The latest announcement means 100 Lloyds and Halifax branches will close this year.","source":{"name":"Yahoo Entertainment","id":null},"title":"Lloyds to close another 44 bank branches","url":"https://news.yahoo.com/lloyds-close-another-44-bank-110055200.html","content":"Lloyds Banking Group is to close another 44 branches, blaming a lack of customers at the sites as people move to digital banking.\r\nThe move comes in addition to previous announcements and means a tot\u2026 [+1941 chars]"}][ { "cmd": [ "python", "-u", "\nimport json, os, sys\nif os.path.exists(sys.argv[1]) and os.path.isdir(sys.argv[1]):\n with open(sys.argv[2], 'w') as f:\n json.dump(os.listdir(sys.argv[1]), f)\n", "[SLAVE_BUILD]/src/out/Release", "/path/to/tmp/json" ], "cwd": "[SLAVE_BUILD]", "name": "listdir build_dir", "~followup_annotations": [ "@@@STEP_LOG_LINE@json.output@[@@@", "@@@STEP_LOG_LINE@json.output@ \"chrome\"@@@", "@@@STEP_LOG_LINE@json.output@]@@@", "@@@STEP_LOG_END@json.output@@@", "@@@STEP_LOG_LINE@python.inline@@@@", "@@@STEP_LOG_LINE@python.inline@import json, os, sys@@@", "@@@STEP_LOG_LINE@python.inline@if os.path.exists(sys.argv[1]) and os.path.isdir(sys.argv[1]):@@@", "@@@STEP_LOG_LINE@python.inline@ with open(sys.argv[2], 'w') as f:@@@", "@@@STEP_LOG_LINE@python.inline@ json.dump(os.listdir(sys.argv[1]), f)@@@", "@@@STEP_LOG_END@python.inline@@@" ] }, { "cmd": [ "python", "-u", "RECIPE_MODULE[build::archive]/resources/zip_archive.py", "[TMP_BASE]/chrome_staging_tmp_1", "chrome-asan-linux-release-refs_heads_B1-123456", "[\"chrome\"]", "[SLAVE_BUILD]/src/out/Release" ], "cwd": "[SLAVE_BUILD]", "name": "zipping" }, { "cmd": [ "python", "-u", "[DEPOT_TOOLS]/gsutil.py", "--", "-h", "x-goog-meta-Cr-Commit-Position:refs/heads/B1@{#123456}", "-h", "x-goog-meta-Cr-Commit-Position-Number:123456", "-h", "x-goog-meta-Cr-Git-Commit:5e3250aadda2b170692f8e762d43b7e8deadbeef", "cp", "[TMP_BASE]/chrome_staging_tmp_1/chrome-asan-linux-release-refs_heads_B1-123456.zip", 
"gs://chromium/linux-release/chrome-asan-linux-release-refs_heads_B1-123456.zip" ], "cwd": "[SLAVE_BUILD]", "name": "gsutil upload", "~followup_annotations": [ "@@@STEP_LINK@gsutil.upload@https://storage.cloud.google.com/chromium/linux-release/chrome-asan-linux-release-refs_heads_B1-123456.zip@@@" ] }, { "cmd": [ "python", "-u", "\nimport os\nimport sys\nos.remove(sys.argv[1])\n", "[TMP_BASE]/chrome_staging_tmp_1/chrome-asan-linux-release-refs_heads_B1-123456.zip" ], "cwd": "[SLAVE_BUILD]", "name": "chrome-asan-linux-release-refs_heads_B1-123456.zip", "~followup_annotations": [ "@@@STEP_@@@@", "@@@STEP_LOG_LINE@python.@import os@@@", "@@@STEP_LOG_LINE@python.inline@import sys@@@", "@@@STEP_LOG_LINE@python.inline@os.remove(sys.argv[1])@@@", "@@@STEP_LOG_END@python.inline@@@" ] }, { "name": "$result", "recipe_result": null, "status_code": 0 } ]ksajan/FINTECH-OPEN-MONTH-HACKATHON {"success":true,"code":"SUCCESS","data":{"states":null,"districts":[{"name":"north twenty four parganas","registeredUsers":1130983},{"name":"kolkata","registeredUsers":868126},{"name":"south twenty four parganas","registeredUsers":487180},{"name":"hooghly","registeredUsers":480952},{"name":"murshidabad","registeredUsers":478812},{"name":"howrah","registeredUsers":410583},{"name":"","registeredUsers":389583},{"name":"nadia","registeredUsers":385179},{"name":"","registeredUsers":361979},{"name":"","registeredUsers":349928}],"pincodes":[{"name":"711101","registeredUsers":51367},{"name":"700006","registeredUsers":45036},{"name":"700156","registeredUsers":41906},{"name":"700135","registeredUsers":40010},{"name":"700091","registeredUsers":38808},{"name":"721101","registeredUsers":36261},{"name":"700039","registeredUsers":33907},{"name":"700015","registeredUsers":33113},{"name":"700019","registeredUsers":32710},{"name":"700001","registeredUsers":32530}]},"responseTimestamp":1630501496743}{ "name": "browserstack-examples-multiplatform-wdio", "version": "1.0.0", "description": "Multi-platform scenario examples using WDIO", "main": "index.js", "scripts": { "clean-reports": "rm -rf mochawesome-report", "generateMochawesome": "marge mochawesome-report/wdio-ma-merged.json --reportTitle 'WebdriverIO Boilerplate Results'", "bstack-parallel": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-parallel.conf.js; npm run generateMochawesome", "bstack-local-parallel": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-local-parallel.conf.js; npm run generateMochawesome", "bstack-parallel-app": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-parallel-app.conf.js; npm run generateMochawesome", "bstack-multiremote": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-multiremote.conf.js; npm run generateMochawesome", "bstack-local-multiremote": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-local-multiremote.conf.js; npm run generateMochawesome", "bstack-wdio-standalone": "npx mocha --parallel --jobs 5 src/multiplatform_scenarios/standalone/*.js", "bstack-single": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-single.conf.js; npm run generateMochawesome", "bstack-local-single": "npm run clean-reports; npx wdio resources/conf/wdio-bstack-local-single.conf.js; npm run generateMochawesome", "postinstall": "npx patch-package" }, "keywords": [ "Multi-platform", "multiremote", "standalone-wdio" ], "author": "", "license": "MIT", "devDependencies": { "@wdio/cli": "^7.7.8", "@wdio/local-runner": "^7.7.8", "@wdio/mocha-framework": "^7.7.7", "@wdio/spec-reporter": "^7.7.7", 
"browserstack-local": "1.4.8", "chai": "4.3.4", "lodash": "4.17.21", "mocha-parallel-tests": "^2.3.0", "mochawesome": "^6.2.2", "mochawesome-report-generator": "3.1.5", "wdio-mochawesome-reporter": "^4.0.0", "webdriverio": "7.7.8" }, "dependencies": { "patch-package": "^6.4.7" }, "mocha": { "timeout": 120000, "reporter": "mochawesome" } } charles-halifax/recipes { "directions": [ "Combine squash, onion, carrots, and garlic in a high-powered blender; pulse until vegetables are minced.", "Transfer vegetables to a skillet and add coconut oil, Chinese five-spice powder, salt, and soy sauce. Saute vegetable mixture over high heat until tender, 10 to 15 minutes. Spoon tofu-based sour cream into vegetable mixture and mash until filling is smooth. Remove skillet from heat and cool filling.", "Spoon about 1 teaspoon filling onto 1 half of each wonton wrapper. Wet the border of the wrapper and fold in half over the filling, sealing the edges together. Take 2 opposite corners, wet with water, and press together to seal.", "Heat oil in a deep-fryer or large saucepan to 350 degrees F (175 degrees C).", "Working in batches, fry wontons until golden brown, 5 to 10 minutes. Transfer cooked wontons to a paper towel-lined plate; cool slightly." ], "ingredients": [ "1/2 butternut squash - peeled, seeded, and cut into 1-inch pieces", "1 onion, coarsely chopped", "1/2 cup chopped carrots", "1 head garlic, cloves separated and peeled", "2 tablespoons coconut oil", "1 pinch Chinese five-spice powder", "salt to taste", "2 dashes soy sauce", "8 ounces tofu-based sour cream", "2 (12 ounce) packages wonton wrappers", "2 cups sesame oil, or as needed" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Butternut Squash Wontons (Vegan Version of Crab Rangoon)", "url": "http://allrecipes.com/recipe/246434/butternut-squash-wontons-vegan-versi/" } src/main/resources/queries/Diagnose.json { "q": "SELECT x/data[at0001]/items[at0002]/value/value as diagnosis, x/data[at0001]/items[at0077]/value/value as time FROM EHR e CONTAINS COMPOSITION c CONTAINS EVALUATION x[openEHR-EHR-EVALUATION.problem_diagnosis.v1] where e/ehr_id/value = $ehr_id order by x/data[at0001]/items[at0077]/value/value desc", "offset": 0, "fetch": 20, "query_parameters": { "ehr_id": "__set_by_testehr__" } }{"text":"The Board shall implement an electronic free-access system by which a covered voter may determine by telephone, electronic mail, or Internet whether:","historical":"Legislative History of Laws\n\nFor history of Law 19-137, see notes under § 1-1061.01.\n\nUniform Law:\n\nThis section is based on § 14 of the Uniform Military and Overseas Voters Act. See Vol. 13, Part II, Uniform Laws Annotated, Master Edition or ULA Database on Westlaw.\n\nDC CODE § 1-1061.14\n\nCurrent through December 11, 2012","credits":"(June 5, 2012, D.C. Law 19-137, § 114, 59 DCR 2542.)","sections":[{"prefix":"1","text":" The voter's federal postcard application or other registration or military-overseas ballot application has been received and accepted; and"},{"prefix":"2","text":" The voter's military-overseas ballot has been received and the current status of the ballot."}],"division":{"identifier":"I","text":"Government of District."},"title":{"identifier":"1","text":"Government Organization. (Refs & Annos)"},"chapter":{"identifier":"10","text":"Elections. 
(Refs & Annos)"},"subchapter":{"identifier":"VII","text":"Accommodations for Military and Overseas Voters."},"heading":{"title":"1","chaptersection":"1061","identifier":"1-1061.14","catch_text":"Confirmation of receipt of application and voted ballot."}}0 { "subjectName": "test user user", "subjectType": "LEGAL", "subjectCode": "10101010", "subjectStatus": null }{ "name": "rust-worker-sample", "version": "1.0.0", "main": "index.js", "repository": "ssh://git@github.com/konojunya/rust-worker-sample.git", "author": "konojunya <>", "license": "MIT", "scripts": { "build": "webpack --mode production", "start": "webpack-dev-server --debug --mode development" }, "devDependencies": { "@wasm-tool/wasm-pack-plugin": "^0.4.2", "clean-webpack-plugin": "^3.0.0", "html-webpack-plugin": "^3.2.0", "webpack": "^4.33.0", "webpack-cli": "^3.3.2", "webpack-dev-server": "^3.7.0", "worker-plugin": "^3.1.0" }, "dependencies": { "comlinkjs": "^3.2.0" } } iwasingh/Wikoogle ["Nobel Prize in Physics","Nobel Prize","Ig Nobel Prize","","Nobel Prize in Physiology or Medicine","","","","","","","","","","","","","Cavendish Laboratory","","","","","","","","","","","",""]{"html_attributions": [], "results": [{"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 27.120882, "lng": 80.46427659999999}, "viewport": {"northeast": {"lat": 27.1224163302915, "lng": 80.4653655802915}, "southwest": {"lat": 27.1197183697085, "lng": 80.4626676197085}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/school-71.png", "icon_background_color": "#7B9EB0", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/school_pinlet", "name": "", "place_id": "ChIJk24KrBW9njkRvMYBAfH5V3w", "plus_code": {"compound_code": "4FC7+9P Usarha, Uttar Pradesh, India", "global_code": "7MV24FC7+9P"}, "reference": "ChIJk24KrBW9njkRvMYBAfH5V3w", "scope": "GOOGLE", "types": ["school", "point_of_interest", "establishment"], "vicinity": "Unnamed Road, Usarha"}], "status": "OK"}{ "name" : "刚剑「胴田贯」", "icon" : "flammpfeil.slashblade:slashbladenamed{ModelName: \"named/muramasa/muramasa\",TextureName: \"named/muramasa/doutanuki\"}", "category" : "category_3", "pages" : [ { "type" : "text", "text" : "有名的刚剑,能切裂耕地。$(br)切裂耕地指的是在田埂上叠放试斩用具,斩下之时能直接插入田埂之中。$(br2)现在只是被一些僵尸浪费的锈刀罢了。但我仍旧修复了一振送给了犬走家的义弟。" }, { "type" : "entity", "entity" : "minecraft:zombie{HandItems:[{id:\"flammpfeil.slashblade:slashbladenamed\",Count:1b,Damage:0s,tag:{ModelName: \"named/muramasa/muramasa\",TextureName: \"named/muramasa/sabigatana\"}},{}]}", "text" : "一只携带着锈刀的僵尸。$(br)事实上,很多持有锈刀的僵尸要么鞘遗失了,要么刀断了,甚至没了鞘也丢失的刀也更加常见。" }, { "type" : "crafting", "recipe" : "flammpfeil.slashblade:recipexx", "recipe2" : "flammpfeil.slashblade:recipex" }, { "type" : "crafting", "recipe" : "flammpfeil.slashblade:doutanuki", "text" : "修复好的胴田贯,才拥有自身真正的力量。" } ], "advancement":"lastsmith:slashblade_bamboolight", "sortnum" : 3 } logs/20191024155406-whitley_bridge.json {"html_attributions": [], "result": {"rating": 3.3, "reviews": [{"author_name": "", "author_url": "https://www.google.com/maps/contrib/108859358437786261253/reviews", "language": "en", "profile_photo_url": "https://lh5.ggpht.com/-flFARLOA5vA/AAAAAAAAAAI/AAAAAAAAAAA/bZjHmbAdd2Y/s128-c0x00000000-cc-rp-mo-ba4/photo.jpg", "rating": 1, "relative_time_description": "9 months ago", "text": "I work there so", "time": 1548417981}, {"author_name": "", "author_url": "https://www.google.com/maps/contrib/109178005379839058662/reviews", "language": "en", "profile_photo_url": 
"https://lh5.ggpht.com/-UKPZbGxmLTA/AAAAAAAAAAI/AAAAAAAAAAA/8chCdP9gOfg/s128-c0x00000000-cc-rp-mo-ba5/photo.jpg", "rating": 5, "relative_time_description": "a year ago", "text": "Good location but only 3 trains a day", "time": 1514573348}, {"author_name": "", "author_url": "https://www.google.com/maps/contrib/112556272770212164716/reviews", "profile_photo_url": "https://lh4.ggpht.com/-NAxuUnyB3ko/AAAAAAAAAAI/AAAAAAAAAAA/axz9OruIQlM/s128-c0x00000000-cc-rp-mo-ba5/photo.jpg", "rating": 4, "relative_time_description": "a year ago", "text": "", "time": 1514777897}]}, "status": "OK"}index/b/bierocks-german-meat-turnovers.json { "directions": [ "Saute beef, onion and garlic, salt and lemon pepper in a large skillet over medium high heat, until beef is browned. Add cabbage, Worcestershire sauce and caraway seeds. Cook until cabbage is limp; drain liquid from mixture.", "Preheat oven to 350 degrees F (175 degrees C).", "On a lightly floured board, roll each loaf of dough into a 12 inch circle. Cut each circle into 6 wedges. Spoon cabbage/beef filling onto center of each dough piece, dividing equally. Pull three points of each wedge up to the center and pinch to seal. Place bierocks on a lightly greased cookie sheet. If desired, brush dough with melted butter or egg wash (1 egg white with 2 tablespoons water).", "Bake in preheated oven for 30 minutes, or until golden brown. Serve hot, or wrap and freeze for heating later." ], "ingredients": [ "2 (1 pound) loaves frozen bread dough, thawed", "1 pound ground beef", "1 onion, chopped", "1 clove garlic, crushed", "1 1/2 teaspoons salt", "1 1/2 teaspoons lemon pepper", "1 small head cabbage, chopped", "2 tablespoons Worcestershire sauce", "2 teaspoons caraway seeds", "1/2 cup melted butter" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Bierocks (German Meat Turnovers)", "url": "http://allrecipes.com/recipe/24675/bierocks-german-meat-turnovers/" } bianapis/sd-branch-location-management-v3 { "staffandLocationPlanningStaffandLocationPlanningServiceType" : "staffandLocationPlanningStaffandLocationPlanningServiceType", "staffandLocationPlanning" : "staffandLocationPlanning", "staffandLocationPlanningBusinessUnitEmployeeReference" : "SALPBUR722119", "staffandLocationPlanningStaffandLocationPlanningServiceDescription" : "staffandLocationPlanningStaffandLocationPlanningServiceDescription", "staffandLocationPlanningWorkSchedule" : "staffandLocationPlanningWorkSchedule", "requestResponseRecord" : {}, "productandServicePlanningRequestActionTaskRecord" : {}, "staffandLocationPlanningPreconditions" : "staffandLocationPlanningPreconditions", "staffandLocationPlanningStaffandLocationPlanningServiceInputsandOuputs" : "staffandLocationPlanningStaffandLocationPlanningServiceInputsandOuputs", "productandServicePlanningRequestActionTaskReference" : "PASPRATR701194", "productandServicePlanningRequestRecordReference" : "PASPRRR716386", "staffandLocationPlanningPostconditions" : "staffandLocationPlanningPostconditions", "staffandLocationPlanningStaffandLocationPlanningServiceWorkProduct" : "staffandLocationPlanningStaffandLocationPlanningServiceWorkProduct" }100-1000 { "id": 111953, "info": { "name": "myzuka.org black + transparent & no ads", "description": "смотрите скрин)", "additionalInfo": null, "format": "uso", "category": "myzuka", "createdAt": "2015-03-28T13:15:33.000Z", "updatedAt": "2015-03-28T18:37:42.000Z", "license": "NO-REDISTRIBUTION", "author": { "id": 116495, "name": "Sanya4ever" } }, "stats": { "installs": { "total": 326, "weekly": 
0 } }, "screenshots": { "main": { "name": "111953_after.jpeg", "archived": true } }, "discussions": { "stats": { "discussionsCount": 0, "commentsCount": 0 }, "data": [] }, "style": { "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n@-moz-document domain(\"myzuka.org\") {\r\n \r\n body, html {background: rgba(0,0,0,.05)url(/*[[URL]]*/)!important; \r\n background-color: #111 !important;\r\n\tbackground-attachment: fixed !important;\r\n\tbackground-clip: border-box !important;\r\n\tbackground-origin: padding-box !important;\r\n\tbackground-position: center center !important;\r\n\tbackground-repeat: no-repeat !important;\r\n\tbackground-size: cover !important;\r\n font-family:Constantia!important}\r\n \r\n\t.header .logo {width: 210px;margin: -2px 31px 0px 0px;background: url(https://leto36g.storage.yandex.net/rdisk/c634c80bb82eb4a089c5924232f16eb80157b18c84fcd31179d5f36319142e39/inf/oyzXLtOAcCiajnfVfEX0gAATFwEIwwsNT_TkNTCEFOCAweSOCNgTs9GDc8FNatiFAptP1twlRuQOl-9XD44H2Q==?uid=0&filename=myzuka.png&disposition=inline&hash=&limit=0&content_type=image%2Fpng&tknv=v2&rtoken=b7f5693d8540cd54e5c6bdb8d4c73d03&force_default=no) no-repeat scroll 1px 0px transparent;}\r\n\t \r\n .all, .main, .pagin-letters li, .tbl tr:nth-child(2n+1), .main-details .side .iwrap, .player-inline, .main-tabs li a, .pager li a {background: transparent !important;border: 0px !important;}\r\n\t\r\n\t.side-block, .profile-bar, .profile-bar .foot {background: rgba(0, 0, 0, 0.6)!important; box-shadow: 0px 0px 9px rgba(0, 0, 0, .9 ) !important}\r\n \r\n .footer, .main .content .inner, .player-inline:nth-child(2n+1), input.form-text, input[type=\"text\"], input[type=\"password\"], .main-tabs li.active a, .login-block, .login-block .form-input,.table-striped > tbody > tr:nth-child(2n+1) > td, .table-striped > tbody > tr:nth-child(2n+1) > th, .sorters li a.active {background: rgba(0, 0, 0, 0.5)!important; box-shadow: 0px 0px 5px rgba(0, 0, 0, .9 ) inset!important;border: 0px !important}\r\n\t\r\n\t.main-nav {background: rgba(0, 0, 0, 0.35)!important; box-shadow: 0px 0px 5px rgba(200, 0, 0, .9 ) inset!important;}\r\n\t.main-nav li a {color: #33ccff !important; margin-left: 50px !important; background: transparent!important; }\r\n\t.main-nav li a:hover, .pagin-letters a:hover {color: #ff0000 !important;background: transparent!important;}\r\n\t.header .search-box .form-button, input.form-text, input[type=\"text\"], input[type=\"password\"]{color: #33ccff !important;background: rgba(0, 0, 0, 0.6)!important;}\r\n\t.header .search-box .form-button:hover, input.form-text:hover, input[type=\"text\"]:hover, input[type=\"password\"]:hover{color: #3399ff !important;background: rgba(0, 0, 0, 0.6)!important;box-shadow: 0px 0px 5px rgba(0, 191, 255, .9 ) inset!important;}\r\n\t.player-controls .timeline, .player-controls .volume {background: rgba(200, 0, 0, 0.6)!important;}\r\n\t\t\r\n\ta{color: #33ccff !important;}\r\n\ta:hover{color: #ff0000 !important;}\r\n\tbody{color: #0099ff !important;}\r\n .header .logo .text{color: #20B2AA !important;}\r\n\t\r\n\t.pagin-letters {margin-left: 180px !important}\r\n\t\r\n\t.button-a, .button-b, .sorters li a{background: rgba(0, 0, 0, 0.6)!important; box-shadow: 0px 0px 5px rgba(0, 191, 255, .5 ) inset!important;}\r\n\t.button-a:hover, .button-b:hover, .sorters li a:hover{background: rgba(0, 0, 0, 0.6)!important; box-shadow: 0px 0px 5px rgba(200, 0, 0, .5 ) inset!important;opacity:1 !important}\r\n\t\r\n #ad_ph_1>embed, .aside-col>noindex>center>a, #MarketGidComposite577725, 
.side-col>noindex>center>a, #\\37 8c593cbd2fb7d1c7a9e51ef9e0808a9, #ad_ph_1 {display: none !important}}", "settings": [ { "key": "URL", "label": "Выберите фон", "type": "image", "options": [ { "key": "default", "label": "Как на скрине", "value": "http://ipadwallpapershop.com/wp-content/uploads/2010/08/Grey-lines.jpg", "default": true } ] } ] } }1-10 { "id": 21082, "title": [ "[Jungle Clearing]" ], "description": [ "Large stones mark the border of the clearing and stand out brightly against the rich hues of the encroaching jungle. Occupying one corner of the clearing is a series of tangled vines that have grown together in mimicry of a tree. Coarse, round kiwifruits dangle from their spindled blossoms amid ruffled yellow flowers and bright white blossoms with yellow hearts." ], "paths": [ "Obvious paths: southeast, south" ], "location": "Mist Harbor", "wayto": { "21081": "southeast", "21083": "south" }, "timeto": { "21081": 0.2, "21083": 0.2 }, "image": "ifw-wilds-1368242816.png", "image_coords": [ 1438, 2323, 1456, 2343 ] }Nozomi188/FrackinUniverse-sChinese-Project [ { "DeniedAlternatives": [], "Files": { "objects/vanity/lights/fulavalamps/specials/fuaetherlavalamp.object": [ "/shortdescription" ] }, "Texts": { "Eng": "" } }, { "DeniedAlternatives": [], "Files": { "objects/vanity/lights/fulavalamps/specials/fuaetherlavalamp.object": [ "/description" ] }, "Texts": { "Eng": "It's entrancing. I can't look away." } } ]Xtuden-com/airwaves { "id": "d180-135", "text": "-25-\nDRUZHININ:\nDIRECTOR:\nDRUZHININ:\nDIRECTOR:\nDRUZHININ:\nMUSIC:_ __\nDOCTOR:\nDIRECTOR:\nDOCTOR:\nINKELES:\nI don't want any trouble. Just want to live quietly...\npeacefully. But it pains me to see the machine—\nWhat's the matter?\nLook, look at that boy out there in the shop.\nKolya, you mean?\nYes..look, look how he drives his machine. Says he's\ngoing to break a record on my machine. They'll come\naround this afternoon, take his picture, put it in\nthe paper, call him a hero. But what of the machine?\nNobody takes a picture of the machine.\nJ3 ARC A STIC HORN^COlRaEJT. ._. .FUNNEL ING OUT (CUT g)\n(LOW, EDGY VOICE) Breathe deeply, Comrade Director.\n(BIG BREATH ON MIKE. .MQI-jENTARILY HOLDS IT .. .EXHALES)\nDoctor Konevskaya, what do you do with that\nstethoscope? Tune in Radio Moscow? (HE LAUGHS PLEASED\nWITH SELF)\n(FLATLY) Breathe deeply, Comrade Director.\n(DIRECTOR-DOCTOR REPEAT BREATHING CYCLE TWICE JUST\nUNDER FOLLOWING:)\nThis is again. A doctor holds a very\ncritical position in the Soviet Union. A certificate\nof illness from a doctor is just about the only\naccepted excuse for a Soviet worker if he's late or\nabsent from work. Otherwise the worker gets fined or\nimprisoned. The fine is stiff. Usually a quarter of\nhis paycheck for six months. And it literally means\na semi-starvation diet for the man and his family.\nThe Soviet trade union offers the worker no protection\nagainst such fines. The only hope is a certificate of\nillness from a doctor." 
}{ "type":"createServerGroup", "account": "aws_account", "stack": "myStack", "application": "myAwsApp", "amiName": "ami-12345", "availabilityZones": { "us-west-1": [ "us-west-1a", "us-west-1c" ] }, "capacity": { "desired": 1, "max": 1, "min": 1 }, "cloudProvider": "aws", "credentials": "aws-account1", "healthCheckType":"EC2", "iamRole":"BaseInstanceProfile", "legacyUdf": false, "instanceType": "m5.medium", "setLaunchTemplate": true, "spotAllocationStrategy":"capacity-optimized", "spotInstancePools": 3 } 1-10 { "address": "1oranGeS2xsKZ4jVsu9SVttzgkYXu4k9v", "cert_auth_type": "web", "cert_sign": " "cert_user_id": "", "files": { "data.json": { "sha512": "99d7ff5e0ecccf971b65b2d3ee9ae70ecf99a6569d33591fdf219549e384cc64", "size": 257 } }, "inner_path": "data/users/18hc78tJaoCobtwtG7VWmkqA2Ki2fAec4F/content.json", "modified": 1479643659.954045, "signs": { "18hc78tJaoCobtwtG7VWmkqA2Ki2fAec4F": " } }data/PL58Wk5g77lF94tg-F3y5zRyDeLVhTDnTg/spyKZ-p3UgE/metadata.json { "description": "Automation has come a long way in assisting with regression testing efforts. Teams worldwide are successfully running hundreds of functional regression tests at every check-in. While this provides a great source of confidence, critical regression bugs are still missed using this approach. That’s because these tests can only assert on what their human programmer asks them to. Additional errors with functionality, UX, and usability often go uncaught using today’s most common test automation techniques.\n\nFor this reason, the top companies in all sectors of the industry are turning to visual validation. Visual validation is a relatively new concept that can be used to enhance existing automated tests and provide an easy way to perform those difficult checks for things like UX, localization, usability, responsive design, and cross-device testing.\n\nIn this talk, you’ll learn how visual validation works, see a live integration into an existing test code base, and discuss the pros and cons of using various visual validation techniques.", "duration": 1311, "id": "spyKZ-p3UgE", "publishedAt": 1602017101, "ranking": { "likes": 58, "views": 1086 }, "tags": [ "Jamstack", "static site", "virtual conference", "jamstackconf", "headless cos", "netlify", "web development", "conference talk", "webdev", "angie jones", "testing automation", "developer experience" ], "thumbnails": { "large": "https://i.ytimg.com/vi/spyKZ-p3UgE/sddefault.jpg", "medium": "https://i.ytimg.com/vi/spyKZ-p3UgE/hqdefault.jpg", "small": "https://i.ytimg.com/vi/spyKZ-p3UgE/mqdefault.jpg", "tiny": "https://i.ytimg.com/vi/spyKZ-p3UgE/default.jpg" }, "title": "Your Tests Lack Vision: Adding Eyes to your Automation Framework" } version https://git-lfs.github.com/spec/v1 oid sha256:6a98c38db60742460096c32b8cdaa29565995a4ecc9a20dff0d1805c2c86a8bb size 8722 { "name": "azure-pipelines-cucumber", "version": "1.0.7", "description": "Embed Protractor HTML result into release and build tabs", "main": "index.js", "scripts": { "build": "tsc -p .", "package": "tfx extension create --root . 
--env dev --manifest-globs vss-extension.json", "gallery-publish": "tfx extension publish --token $PAT", "clean": "rimraf ./dist && rimraf ./*.vsix", "bump-version": "node utils/bump-version.js", "build-publish": "npm run clean && npm run bump-version && npm run build && npm run package && npm run gallery-publish" }, "repository": { "type": "git", "url": "git+https://github.com/maciejmaciejewski/azure-pipelines-cucumber.git" }, "private": true, "keywords": [ "Azure DevOps", "Cucumber" ], "author": " <>", "contributors": [ "selamanse <>" ], "license": "ISC", "bugs": { "url": "https://github.com/maciejmaciejewski/azure-pipelines-cucumber/issues" }, "homepage": "maciejmaciejewski/azure-pipelines-cucumber", "dependencies": { "vss-web-extension-sdk": "^5.141.0" }, "devDependencies": { "@types/node": "^13.7.4", "semver": "^7.3.5", "tfx-cli": "^0.9.3", "typescript": "^3.9.7" } } 0 { "manifest_version": 2, "name": "SigTrello", "description": "Modifies the checklist item convert-to-card action to replace the checklist item with a link instead of removing it outright.", "version": "0.5", "content_scripts": [ { "matches": [ "*://trello.com/*" ], "css": [ "collapsing.css" ], "js": [ "jquery-2.1.0.min.js", "trello-client.js", "sigtrello-dom-card-window.js", "sigtrello-collapsing.js", "sigtrello-checklist-item-to-card.js", "sigtrello-service-links.js", "sigtrello-sum-checklist-times.js", "all.js" ] } ], "web_accessible_resources": [ "collapsing.css", "images/link.png", "sigtrello-dom-card-window.ts", "sigtrello-dom-card-window.js.map", "sigtrello-collapsing.ts", "sigtrello-collapsing.js.map", "sigtrello-checklist-item-to-card.ts", "sigtrello-checklist-item-to-card.js.map", "sigtrello-service-links.ts", "sigtrello-service-links.js.map", "sigtrello-sum-checklist-times.ts", "sigtrello-sum-checklist-times.js.map", "all.js", "all.ts", "all.js.map" ] } {"nom":"Becquigny","circ":"4ème circonscription","dpt":"Somme","inscrits":101,"abs":63,"votants":38,"blancs":4,"nuls":0,"exp":34,"res":[{"nuance":"FN","nom":"","voix":20},{"nuance":"REM","nom":"","voix":14}]}coUrbanize/courb-tooltip { "editor.formatOnSave": true, "eslint.autoFixOnSave": true, "sasslint.enable": true, "sasslint.configFile": ".sass-lint.yml", "beautify.ignore": "**/*", "prettier.singleQuote": true, "sasslint.packageManager": "yarn", "sasslint.run": "onSave", "[handlebars]": { "editor.formatOnSave": false }, "[scss]": { "editor.formatOnSave": false }, "[markdown]": { "editor.formatOnSave": false }, "files.exclude": { "**/.git": true, "tmp/**": true, "**/.DS_Store": true }, "search.exclude": { "**/node_modules": true, "**/bower_components": true, "**/.git": true, "tmp/**": true }, "typescript.tsdk": "node_modules/typescript/lib", "git.rebaseWhenSync": true, "git.allowForcePush": true, "html.format.indentHandlebars": true } { "parent": "minecraft:block/cube_all", "textures": { "all": "cursedcraft:block/grey_goo" } }{ "editor.tabSize": 2, "python.pythonPath": "/usr/local/bin/python3", "python.formatting.provider": "autopep8", "python.linting.enabled": true }app/src/main/assets/Datapack/data/7524.json { "Artist": "", "Common name": "Japanese apricot", "Date created": "1902-07-30", "Geographic origin": "Santa Cruz, Santa Cruz County, California, United States", "Notes on original": "Chinese plum", "Physical description": "1 art original : col. 
; 17 x 25 cm.", "Scientific name": "Prunus mume", "Specimen": "25315", "Variety": "Sparks Mammoth", "Year": "1902", "Name": ": Sparks Mammoth" }0 { "title": "DevLife", "url": "https://levholm.se/", "language": "en", "description": "I'm writing about code and life.", "feed": { "subtitle": "I'm writing about code and life.", "filename": "feed.xml", "path": "/feed/feed.xml", "id": "https://levholm.se/" }, "jsonfeed": { "path": "/feed/feed.json", "url": "https://levholm.se/feed/feed.json" }, "author": { "name": "", "email": "", "url": "https://levholm.se/about-me/" } } [{"navFile":"src/main/res/navigation/navigation.xml","javaFiles":["com.chand.learning.newsapp.fragments.NewsListFragmentDirections","com.chand.learning.newsapp.fragments.NewsDetailsFragmentDirections","com.chand.learning.newsapp.fragments.NewsDetailsFragmentArgs"]}]{"ast":null,"code":"import { createContext } from 'react';\nexport default /*#__PURE__*/createContext(null);","map":null,"metadata":{},"sourceType":"module"}JefferyLukas/SRIs {"angular-formly-templates-bootstrap.js":","angular-formly-templates-bootstrap.min.js":"}1-10 { "description": "cmd/gc: silence valgrind error\n\nvalgrind complained that under some circumstances, \n\n *nr = *nc\n\nwas being called when nr and nc were the same *Node. The suggestion my R\u00e9my was to introduce a tmp node to avoid the potential for aliasing in subnode.", "cc": [ "", "", "", "" ], "reviewers": [], "messages": [ { "sender": "", "recipients": [ "", "", "", "", "", "" ], "text": "Hello , , (cc: ),\n\nI'd like you to review this change to\nhttps://code.google.com/p/go", "disapproval": false, "date": "2013-03-13 10:58:52.402700", "approval": false }, { "sender": "", "recipients": [ "", "", "", "", "", "" ], "text": "The original valgrind report.\n\n==2499== Memcheck, a memory error detector\n==2499== Copyright (C) 2002-2011, and GNU GPL'd, by et al.\n==2499== Using Valgrind-3.7.0 and LibVEX; rerun with -h for copyright info\n==2499== Command: /home/dfc/go/pkg/tool/linux_386/8g -o /tmp/go-build072428647/fmt/_obj/_go_.8 -p fmt -complete -D _/home/dfc/go/src/pkg/fmt -I /tmp/go-build072428647 /home/dfc/go/src/pkg/fmt/doc.go /home/dfc/go/src/pkg/fmt/format.go /home/dfc/go/src/pkg/fmt/print.go /home/dfc/go/src/pkg/fmt/scan.go\n==2499== Parent PID: 2302\n==2499==\n==2499== Source and destination overlap in memcpy(0xbefc76d8, 0xbefc76d8, 224)\n==2499== at 0x402D9A9: memcpy (in /usr/lib/valgrind/vgpreload_memcheck-x86-linux.so)\n==2499== by 0x8049071: subnode (cplx.c:364)\n==2499== by 0x8048F11: complexmove (cplx.c:70)\n==2499== by 0x8055ED7: gmove (gsubr.c:1288)\n==2499== by 0x804ADCE: cgen (cgen.c:125)\n==2499== by 0x806AB3D: cgen_as (gen.c:745)\n==2499== by 0x80503A9: cgen_callret (ggen.c:316)\n==2499== by 0x804B5DB: cgen (cgen.c:366)\n==2499== by 0x806AB3D: cgen_as (gen.c:745)\n==2499== by 0x806A72F: gen (gen.c:468)\n==2499== by 0x806A0C9: genlist (gen.c:257)\n==2499== by 0x80501D8: cgen_call (ggen.c:259)", "disapproval": false, "date": "2013-03-13 10:59:42.990240", "approval": false }, { "sender": "", "recipients": [ "" ], "text": "LGTM\r\n\r\nThank you. I have yet to see a valgrind report that was a false positive.\r\nThis might be the closest one I've seen - who knew that the compiler was\r\nputting a memcpy there! - but it's still worth cleaning up to make the rest\r\nof the valgrind output useful.\r\n\r\nRuss\r\n", "disapproval": false, "date": "2013-03-13 16:48:29.744390", "approval": true }, { "sender": "", "recipients": [ "" ], "text": "Excellent, thank you. 
I'm working through all the permutations of {5,6,8}{a,c,g,l}. \r\n\r\n\r\n\r\nOn 14/03/2013, at 3:48 AM, <> wrote:\r\n\r\n> LGTM\r\n> \r\n> Thank you. I have yet to see a valgrind report that was a false positive. This might be the closest one I've seen - who knew that the compiler was putting a memcpy there! - but it's still worth cleaning up to make the rest of the valgrind output useful.\r\n> \r\n> Russ\r\n", "disapproval": false, "date": "2013-03-13 19:42:14.851160", "approval": false }, { "sender": "", "recipients": [ "", "", "", "", "", "" ], "text": "*** Submitted as https://code.google.com/p/go/source/detail?r=28dbe614d61c ***\n\ncmd/gc: silence valgrind error\n\nvalgrind complained that under some circumstances, \n\n *nr = *nc\n\nwas being called when nr and nc were the same *Node. The suggestion my R\u00e9my was to introduce a tmp node to avoid the potential for aliasing in subnode.\n\nR=remyoudompheng, minux.ma, rsc\nCC=golang-dev\nhttps://codereview.appspot.com/7780044", "disapproval": false, "date": "2013-03-13 20:11:26.608750", "approval": false } ], "owner_email": ".net", "private": false, "base_url": "", "owner": "dfc", "subject": "code review 7780044: cmd/gc: silence valgrind error", "created": "2013-03-13 04:45:48.767070", "patchsets": [ 1, 2001, 5001, 8001, 14001 ], "modified": "2013-03-13 20:11:28.310320", "closed": true, "issue": 7780044 }{"books":{"14":{"id":"6096","title":"Short Nonfiction Collection Vol. 025","description":"

A collection of short nonfiction works in the public domain. The selections included in this collection were independently chosen by the readers, and the topics encompass history, science, humor, travel, philosophy, nature, slavery, the U. S. Civil War, and politics. (summary by )<\/p>","url_text_source":"","language":"English","copyright_year":"0","num_sections":"15","url_rss":"http:\/\/librivox.org\/rss\/6096","url_zip_file":"http:\/\/www.archive.org\/download\/nonfiction025_librivox\/nonfiction025_librivox_64kb_mp3.zip","url_project":"","url_librivox":"http:\/\/librivox.org\/short-nonfiction-collection-vol-025-by-various\/","url_other":null,"totaltime":"4:28:00","totaltimesecs":16080,"authors":[{"id":"18","first_name":"","last_name":"Various","dob":"","dod":""}],"url_iarchive":"http:\/\/www.archive.org\/details\/nonfiction025_librivox","sections":[{"id":"82003","section_number":"1","title":"The American Invasion","listen_url":"","language":"English","playtime":"673","file_name":null,"readers":[{"reader_id":"6965","display_name":"NoelBadrian"}]},{"id":"82004","section_number":"2","title":"Declaration of the National Anti-Slavery Convention","listen_url":"","language":"English","playtime":"786","file_name":null,"readers":[{"reader_id":"5319","display_name":"Guero"}]},{"id":"82005","section_number":"3","title":"The End of Books","listen_url":"","language":"English","playtime":"1926","file_name":null,"readers":[{"reader_id":"92","display_name":""}]},{"id":"82006","section_number":"4","title":"Foreward to \"The Old Coast Road from Boston to Plymouth\"","listen_url":"","language":"English","playtime":"1381","file_name":null,"readers":[{"reader_id":"4813","display_name":""}]},{"id":"82007","section_number":"5","title":"Letter from a Freedman to His Old Master","listen_url":"","language":"English","playtime":"336","file_name":null,"readers":[{"reader_id":"5694","display_name":""}]},{"id":"82008","section_number":"6","title":"Letter to ","listen_url":"","language":"English","playtime":"120","file_name":null,"readers":[{"reader_id":"6080","display_name":"progressingamerica"}]},{"id":"82009","section_number":"7","title":"New Nationalism, The","listen_url":"","language":"English","playtime":"2297","file_name":null,"readers":[{"reader_id":"6080","display_name":"progressingamerica"}]},{"id":"82010","section_number":"8","title":"Nikola Tesla Sees a Wireless Vision","listen_url":"","language":"English","playtime":"477","file_name":null,"readers":[{"reader_id":"5319","display_name":"Guero"}]},{"id":"82011","section_number":"9","title":"On Nature - Aphorisms by Goethe","listen_url":"","language":"English","playtime":"742","file_name":null,"readers":[{"reader_id":"4174","display_name":"Availle"}]},{"id":"82012","section_number":"10","title":"Queer Methods of Travel in Curious Corners of the World","listen_url":"","language":"English","playtime":"2504","file_name":null,"readers":[{"reader_id":"3157","display_name":"TriciaG"}]},{"id":"82013","section_number":"11","title":"The Red Squirrel","listen_url":"","language":"English","playtime":"253","file_name":null,"readers":[{"reader_id":"688","display_name":""}]},{"id":"82014","section_number":"12","title":"Slaves of Fashion","listen_url":"","language":"English","playtime":"371","file_name":null,"readers":[{"reader_id":"6965","display_name":"NoelBadrian"}]},{"id":"82015","section_number":"13","title":"Thoughts on 
Government","listen_url":"","language":"English","playtime":"1127","file_name":null,"readers":[{"reader_id":"6080","display_name":"progressingamerica"}]},{"id":"82016","section_number":"14","title":"The Virgin of Guadalupe","listen_url":"","language":"English","playtime":"270","file_name":null,"readers":[{"reader_id":"3657","display_name":""}]},{"id":"82017","section_number":"15","title":"What I Saw of Shiloh","listen_url":"","language":"English","playtime":"2817","file_name":null,"readers":[{"reader_id":"5694","display_name":""}]}],"genres":[{"id":"84","name":"Essays & Short Works"}],"translators":[]}}}tabinfl/50ShadesOfGreyPill { "address": "1White24UrrwQrD86o6Vrc1apgZ1x1o51", "cert_auth_type": "web", "cert_sign": " "cert_user_id": "", "files": { "data.json": { "sha512": "c65bf8100a20de67bce40e11e6283638214f880dbc29a0031b2d6a296725e9c1", "size": 425 } }, "inner_path": "data/users/1nNSk96h8jbaGJaEpK515AKtaRrXxXYAj/content.json", "modified": 1492702808, "signs": { "1nNSk96h8jbaGJaEpK515AKtaRrXxXYAj": " } }data/separated_by_email/9.json {"Content-Type": "message/rfc822", "Content-Type-Override": "message/rfc822", "MboxParser-attention": "MADAM/SIR", "MboxParser-content-transfer-encoding": "7bit", "MboxParser-from": "r Mon Nov 4 17:41:46 2002", "MboxParser-mime-version": "1.0", "MboxParser-return-path": ["<>", "<>"], "MboxParser-status": "O", "MboxParser-x-sieve": "cmu-sieve 2.0", "Message-From": "MR USMAN ABDUL <>", "Message-Recipient-Address": "R@M", "Message-To": ["R@M", "R@M"], "Message:From-Email": "", "Message:From-Name": "MR U", "Message:Raw-Header:Content-Transfer-Encoding": "7bit", "Message:Raw-Header:Content-Type": "text/plain;charset=\"iso-8859-1\"", "Message:Raw-Header:MIME-Version": "1.0", "Message:Raw-Header:Message-Id": "<200211042241.>", "Message:Raw-Header:Return-Path": ["<>", "<>"], "Message:Raw-Header:Status": "O", "Message:Raw-Header:X-Sieve": "cmu-sieve 2.0", "X-TIKA:Parsed-By": ["org.apache.tika.parser.DefaultParser", "org.apache.tika.parser.mail.RFC822Parser"], "X-TIKA:content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nTHANK YOU\n\nCHALLENGE SECURITIES LTD.\nLAGOS, NIGERIA\n\n\n\nATTENTION:MADAM/SIR\n\nI am , the Director of Operations\nCHALLENGE SECURITIES LTD,LAGOS NIGERIA. Here in\nNIGERIA Our firm is a security company of high repute\nwith years of outstanding service to the people of\nAfrica.\n\nI have resolved to contact you through this medium\nbased on business proposal that will be of mutual\nbenefit to both of us. I got your particulars through\na consultant company base on my research for a\ntrustworthy and established person that understands\ninvestment ethic for entering into a life time\nprofitable joint partnership investment and also\nco-operation with confidence and trust that you will\nkeep the contents secret and divulged to any third\nparty.\n\nTo be explicit and straight to the point. 
Some time\nearly 1997, a reputable client of ours deposited a\nconsignment in our company's vault for safe keeping.\nAnd since then our client has failed to come forward\nto claim his consignment, which has accumulated a\nconsiderable amount of money in demurrage.\n\nConsequently, in our bide to contact this client to\nredeem the demurrage which his consignment had\naccumulated we discovered that our client was the\nformer president of the Federal Republic of Zaire, who\ndied of illness after he was de-throwed in the same\nyear the consignment was entrusted into our care.\n\nSince the death of our client President Mobutou\nSeseseko, none of his benefactors has come forward to\nclaim the consignment with us, which means that non of\nhis relatives or aids had any knowledge of this\nconsignment. Hence out of curiosity I decided to\nsecretly open the two boxes that our client deposited\nin our vault. And to my surprise I discovered that the\ntwo boxes that were registered as treasurer by our\nclient actually contained a considerable amount of\nmoney in United States Dollars amounting to about\nUS$30 million Dollar. Since this development I have\nbeen nursing plans secretly. I also found out from\nenquiries and the foreign media that our late client\nsiphoned a lot of money from his country while he was\nin office as head of state.\n\nIt is my conviction that the consignment in our vault\nwas part of the money that our client siphoned and now\nthat he is dead there is no race to this money in our\ncare.\n\nI am now soliciting your noble assistance to assist me\nin transferring this money out of nigeria to your\ncountry for immediate investment with your assistance.\nI have also decided that you will generously be\nentitled to 30% of the total amount. Upon my receipt\nof your reply confirming your willingness to assist me\nof this transaction, I will immediately arrange and\ntransfer all the rights of ownership of this\nconsignment to your name to facilitate your easy\nclearance and transfer of the complete funds to your\ncountry.\n\nyou have nothing to worry about, as I will be there\nto assist you in anyway necessary with all proper\ndocumentation.\n\nThis transaction is 100% risk free.\n\nPlease maintain absolute confidentiality on this\nmatter.\n\n\nThanks.\n\nYours faithfully.\n\nMR. 
USMAN ABDUL\n\n\n\n", "X-TIKA:content_handler": "ToTextContentHandler", "X-TIKA:embedded_depth": "1", "X-TIKA:embedded_resource_path": "/embedded-9", "X-TIKA:parse_time_millis": "7", "dc:creator": ["\"\" <>", " <>"], "dc:format": "text/plain;charset=\"iso-8859-1\"", "dc:identifier": "<>", "dc:subject": "THANK YOU", "dc:title": "THANK YOU", "dcterms:created": "2002-11-05T07:41:26Z"}mcgrue/tcgplayer-sellers-portal-chrome-extension { "name": "TCGPlayer Sellers Tool", "version": "1.0", "description": "Make the TCGPlayer Sellers page less bad.", "permissions": ["activeTab", "declarativeContent", "storage", "tabs"], "homepage_url": "http://gruniverse.com", "background": { "scripts": ["third-party/jquery-3.5.1.min.js", "background.js"] , "persistent": true }, "content_scripts": [ { "matches": ["http://*/*", "https://*/*"], "js": ["third-party/jquery-3.5.1.min.js", "contentscript.js"] } ], "browser_action": { "default_popup": "menu.html", "default_icon": { "16": "images/get_started16.png", "32": "images/get_started32.png", "48": "images/get_started48.png", "128": "images/get_started128.png" } }, "icons": { "16": "images/get_started16.png", "32": "images/get_started32.png", "48": "images/get_started48.png", "128": "images/get_started128.png" }, "manifest_version": 2 }10-100 { "directions": [ "Preheat a grill for medium heat.", "Melt butter in a skillet over medium heat. Add the garlic, and cook until fragrant, 1 to 2 minutes. Whisk in honey and lemon juice. Reserve half for basting, and brush the other half onto the chicken breasts.", "Lightly oil the grill grate, and place chicken on the grill. Cook for 6 to 8 minutes per side, turning frequently. Baste often during the last 5 minutes. Chicken is done when the meat is firm, and juices run clear." ], "ingredients": [ "2 tablespoons butter or margarine", "1 clove garlic, chopped", "1/3 cup honey", "1 tablespoon lemon juice", "4 skinless, boneless chicken breast halves" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Honey Grilled Chicken", "url": "http://allrecipes.com/recipe/86637/honey-grilled-chicken/" } teleivo/dhis2-github-action-metrics {"total_count":1,"jobs":[{"id":5475746530,"run_id":1955581637,"run_url":"https://api.github.com/repos/dhis2/dhis2-core/actions/runs/1955581637","run_attempt":1,"node_id":"CR_kwDOA_1uaM8AAAABRmFC4g","head_sha":"5c1802062a1f5b50ea9a05c63c5d31a50d76a1b1","url":"https://api.github.com/repos/dhis2/dhis2-core/actions/jobs/5475746530","html_url":"https://github.com/dhis2/dhis2-core/runs/5475746530?check_suite_focus=true","status":"completed","conclusion":"success","started_at":"2022-03-09T05:49:12Z","completed_at":"2022-03-09T06:03:15Z","name":"api-test","steps":[{"name":"Set up job","status":"completed","conclusion":"success","number":1,"started_at":"2022-03-09T05:49:12.000Z","completed_at":"2022-03-09T05:49:13.000Z"},{"name":"Run actions/checkout@v3","status":"completed","conclusion":"success","number":2,"started_at":"2022-03-09T05:49:13.000Z","completed_at":"2022-03-09T05:49:15.000Z"},{"name":"Set up JDK 11","status":"completed","conclusion":"success","number":3,"started_at":"2022-03-09T05:49:15.000Z","completed_at":"2022-03-09T05:49:39.000Z"},{"name":"Build core image","status":"completed","conclusion":"success","number":4,"started_at":"2022-03-09T05:49:39.000Z","completed_at":"2022-03-09T05:52:38.000Z"},{"name":"Login to Docker Hub","status":"completed","conclusion":"skipped","number":5,"started_at":"2022-03-09T05:52:38.000Z","completed_at":"2022-03-09T05:52:38.000Z"},{"name":"Set up 
Docker Buildx","status":"completed","conclusion":"success","number":6,"started_at":"2022-03-09T05:52:38.000Z","completed_at":"2022-03-09T05:52:48.000Z"},{"name":"Publish docker image","status":"completed","conclusion":"skipped","number":7,"started_at":"2022-03-09T05:52:48.000Z","completed_at":"2022-03-09T05:52:48.000Z"},{"name":"Build test image","status":"completed","conclusion":"success","number":8,"started_at":"2022-03-09T05:52:48.000Z","completed_at":"2022-03-09T05:53:36.000Z"},{"name":"Run tests","status":"completed","conclusion":"success","number":9,"started_at":"2022-03-09T05:53:36.000Z","completed_at":"2022-03-09T06:03:13.000Z"},{"name":"Upload logs","status":"completed","conclusion":"skipped","number":10,"started_at":"2022-03-09T06:03:13.000Z","completed_at":"2022-03-09T06:03:14.000Z"},{"name":"Run actions/upload-artifact@v2","status":"completed","conclusion":"skipped","number":11,"started_at":"2022-03-09T06:03:14.000Z","completed_at":"2022-03-09T06:03:14.000Z"},{"name":"Post Build test image","status":"completed","conclusion":"success","number":19,"started_at":"2022-03-09T06:03:14.000Z","completed_at":"2022-03-09T06:03:14.000Z"},{"name":"Post Set up Docker Buildx","status":"completed","conclusion":"success","number":20,"started_at":"2022-03-09T06:03:14.000Z","completed_at":"2022-03-09T06:03:14.000Z"},{"name":"Post Set up JDK 11","status":"completed","conclusion":"success","number":21,"started_at":"2022-03-09T06:03:14.000Z","completed_at":"2022-03-09T06:03:15.000Z"},{"name":"Post Run actions/checkout@v3","status":"completed","conclusion":"success","number":22,"started_at":"2022-03-09T06:03:15.000Z","completed_at":"2022-03-09T06:03:15.000Z"},{"name":"Complete job","status":"completed","conclusion":"success","number":23,"started_at":"2022-03-09T06:03:15.000Z","completed_at":"2022-03-09T06:03:15.000Z"}],"check_run_url":"https://api.github.com/repos/dhis2/dhis2-core/check-runs/5475746530","labels":["ubuntu-latest"],"runner_id":15,"runner_name":"GitHub Actions 15","runner_group_id":2,"runner_group_name":"GitHub Actions"}]}dat.json { "url": "dat://f20cedb0ecbc2fe432336e7df0304646954e01599e3488fb0360117800d6d3ff/", "title": "Dat Editor", "description": "A simple but well-featured code editor", "fallback_page": "index.html" }{ "name": "styled-breakpoints", "version": "2.0.4", "description": "Simple and powerfull css breakpoints for styled-components", "main": "dist/styled-breakpoints.common.js", "module": "dist/styled-breakpoints.es.js", "jsnext:main": "dist/styled-breakpoints.es.js", "repository": "", "keywords": [ "styled-components", "media-query", "media", "breakpoint" ], "scripts": { "unit:watch": "NODE_ENV=test jest --watch", "build": "yarn build:common && yarn build:es && yarn build:browser", "build:common": "rollup -c --environment COMMON,PRODUCTION", "build:es": "rollup -c --environment ES", "build:browser": "rollup -c --environment BROWSER,PRODUCTION" }, "files": [ "dist", "src" ], "author": " ", "license": "MIT", "peerDependencies": { "styled-components": ">= 1 < 4" }, "devDependencies": { "babel-core": "^6.26.3", "babel-jest": "^22.4.3", "babel-preset-env": "^1.6.0", "babel-preset-stage-0": "^6.24.1", "eslint": "^4.19.1", "eslint-config-airbnb": "^16.1.0", "eslint-config-prettier": "^2.9.0", "eslint-plugin-import": "^2.11.0", "eslint-plugin-jsx-a11y": "^6.0.3", "eslint-plugin-prettier": "^2.6.0", "eslint-plugin-react": "^7.7.0", "jest": "^22.4.3", "rimraf": "^2.6.1", "rollup": "^0.45.2", "rollup-plugin-babel": "^2.7.1", "rollup-plugin-commonjs": "^8.2.0", "rollup-plugin-gzip": 
"^1.3.0", "rollup-plugin-node-resolve": "^3.0.0", "rollup-plugin-uglify": "^2.0.1", "styled-components": "^3.1.5" }, "jest": { "collectCoverage": false, "transform": { "^.+\\.js$": "./node_modules/babel-jest" }, "testFileExtensions": [ "es6", "js" ], "moduleFileExtensions": [ "js", "json", "es6" ] } } {"configuration":["BLOCKSIZE","VERBOSE","LEVEL"],"regionsToProcessedPerformance":{"program":61584444540}}{"@context": "https://linked.art/ns/v1/linked-art.json", "about": [], "classified_as": [{"id": "aat:300133025", "label": "works of art", "type": "Type"}, {"id": "http://data.duchamparchives.org/cp/object/thesaurus/domaine-non-saisi", "label": "Domaine non saisi", "type": "Type"}], "crm:P104_is_subject_to": null, "crm:P57_has_number_of_parts": null, "current_keeper": null, "current_location": null, "current_owner": {"id": "http://data.duchamparchives.org/pompidou", "label": "Centre Pompidou", "type": "Group"}, "depicts": [], "id": "http://data.duchamparchives.org/cp/object/150000001799864", "identified_by": [{"classified_as": [{"id": "aat:300404621", "label": "repository numbers", "type": "Type"}, {"id": "aat:300404670", "label": "preferred terms", "type": "Type"}], "id": "http://data.duchamparchives.org/cp/object/150000001799864/object_id", "type": "Identifier", "value": "150000001799864"}, {"id": "http://data.duchamparchives.org/cp/object/150000001799864/title", "type": "Name", "value": "Le Moteur de l'Action (\u00e0 Marcel Duchamp)"}, {"classified_as": [{"id": "aat:300312355", "label": "accession numbers", "type": "Type"}], "id": "http://data.duchamparchives.org/cp/object/150000001799864/accession", "type": "Identifier", "value": "AM 2019-F3"}], "label": null, "language": [], "made_of": [], "ore:isAggregatedBy": null, "part_of": [], "produced_by": {"carried_out_by": ["http://data.duchamparchives.org/cp/object/person/9000000000067546"], "consists_of": [{"carried_out_by": [{"id": "http://data.duchamparchives.org/cp/object/person/9000000000067546", "identified_by": [{"id": "http://data.duchamparchives.org/cp/object/person/9000000000067546/name", "type": "Name", "value": ""}], "type": "Actor"}], "classified_as": [], "id": "http://data.duchamparchives.org/cp/object/150000001799864/production/artiste", "technique": [{"id": "aat:artist", "label": "artist", "type": "Type"}], "type": "Production"}], "id": "http://data.duchamparchives.org/cp/object/150000001799864/production", "timespan": {"begin_of_the_begin": "1981-01-01T00:00:00+00:00", "end_of_the_end": "1981-01-01T00:00:00+00:00", "id": "http://data.duchamparchives.org/cp/object/150000001799864/production/timespan", "label": "1981", "type": "TimeSpan"}, "type": "Production"}, "referred_to_by": [{"classified_as": [{"id": "aat:300266036", "label": "dimensions", "type": "Type"}], "id": "http://data.duchamparchives.org/cp/object/150000001799864/dimensions", "type": "LinguisticObject", "value": "dur\u00e9e: 8'30 "}, {"classified_as": [{"id": "aat:300026687", "label": "acknowledgments", "type": "Type"}], "id": "http://data.duchamparchives.org/cp/object/150000001799864/credit", "type": "LinguisticObject", "value": "Don de l'artiste 2019"}, {"classified_as": [{"id": "aat:300163343", "label": "media (artists' materials)", "type": "Type"}], "type": "LinguisticObject", "value": "Film original inversible double-bande 16mm, noir et blanc, son"}], "refers_to": [], "related": [], "representation": [], "subject_of": [], "type": "ManMadeObject", "used_for": []}Mertaliipek/ramsateCommand { "NTProvider": { "types": { "/FMSInfo": "FMSInfo", 
"/LiveWindow/DriveSubsystem": "Subsystem", "/LiveWindow/Ungrouped/AnalogGyro[0]": "Gyro", "/LiveWindow/Ungrouped/DifferentialDrive[1]": "DifferentialDrive", "/LiveWindow/Ungrouped/Scheduler": "Scheduler", "/LiveWindow/Ungrouped/Victor[4]": "Motor Controller", "/LiveWindow/Ungrouped/Victor[5]": "Motor Controller" } }, "NetworkTables": { "LiveWindow": { "Ungrouped": { "Victor[5]": { "open": true }, "open": true }, "open": true } } } { "name": "muddle", "version": "1.0.0", "description": "combine and compose connect-style middleware", "main": "index.js", "directories": { "test": "test" }, "scripts": { "test": "node node_modules/mocha/bin/mocha" }, "repository": { "type": "git", "url": "git://github.com/jden/muddle.git" }, "keywords": [ "middleware", "connect", "compose" ], "author": "jden <>", "license": "MIT", "readmeFilename": "README.md", "devDependencies": { "chai": "~1.5.0", "mocha": "~1.8.2", "sinon-chai": "~2.3.1", "sinon": "~1.6.0" } } { "name": "requirefresh", "version": "1.1.2", "description": "Require a file without adding it into the require cache", "homepage": "https://github.com/bevry/requirefresh", "keywords": [ "require", "requirefresh", "require-fresh", "cache" ], "author": { "name": "Bevry Pty Ltd", "email": "", "url": "http://bevry.me" }, "maintainers": [ { "name": "", "email": "", "url": "https://github.com/balupton" } ], "contributors": [ { "name": "", "email": "", "url": "https://github.com/balupton" } ], "bugs": { "url": "https://github.com/bevry/requirefresh/issues" }, "repository": { "type": "git", "url": "http://github.com/bevry/requirefresh.git" }, "engines": { "node": ">=0.8" }, "dependencies": {}, "devDependencies": { "coffee-script": "~1.6.3", "joe": "~1.3.0", "joe-reporter-console": "~1.2.1", "chai": "~1.7.2" }, "directories": { "lib": "./out/lib" }, "scripts": { "test": "node ./out/test/requirefresh-test.js" }, "main": "./out/lib/requirefresh.js", "readme": "# Require Fresh\n\n[![Build Status](https://secure.travis-ci.org/bevry/requirefresh.png?branch=master)](http://travis-ci.org/bevry/requirefresh \"Check this project's build status on TravisCI\")\n[![NPM version](https://badge.fury.io/js/requirefresh.png)](https://npmjs.org/package/requirefresh \"View this project on NPM\")\n[![Gittip donate button](http://badgr.co/gittip/bevry.png)](https://www.gittip.com/bevry/ \"Donate weekly to this project using Gittip\")\n[![Flattr donate button](https://raw.github.com/balupton/flattr-buttons/master/badge-89x18.gif)](http://flattr.com/thing/344188/balupton-on-Flattr \"Donate monthly to this project using Flattr\")\n[![PayPayl donate button](https://www.paypalobjects.com/en_AU/i/btn/btn_donate_SM.gif)](https://www.paypal.com/au/cgi-bin/webscr?cmd=_flow&SESSION=IHj3DG3oy_N9A9ZDIUnPksOi59v0i-EWDTunfmDrmU38Tuohg_xQTx0xcjq&dispatch=5885d80a13c0db1f8e263663d3faee8d14f86393d55a810282b64afed84968ec \"Donate once-off to this project using Paypal\")\n\nRequire a file without adding it into the require cache\n\n\n\n## Install\n\n1. [Install Node.js](http://bevry.me/node/install)\n2. 
`npm install --save requirefresh`\n\n\n\n## Usage\n\n``` javascript\n// Via call and return with no error handling\nvar result = require('requirefresh').requireFresh('my-module-path')\n\n// Via callback uses domains for errors (with try/catch for node 0.8 support)\nvar resultOrError = require('requireFresh').requireFreshSafe('my-module-path', function(err,result){\n\t\n});\n```\n\n\n## History\n[You can discover the history inside the `History.md` file](https://github.com/bevry/requirefresh/blob/master/History.md#files)\n\n\n\n## License\nLicensed under the incredibly [permissive](http://en.wikipedia.org/wiki/Permissive_free_software_licence) [MIT License](http://creativecommons.org/licenses/MIT/)\n
Copyright © 2013+ [Bevry Pty Ltd](http://bevry.me)\n
Copyright © 2011-2012 [](http://balupton.com)\n", "readmeFilename": "README.md", "_id": "requirefresh@1.1.2", "dist": { "shasum": "ae3b39ae89934a5d325433bb144ad26dd4b859d1" }, "_from": "requirefresh@~1.1.2", "_resolved": "https://registry.npmjs.org/requirefresh/-/requirefresh-1.1.2.tgz" } 10-100 { "title": "At the Piano", "creation_date": "1917-1918", "creation_date_earliest": "1917-01-01", "creation_date_latest": "1918-01-01", "medium": "crayon on paper", "accession_number": "18.25.14", "id": "cmoa:things/0cb0a319-e777-494b-b0f6-edef1964f39f", "credit_line": "American Artists' War Emergency Fund", "date_acquired": "1918-11-14", "department": "Fine Arts", "physical_location": "Not on View", "item_width": 14.25, "item_height": 19.25, "item_depth": 0.0, "item_diameter": 0.0, "web_url": "http://collection.cmoa.org/CollectionDetail.aspx?item=1001765", "provenance_text": "American Artists' War Fund, New York; Purchase", "classification": "drawings and watercolors", "images": [ { "image_url": "http://collection.cmoa.org/CollectionImage.aspx?irn=48023&size=Medium" } ], "creator": [ { "artist_id": "cmoa:parties/ebb74905-bbc9-4333-96ef-dd6e113dece6", "party_type": "Person", "full_name": "", "cited_name": ".", "role": null, "nationality": "American", "birth_date": "1852-01-01", "death_date": "1919-01-01", "birth_place": "West Point (Orange county, New York state, United States)", "death_place": "New York City (New York state, United States)" } ] } { "name": "daily-scrum", "version": "4.4.0", "description": "spin the wheel for a list of presenters", "main": "./dist/main/main.js", "scripts": { "electron-rebuild": "node -r ts-node/register ../../.erb/scripts/electron-rebuild.js", "link-modules": "node -r ts-node/register ../../.erb/scripts/link-modules.ts", "postinstall": "npm run electron-rebuild && npm run link-modules" }, "license": "MIT" } particle-api-js/9.1.0.json {"particle.min.js":"}src/main/resources/data/sandwichable/recipes/cooked_salmon_filet_from_toasting.json10-100 { "type": "sandwichable:toasting_recipe", "input": { "tag": "c:salmon_filet" }, "output": "sandwichable:cooked_salmon_filet" }0 { "ExpandedNodes": [ "", "\\examples" ], "SelectedNode": "\\examples\\server.cc", "PreviewInSolutionExplorer": true }{ "agent": { "exec": "archiveragent-0.1-py2.7.egg --config \"%c\" --sub \"%s\" --pub \"%p\"" }, "archiver_url": "http://ec2-54-212-252-174.us-west-2.compute.amazonaws.com/backend/api/query", "agentid": "Archiver1", "source_name": "PNNL Test LBNL Multi-RTU" } 10-100 { "directions": [ "Lay the ham slices flat on a serving plate and pat dry. Spread with cream cheese. Place a pickle spear at one end of each slice, and roll the slices into cylinders around the spears; secure with toothpicks." ], "ingredients": [ "5 slices cooked ham", "1 (8 ounce) package cream cheese, softened", "5 dill pickle spears" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Yummy Roll Ups", "url": "http://allrecipes.com/recipe/22453/yummy-roll-ups/" } initiative/monsters/will_o_wisp.json { "index": 308, "name": "Will-o'-Wisp", "size": "Tiny", "type": "undead", "subtype": null, "alignment": "chaotic evil", "armor_class": 19, "hit_points": 22, "hit_dice": "9d4", "speed": "0 ft., fly 50 ft. 
(hover)", "strength": 1, "dexterity": 28, "constitution": 10, "intelligence": 13, "wisdom": 14, "charisma": 11, "damage_vulnerabilities": [], "damage_resistances": [ "acid", "cold", "fire", "necrotic", "thunder", "bludgeoning, piercing, and slashing from nonmagical weapons" ], "damage_immunities": [ "lightning", "poison" ], "condition_immunities": [ "exhaustion", "grappled", "paralyzed", "poisoned", "prone", "restrained", "unconscious" ], "senses": "darkvision 120 ft., passive Perception 12", "languages": "the languages it knew in life", "challenge_rating": 2, "special_abilities": [ { "name": "Consume Life", "desc": "As a bonus action, the will-o'-wisp can target one creature it can see within 5 ft. of it that has 0 hit points and is still alive. The target must succeed on a DC 10 Constitution saving throw against this magic or die. If the target dies, the will-o'-wisp regains 10 (3d6) hit points.", "attack_bonus": 0 }, { "name": "Ephemeral", "desc": "The will-o'-wisp can't wear or carry anything.", "attack_bonus": 0 }, { "name": "Incorporeal Movement", "desc": "The will-o'-wisp can move through other creatures and objects as if they were difficult terrain. It takes 5 (1d10) force damage if it ends its turn inside an object.", "attack_bonus": 0 }, { "name": "Variable Illumination", "desc": "The will-o'-wisp sheds bright light in a 5- to 20-foot radius and dim light for an additional number of ft. equal to the chosen radius. The will-o'-wisp can alter the radius as a bonus action.", "attack_bonus": 0 } ], "actions": [ { "name": "Shock", "desc": "Melee Spell Attack: +4 to hit, reach 5 ft., one creature. Hit: 9 (2d8) lightning damage.", "attack_bonus": 4, "damage_dice": "2d8" }, { "name": "Invisibility", "desc": "The will-o'-wisp and its light magically become invisible until it attacks or uses its Consume Life, or until its concentration ends (as if concentrating on a spell).", "attack_bonus": 0 } ], "url": "http://www.dnd5eapi.co/api/monsters/308" }{"organizations": [], "uuid": "4a3977ada01b33257d104f109162beb2e3dea6b0", "thread": {"social": {"gplus": {"shares": 0}, "pinterest": {"shares": 0}, "vk": {"shares": 0}, "linkedin": {"shares": 0}, "facebook": {"likes": 0, "shares": 0, "comments": 0}, "stumbledupon": {"shares": 0}}, "site_full": "www.tripadvisor.com", "main_image": "https://media-cdn.tripadvisor.com/media/photo-s/0c/5b/42/e7/hampton-inn-south-san.jpg", "site_section": "https://www.tripadvisor.com/Hotel_Review-g33116-d119649-Reviews-Hampton_Inn_San_Francisco_Airport-South_San_Francisco_California.html", "section_title": "Hampton Inn San Francisco-Airport - UPDATED 2017 Hotel Reviews & Price Comparison (South San Francisco, CA) - TripAdvisor", "url": "https://www.tripadvisor.com/ShowUserReviews-g33116-d119649-r467535100-Hampton_Inn_San_Francisco_Airport-South_San_Francisco_California.html", "country": "ZA", "domain_rank": 189, "title": "Very Nice Hotel", "performance_score": 0, "site": "tripadvisor.com", "participants_count": 1, "title_full": "Very Nice Hotel - Review of Hampton Inn San Francisco-Airport, South San Francisco, CA - TripAdvisor", "spam_score": 0.001, "site_type": "discussions", "published": "2017-03-15T02:00:00.000+02:00", "replies_count": 0, "uuid": "4a3977ada01b33257d104f109162beb2e3dea6b0"}, "author": "FrankBDiving", "url": "https://www.tripadvisor.com/ShowUserReviews-g33116-d119649-r467535100-Hampton_Inn_San_Francisco_Airport-South_San_Francisco_California.html", "ord_in_thread": 0, "title": "Very Nice Hotel", "locations": [], "entities": {"persons": [], 
"locations": [], "organizations": []}, "highlightText": "", "language": "english", "persons": [], "text": "Two nights at this hotel for a family event. We had a total of three rooms. This is a clean and well maintained hotel. Rooms are spacious and the beds are really comfortable. (Hampton Inn are my favorite beds). Parking and breakfast are included as part of your stay and breakfast has a good variety of hot and cold items. Reception staff (Theresa) was very friendly and professional and made check in/out very easy. Close to airport with a shuttle service. One thing to be aware of, this hotel is not located in a highly concentrated area of restaurants within short walking distance so you’ll want to do some research on dining in the immediate area if you plan to eat by the hotel. Note, the Embassy Suites (next door) does have a restaurant so an easy place to get to. Overall, a very pleasant stay.", "external_links": [], "published": "2017-03-15T02:00:00.000+02:00", "crawled": "2017-03-26T21:06:02.131+03:00", "highlightTitle": ""}{ "name": "sweetcaptcha", "version": "0.0.2-1", "description": "SweetCaptcha Javascript Node SDK", "main": "index.js", "homepage": "http://www.sweetcaptcha.com", "keywords": [ "captcha", "sweetcatpcha" ], "repository": { "type": "git", "url": "http://github.com/sweetcaptcha/sweetcaptcha-sdk-js.git" }, "scripts": { "test": "make test" }, "author": "Sagish", "license": "MIT", "dependencies": { "superagent": "~0.15.7" }, "devDependencies": { "mocha": "~1.13.0", "expect.js": "~0.2.0", "should": "~2.0.2" } } andersonRocha091/multidatasource-scaffold { "name": "07", "version": "1.0.0", "description": "Multi data sourcing is an scaffolding project which aims to provide an initial setup to built applications which gonna consume data from mongo and SQL databases", "main": "index.js", "scripts": { "preinstall": "npm i -g pm2", "postinstall": "cross-env NODE_ENV=prod npm t ", "test": "nyc --reporter=html mocha --timeout 20000 --exit src/tests/*.js", "test:watch": "nyc mocha src/tests/*.js -w", "test:prod": "cross-env NODE_ENV=prod npm t", "prod": "cross-env NODE_ENV=prod pm2-runtime src/Api.js", "dev": "cross-env NODE_ENV=dev pm2-runtime src/Api.js" }, "repository": { "type": "git", "url": "git+https://github.com/andersonRocha091/multidatasource-scaffold.git" }, "keywords": [], "author": "", "license": "ISC", "bugs": { "url": "https://github.com/andersonRocha091/multidatasource-scaffold/issues" }, "homepage": "https://github.com/andersonRocha091/multidatasource-scaffold#readme", "dependencies": { "@hapi/hapi": "^19.1.1", "@hapi/inert": "^6.0.1", "@hapi/joi": "^17.1.1", "@hapi/vision": "^6.0.0", "bcrypt": "^4.0.1", "boom": "^7.3.0", "cross-env": "^7.0.2", "dotenv": "^8.2.0", "hapi-auth-jwt2": "^10.1.0", "hapi-swagger": "^13.0.2", "jsonwebtoken": "^8.5.1", "mongoose": "^5.9.16", "pg": "^8.2.1", "pg-hstore": "^2.3.3", "pm2": "^4.4.0", "sequelize": "^5.21.10" }, "devDependencies": { "mocha": "^7.2.0", "nyc": "^15.1.0" } } { "directions": [ "Place the potatoes, carrots, celery, onion, 2 teaspoons of salt, and 3 quarts of water into a large pot. Bring to a boil over high heat while you proceed to make the knipla.", "Combine 5 cups of flour and 1 teaspoon of salt in a large bowl; pour in the eggs and 1 1/4 cups water. Mix until the dough comes together, about two minutes. Knead on a lightly floured surface for 3 to 5 minutes until elastic. Using kitchen scissors, snip off pieces of the dough into the boiling soup. 
Boil until the knipla rise to the surface, then reduce heat to low, and simmer while you make the cheese sauce.", "Melt the butter in a saucepan over medium heat, then whisk in 1 cup of flour and 2 teaspoons of salt. Pour in milk and add cheese; cook until the cheese melts and the mixture thickens, stirring constantly. Pour the cheese sauce into the soup and simmer for 20 minutes more, or until ready to serve." ], "ingredients": [ "Soup:", "6 baking potatoes, peeled and diced", "2 cups carrots, peeled and thinly sliced", "2 cups finely chopped celery", "1 cup diced onion", "2 teaspoons salt", "3 quarts water", "Knipla:", "5 cups all-purpose flour", "1 teaspoon salt", "2 eggs, beaten", "1 1/4 cups water", "Cheese Sauce:", "1 cup butter", "1 cup all-purpose flour", "2 teaspoons salt", "1 quart milk", "1 cup Cheddar cheese" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Cheesy Potato Knipla Soup", "url": "http://allrecipes.com/recipe/101188/cheesy-potato-knipla-soup/" } {"text":"\n     Assistance in the form of in-kind benefits, in-kind orders, electronic benefit transfers, or emergency checks shall be used: \n     (a)     As aid payments.\n     (b)     For applicants and recipients who declare themselves to be homeless. Applicants and recipients are required to provide a verifiable rent receipt, or verifiable documentation of shared housing, or verifiable documentation of rent-free housing. Self-declared homeless applicants and recipients shall receive in-kind benefits for housing, utilities, and meals. If in-kind benefits are not available, such applicants and recipients shall receive the cash assistance equivalent to the income-in-kind value of housing, utilities, and/or meals, whichever is not available, if otherwise eligible for these amounts. Failure to comply with the rules of a housing program that results in ejection from that housing program will be considered failure to satisfy the requirements for continuing eligibility for aid and will result in discontinuance from the General Assistance Program, subject to the notice and hearing provisions of this Article. \n     (c)     For Eligible persons awaiting transportation arrangements, provided that aid shall not exceed one week.\n     (d)     For Eligible homeless persons awaiting admission into a hospital or institution.\n     (e)     For Persons who have demonstrated inability to handle cash payments for necessities of life.\n(Amended by Ord. 271-81, App. 5/21/81; Ord. 152-98, App. 5/8/98; Proposition N, 11/5/2002; Ord. 193-03, File No. 030871, App. 7/25/2003; Ord. 93-07, File No. 070208, App. 4/27/2007) \n\n","heading":{"title":"20","chaptersection":"59.3","identifier":"20.59.3","catch_text":"AID PAYMENTS; IN-KIND AID."}}data/uso-styles/21309.json { "id": 21309, "name": "Lenta.RU без рекламы Nokia (удаляет рамку Nokia)", "description": "Removes Nokia advertising - the phone frame around article picrure.\r\nУдаляет рекламу Нокии - рамку-телефон вокруг картинки к статье.\r\nПарни из Нокии исхитрились показывать рекламу даже тем, у кого она режется Adblock-ом. Этот скрипт убирает навязчивую рекламу Нокии. 
Покупайте ай-фон ;)", "user": { "id": 28869, "name": "vladson", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2009-09-27T20:48:18.000Z", "weekly_install_count": 0, "total_install_count": 311, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/21309_after.gif?r=1587456310", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": null, "created": "2009-09-27T20:48:18.000Z", "category": "site", "raw_subcategory": "lenta", "subcategory": "lenta", "additional_info": null, "style_tags": [], "css": "@-moz-document domain(\"lenta.ru\") {\r\n td[class=statya] img[usemap=\"#hole\"] {display:none;}\r\n td[class=statya] div:nth-child(3) {display:none!important;background-color:#FFFFFF!important;padding-top:2px!important;}\r\n td[class=statya] div:nth-child(5) {background-position:left top!important; margin-left:0px!important;}\r\n td[class=statya] div:nth-child(6) {padding-top:0px!important;margin-left:0px!important;margin-top:5px!important;}\r\n td[class=statya] table {margin-bottom:10px;}\r\n td[class=statya] div div[class=dt] {display:block!important;background:#EFEDDF;font-weight:bold}\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/21309/lenta-ru-nokia-nokia.user.js", "style_settings": [] }data/computational-methods-systems-biology-international-24.json1-10 {"categories":["Computational Biology","Computer Science","Engineering"],"desc":" This book constitutes the refereed proceedings of the 13th International Conference on Computational Methods in Systems Biology, CMSB 2015, held in Nantes, France, in September 2015. The 20 full papers and 2 short papers presented were carefully reviewed and selected from 43 full and 4 short paper submissions. 
The papers cover a wide range of topics in the analysis of biological systems, networks and data such as model checking, stochastic analysis, hybrid systems, circadian clock, time series data, logic programming, and constraints solving ranging from intercellular to multiscale.","details":{"authors":null,"format":"pdf","isbn-10":"3319234005","isbn-13":"978-3319234007","pages":"288 pages","publication date":"September 3, 2015","publisher":"Springer","size":"14.74Mb"},"img":"http://2172.16.31.108/covers/53/53699806e352749dc19918119365c06a.jpg","link":"https://rapidhosting.info/files/h85","title":"Computational Methods in Systems Biology: 13th International Conference, CMSB 2015, Nantes, France, September 16-18, 2015, Proceedings (Lecture Notes in Computer Science)"}bundestag/dip21-daten { "vorgangId": "239092", "VORGANG": { "WAHLPERIODE": "19", "VORGANGSTYP": "Schriftliche Frage", "TITEL": "Bewertung des Films \"Eingeimpft\"", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "19/4075", "DRS_TYP": "Schriftliche Fragen", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/19/040/1904075.pdf" }, "EU_DOK_NR": "", "SCHLAGWORT": [ { "_fundstelle": "true", "__cdata": "Film" }, "Filmförderung", "Gesundheitsvorsorge", { "_fundstelle": "true", "__cdata": "Impfung" } ], "ABSTRAKT": "Originaltext der Frage(n): \r\n \r\nWie bewertet die Bundesregierung den auch aus Bundesmitteln geförderten Film \"Eingeimpft\" vor dem Hintergrund, dass gegenwärtig über eine anwachsende Impfskepsis berichtet wird, und ist die Bundesregierung der Meinung, dass der Film geeignet ist, die politischen Bemühungen um eine Steigerung der allgemeinen Durchimpfungsrate zu konterkarieren?" }, "VORGANGSABLAUF": { "VORGANGSPOSITION": { "ZUORDNUNG": "BT", "URHEBER": "Schriftliche Frage/Schriftliche Antwort ", "FUNDSTELLE": "31.08.2018 - BT-Drucksache 19/4075, Nr. 
1", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/19/040/1904075.pdf", "PERSOENLICHER_URHEBER": [ { "VORNAME": "Katrin", "NACHNAME": "Helling-Plahr", "FUNKTION": "MdB", "FRAKTION": "FDP", "AKTIVITAETSART": "Frage" }, { "VORNAME": "Monika", "NACHNAME": "Grütters", "FUNKTION": "Staatsmin.", "RESSORT": "Bundeskanzleramt", "AKTIVITAETSART": "Antwort" } ] } } } SoapPunk/decentraland-compi-meta { "compilerOptions": { "allowJs": true, "removeComments": true, "target": "es2015", "module": "CommonJS", "lib": ["es2015"], "esModuleInterop": true, "allowSyntheticDefaultImports": true, "skipLibCheck":true, "types": [ "env", "dcl", "@decentraland/ChatController", "@decentraland/EthereumController", "@decentraland/SocialController", "@decentraland/Identity", "@decentraland/web3-provider" ], "typeRoots": [ "../node_modules/decentraland-ecs/types" ] }, "include": ["../src/game.ts"] } 1-10 {"gridmest.css":","gridmest.min.css":"}alexa/alexa-dataset-redtab {"relation": [["", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], ["Name", "", "", "I Bared My Chest", "Reap The Many Fruits", "The Horror", "Ho Diddi", "Deep In The Outside", "We Make The Leap", "Confidence Makes Love", "The Sand That Falls"], ["Artist", "", "", "", "wood", "Duwood", "Duwood", "Duke Garwood", "Duke Garwood", "Duke Garwood", "Duke Garwood"], ["Time", "2:28", "3:02", "4:45", "5:10", "5:28", "2:06", "3:08", "2:39", "2:37", "6:25"], ["Price", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac", "0,99 \u20ac"], ["", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes", "View In iTunes"]], "pageTitle": "iTunes - Music - The Sand That Falls by ", "title": "", "url": "https://itunes.apple.com/ro/album/we-make-the-leap/id324371339?i=324371368", "hasHeader": true, "headerPosition": "FIRST_ROW", "tableType": "RELATION", "tableNum": 0, "s3Link": "common-crawl/crawl-data/CC-MAIN-2015-32/segments/1438042988061.16/warc/CC-MAIN-20150728002308-00090-ip-10-236-191-2.ec2.internal.warc.gz", "recordEndOffset": 862113391, "recordOffset": 862092888, "tableOrientation": "HORIZONTAL", "TableContextTimeStampAfterTable": "{112909=Copyright \u00a9 2014 Apple Inc. Toate\u00a0drepturile\u00a0rezervate., 18974=Born: 1969 in Kent, England}", "lastModified": "Fri, 31 Jul 2015 08:25:37 GMT", "textBeforeTable": "Open iTunes to preview, buy and download music. View More by This Artist The Sand That Falls iTunes for Mac + PC Do you already have iTunes? Click I Have iTunes to open it now. We are unable to find iTunes on your computer. To preview and buy music from The Sand That Falls by , download iTunes now. iTunes is the world's easiest way to organize and add to your digital media collection. iTunes Progress Indicator If iBooks doesn't open, click the iBooks app in your Dock. Opening the iBooks Store. Progress Indicator If iTunes doesn\u2019t open, click the iTunes icon in your Dock or on your Windows desktop. Opening the iTunes", "textAfterTable": "10 Songs Biography Born: 1969 in Kent, England Genre: Alternative Years Active: '90s, '00s, '10s Duke Garwood is a London-based multi-instrumentalist and recording artist whose expertise on a wide range of instruments has graced numerous albums by an eclectic range of musicians. He is best known for... Full bio Top Albums and Songs by 1. 
Black Pudding View In iTunes 2.", "hasKeyColumn": true, "keyColumnIndex": 1, "headerRowIndex": 0}{"derivation": "from G1537 (\u1f10\u03ba\u03bd\u03b5\u03cd\u03c9) and G3506 (\u1f10\u03ba\u03bd\u03b5\u03cd\u03c9);", "kjv_def": "convey self away", "lemma": "\u1f10\u03ba\u03bd\u03b5\u03cd\u03c9", "frequency": 1, "strongs_def": " (by analogy) to slip off, i.e. quietly withdraw", "outline": "

  1. to bend to one side
  2. to take one's self away, to turn one's self, to avoid a thing
  3. to swim away, escape by swimming
  4. to escape, slip away secretly
"}{ "$schema": "https://developer.microsoft.com/json-schemas/spfx-build/package-solution.schema.json", "solution": { "name": "spfx-employee-performance-app-client-side-solution", "id": "6ccbf29a-fd9a-4e69-9d6d-012318579a50", "version": "1.0.0.7", "features": [ { "title": "employee-performance-app-assets", "description": "List assets for employee performance app", "id": "368d2b60-be42-4505-986a-5f775c922780", "version": "1.0.0.0", "assets": { "elementManifests": ["elements.xml"], "elementFiles": [ "employeesListSchema.xml", "achievementsListSchema.xml", "earnedAchievementsListSchema.xml", "performanceSkillsListSchema.xml" ] } } ], "includeClientSideAssets": true, "skipFeatureDeployment": false, "iconPath": "Images/EmployeePerformanceIcon.png", "webApiPermissionRequests": [ { "resource": "Microsoft Graph", "scope": "User.ReadBasic.All" } ] }, "paths": { "zippedPackage": "solution/spfx-employee-performance-app.sppkg" } } Facility_Name,Index,ODCAF_Facility_Type,Street_No,Street_Name,Postal_Code,City,Prov_Terr Bibliothèque Aux Sources,996,library or archives,33,rue de-l'église,G0A 3R0,montréal,qc { "set_name": "1st Series: Wing Team & Oz Corp", "rarity": "U", "name": "OZ-13MSX1 Vayeate", "weapon": "Beam Cannon", "effect": "Increase Clash Points by 1 when Attacking alone or with Mercurius (maximum 6 Clash Points).", "number": "MS-022", "corps_symbol": "OZ", "clash_points": "4", "type": "Mobile Suit", "price": "4" }{ "description": "Features:

  • Type: Pump Action Shotgun
  • Caliber/Gauge: 12GA (3\" Shells)
  • Barrel Length: 20\"
  • Sights: Bead
  • Capacity: 7 + 1 RDS
  • Choke: Fixed
  • Stock/Grips: Synthetic Stock
  • 3 AVAILABLE

Condition:

  • 100% Unfired Brand New in Box

Includes:

  • Factory Box

Safety:

You, as the buyer are solely responsible for determining that a used gun is safe to shoot. The firearms offered here have NOT been test fired but have only been given a visual inspection for resell purposes. It is a requirement that every used firearm be inspected by a licensed gunsmith before firing. By bidding or purchasing, you the buyer agrees to hold Plano Pawn Shop harmless against damages, injury, or death caused by defective merchandise.

* All firearms MUST be shipped to a licensed FFL dealer. We will not ship until we have a valid copy of your dealers license e-mailed or faxed to us.*

Need a Loan or Have one to sell?

Do you have the same make and model item? Are you needing a cash loan or wanting to sell your item fast for cash? Get a free quote today by clicking HERE.

***Click HERE to BUY It NOW!***", "info": [ { "label": "Seller:", "value": "Plano Pawn Shop" }, { "label": "Contact:", "value": "Joanna" }, { "label": "Phone", "value": "(972) 424-6911" }, { "label": "Address:", "value": "1202 K Avenue Plano," }, { "label": "Price:", "value": "$249.00" }, { "label": "Manf.:", "value": "" }, { "label": "Model:", "value": "Model M88SP 12GA" }, { "label": "Condition:", "value": "100% (NIB, New In the Box)" } ], "listing_id": "11605", "title": "Brand New in Box Mossberg M88SP 12GA Home Defense" } { "id" : 65, "status" : "Accepted", "summary" : "Ability to add \"OnZoomChanged\" event listener to OpenStreetMapView instance.", "labels" : [ "Type-Enhancement", "Priority-Medium" ], "stars" : 0, "commentCount" : 5, "comments" : [ { "id" : 0, "commenterId" : 3456884467250674452, "content" : "Please add an ability to be notified on zoom level change outside of OpenStreetMapView class. Just implement ability to add "OnZoomChanged" event listener to OpenStreetMapView instance. This event should be fired after:\r\n- zoomIn;\r\n- zoomOut;\r\n- setZoomLevel;\r\n- any other methods, which change map zoom level?\r\n", "timestamp" : 1277392517, "attachments" : [ ] }, { "id" : 1, "commenterId" : 3456884467250674452, "content" : "Ability to handle this event in Overlays will be also useful (for example, in custom overlay, which behavior depends on current zoom level).", "timestamp" : 1277392656, "attachments" : [ ] }, { "id" : 2, "commenterId" : 8937367184059112911, "content" : "See also issue 55.", "timestamp" : 1277401776, "attachments" : [ ] }, { "id" : 3, "commenterId" : 8937367184059112911, "content" : "", "timestamp" : 1278600042, "attachments" : [ ] }, { "id" : 4, "commenterId" : 8937367184059112911, "content" : "Do the fixes for issue 55 also fix this?", "timestamp" : 1284970465, "attachments" : [ ] } ] }{"id":"index.html","dependencies":[{"name":"./index.js","dynamic":true,"resolved":"/Users/yunfei/Codes/Conestoga/PROG8110/Mar10/public/index.js","parent":"/Users/yunfei/Codes/Conestoga/PROG8110/Mar10/public/index.html"}],"generated":{"html":"\n

\n Todo List\n

\n
\n \n
\n
\n \n"},"sourceMaps":null,"error":null,"hash":"7c5ed3531d8986ec9309842f51fcdee3","cacheData":{}}{ "name" : "aroflo-node", "version" : "2021.12.4", "description" : "NodeJS Client for the AroFlo Extended API Platform", "homepage" : "https://aroflo.com", "author" : { "email" : "", "name" : "AroFlo", "url" : "https://aroflo.com" }, "license" : "MIT", "main" : "src/aroflo.js", "dependencies" : { "crypto-js" : "^4.0.0" }, "repository" : { "type" : "git", "url" : "git+https://github.com/AroFlo/aroflo-node.git" }, "keywords" : [ "aroflo", "api", "job management" ], "bugs" : { "url" : "https://github.com/AroFlo/aroflo-node/issues" } }dfries/exiftool.js0 { "Aperture": 1, "BitsPerSample": 8, "ColorComponents": 3, "ColorSpace": "sRGB", "ComponentsConfiguration": "Y, Cb, Cr, -", "CreateDate": "1980:08:15 23:41:48", "DateTimeOriginal": "1980:08:15 23:41:48", "EncodingProcess": "Baseline DCT, Huffman coding", "ExifByteOrder": "Big-endian (Motorola, MM)", "ExifImageHeight": 480, "ExifImageWidth": 640, "ExifVersion": "0220", "ExposureCompensation": "+0.556", "ExposureTime": "1/125", "FNumber": 1, "FileSource": "Digital Camera", "FlashpixVersion": "", "ImageHeight": 8, "ImageSize": "8x8", "ImageWidth": 8, "InteropIndex": "R98 - DCF basic file (sRGB)", "InteropVersion": "0100", "Make": "SAMSUNG Electronics", "Model": "Anycall SPH-W3300", "Orientation": "Horizontal (normal)", "PhotometricInterpretation": "YCbCr", "ResolutionUnit": "inches", "SceneType": "Directly photographed", "ShutterSpeed": "1/125", "Software": "BE19", "XResolution": 72, "YCbCrPositioning": "Centered", "YCbCrSubSampling": "YCbCr4:2:0 (2 2)", "YResolution": 72 }{ "name": "nw-app", "description": "Customize the message bubbles in Messages.app in OS X 10.10 Yosemite", "author": " <>", "version": "2.1.1", "homepage": "https://github.com/kethinov/BubblePainter", "license": "CC-BY-4.0", "main": "main.html", "readmeFilename": "README.md", "engines": { "node": ">=0.10.0" }, "engineStrict": true, "window": { "show": false, "toolbar": false, "width": 720, "min_width": 720, "max_width": 720, "height": 720, "min_height": 720, "max_height": 720 }, "dependencies": { "css": "^1.6.0" }, "jshintConfig": { "camelcase": true, "curly": true, "devel": true, "evil": true, "indent": 2, "node": true }, "repository": { "type": "git", "url": "git://github.com/kethinov/BubblePainter.git" }, "private": true } 10-100 { "battle": "GA011", "operation": false, "cwsac_reference": "GA011", "forces": { "Confederate": { "casualties": 3000, "commanders": [ { "middle_name": "E.", "last_name": "Johnston", "suffix": "", "first_name": "Joseph", "rank": "General", "fullname": "", "navy": false } ], "description": "Army of Tennessee", "armies": 1 }, "US": { "casualties": 2400, "commanders": [ { "middle_name": "T.", "last_name": "Sherman", "suffix": "", "first_name": "William", "rank": "Major General", "fullname": "", "navy": false } ], "description": "Military Division of Mississippi" } }, "url": "http://www.nps.gov/abpp/battles/ga011.htm", "forces_text": "Military Division of Mississippi [US]; Army of Tennessee [CS]", "casualties_text": "5,400 total (US 2,400; CS 3,000)", "results_text": "Union victory", "dates": "May 26-June 1, 1864 (May 28, 1864)", "result": "Union", "other_names": [ "New Hope Church", "Pumpkinvine Creek" ], "significance": "C", "preservation": "IV.1", "state": "GA", "campaign": "Atlanta Campaign [May-September 1864]", "end_date": "1864-06-01", "description": "Johnston's army fell back from the vicinity of Cassville-Kinston, first to Allatoona Pass and then to 
the Dallas area and entrenched. Sherman's army tested the Rebel line while entrenching themselves. The Battle of Dallas occurred on May 28 when Lt. Gen. 's corps probed the Union defensive line, held by Maj. . Logan's Army of the Tennessee corps, to exploit any weakness or possible withdrawal. Fighting ensued at two different points, but the Rebels were repulsed, suffering high casualties. Sherman continued looking for a way around Johnston's line, and, on June 1, his cavalry occupied Allatoona Pass, which had a railroad and would allow his men and supplies to reach him by train. Sherman abandoned his lines at Dallas on June 5 and moved toward the railhead at Allatoona Pass forcing Johnston to follow soon afterwards.", "location": [ { "state": "GA", "place": "Paulding County" } ], "battle_name": "Dallas", "start_date": "1864-05-26", "casualties": 5400, "principal_commanders": ". [US]; [CS]" }0 { "name": "", "description": "A decadent vanilla custard crowned with caramlised sugar and aromatised with fresh, sweet blueberries.\r\n\r\nBuilding on my br\u00fbl\u00e9e infatuation, I decided to try my tried and tested brulee base with one of my favorite fruits = Blueberry.\r\nThis is a natural combo of sweet and tart which translates phenomenally well in a vape.\r\n\r\nThe Blueberry combo was not to difficult to nail but I wanted a little twist and veered away from the Holy Trinity of Blueberries and instead opted for INW Raspberry to complete my trio. The Raspberry cuts though the often monotonous notes of the Blueberry and offers a refreshing angle to this mix. \r\n\r\nLet the berries mingle for at least a week before enjoying this baby!\r\n", "id": 70953, "image_url": "https:\/\/storage.googleapis.com\/dev-images.alltheflavors.com\/uploads\/recipe\/imageUrl\/70953\/bb.jpg", "updated_at": "2020-12-12T14:05:50.000-05:00", "deleted_at": null, "recipe_type": 1, "recipe_flavors": [ { "name": " (yc)", "flavor_id": "894", "millipercent": 3000, "vendor": "INW" }, { "name": "Custard", "flavor_id": "4210", "millipercent": 2500, "vendor": "INW" }, { "name": "Blueberry", "flavor_id": "1201", "millipercent": 3000, "vendor": "FW" }, { "name": "Bilberry", "flavor_id": "533", "millipercent": 500, "vendor": "FA" }, { "name": "Raspberry", "flavor_id": "667", "millipercent": 1000, "vendor": "INW" } ], "author": "RudeRudi", "views": "2454", "created_at": "2017-10-28", "slug": "blueberry_brulee", "archive_image_url": "https:\/\/allthearchives.com\/images\/70953.jpg", "total_flavoring": "10.0%", "steep_days": "10", "best_vg": "70%", "temperature": "0", "total_reviews": 2, "recipe_score": 4, "reviews": [ { "user": "DeadHer0ine", "score": 5, "text": "This recipe is fantastic. Reminiscent of Liquid State's Cowboy Cobbler, which makes me want to play with the recipe to get a little closer. A nice sharp berry flavor that still cuts through the cream and sugar of the heavy brulee base." }, { "user": "SlashaLO", "score": 4, "text": "Let me start off with: This is tasty af. Like, really freaking good Your addition of Raspberry had me worried at first, but after it settled down, it plays with the blueberries so well, I love it. I will probably take this idea to switch up my berries too honestly. I love the INW custard\/brulee combo, one of my favorite combos, which is why I mixed this up. I get no off notes from Brulee being this high, where I know some will. The reason I gave it 4 stars, was more of confusion though. 
The title is Brulee, but in the description you called it a custard, but then you said it was using your brulee base? When making this, I personally expected a Brulee, which unfortunately I don't get a brulee, as much as I do a rich creaminess. I feel for me, the berries cover some of the notes that the brulee is supposed to give off. If this is supposed to be more of a custard, then I would say 5 stars. No matter what, this is freaking lovely and I love it. " } ] }Ryebread4/Rustionary {"word":"whereness","definition":"The quality or state of having a place; ubiety; situation; position. [R.] A point hath no dimensions, but only a whereness, and is next to nothing. Grew."}{ "author": { "name": "", "email": "" }, "name": "express-pagination", "description": "An express helper to generate pagination links", "version": "0.0.5", "repository": { "type": "git", "url": "git://github.com/starfish-prime/express-pagination" }, "main": "index.js", "engines": { "node": ">=0.2.0" }, "dependencies": {}, "devDependencies": { "mocha": "1.0.0", "should": "0.5.1" }, "readme": "# express-pagination\nThis is a super simple module to provide a pagination helper function in\nyour views. Code is derived from the pagination logic found in\nwordpress.\n\n## Using It\nAdd the module 'express-pagination' to your package.json and refresh\nyour dependencies via npm. \n\nSet it up in your app via:\n\n```javascript\napp.helpers(require('pagination'));\n\n```\n\nor \n\n```javascript\napp.helpers({\n paginate:require('pagination').paginate\n});\n\n```\n\nAnd use it from your view (given the following locals are defined).\n\n!= paginate(count, resultsPerPage, currentPage)\n\n## License \n\nThis software is licensed under the GNU Public License. See COPYING for\nfurther details.\n", "readmeFilename": "readme.md", "bugs": { "url": "https://github.com/starfish-prime/express-pagination/issues" }, "homepage": "https://github.com/starfish-prime/express-pagination", "_id": "express-pagination@0.0.5", "_shasum": "13dba9e8163ebfacc809a3ea179f530820da3f12", "_from": "express-pagination@", "_resolved": "https://registry.npmjs.org/express-pagination/-/express-pagination-0.0.5.tgz" } Oscar-Oliveira/OR-Datasets 
{"Name":"Nice25i3b5","Objects":[{"Length":484,"Height":369,"Stock":4,"Cost":178596},{"Length":1000,"Height":631,"Stock":3,"Cost":631000},{"Length":516,"Height":369,"Stock":3,"Cost":190404}],"Items":[{"Length":158,"Height":369,"Demand":1,"DemandMax":null,"Value":58302},{"Length":111,"Height":296,"Demand":1,"DemandMax":null,"Value":32856},{"Length":188,"Height":165,"Demand":1,"DemandMax":null,"Value":31020},{"Length":125,"Height":156,"Demand":1,"DemandMax":null,"Value":19500},{"Length":155,"Height":204,"Demand":1,"DemandMax":null,"Value":31620},{"Length":129,"Height":296,"Demand":1,"DemandMax":null,"Value":38184},{"Length":326,"Height":139,"Demand":1,"DemandMax":null,"Value":45314},{"Length":155,"Height":335,"Demand":1,"DemandMax":null,"Value":51925},{"Length":185,"Height":75,"Demand":1,"DemandMax":null,"Value":13875},{"Length":189,"Height":179,"Demand":1,"DemandMax":null,"Value":33831},{"Length":328,"Height":165,"Demand":1,"DemandMax":null,"Value":54120},{"Length":131,"Height":230,"Demand":1,"DemandMax":null,"Value":30130},{"Length":183,"Height":296,"Demand":1,"DemandMax":null,"Value":54168},{"Length":265,"Height":147,"Demand":1,"DemandMax":null,"Value":38955},{"Length":361,"Height":94,"Demand":1,"DemandMax":null,"Value":33934},{"Length":312,"Height":138,"Demand":1,"DemandMax":null,"Value":43056},{"Length":312,"Height":158,"Demand":1,"DemandMax":null,"Value":49296},{"Length":70,"Height":230,"Demand":1,"DemandMax":null,"Value":16100},{"Length":361,"Height":110,"Demand":1,"DemandMax":null,"Value":39710},{"Length":265,"Height":149,"Demand":1,"DemandMax":null,"Value":39485},{"Length":145,"Height":335,"Demand":1,"DemandMax":null,"Value":48575},{"Length":326,"Height":335,"Demand":1,"DemandMax":null,"Value":109210},{"Length":125,"Height":230,"Demand":1,"DemandMax":null,"Value":28750},{"Length":249,"Height":156,"Demand":1,"DemandMax":null,"Value":38844},{"Length":185,"Height":104,"Demand":1,"DemandMax":null,"Value":19240}]}{ "recommendations": [ "eg2.tslint", "steoates.autoimport", "amatiasq.sort-imports", "freebroccolo.reasonml" ] }{ "id": "ethereum-gold-project", "symbol": "etgp", "name": "Ethereum Gold Project", "platforms": { "ethereum": "0xa96f31f1c187c28980176c3a27ba7069f48abde4" }, "hashing_algorithm": null, "categories": [], "description": { "en": "ETGP is the update of ETG project with new roadmap , our vision is to facilitate & decentralize payment of companies & communities" }, "country_origin": "GB", "genesis_date": null, "contract_address": "0xa96f31f1c187c28980176c3a27ba7069f48abde4", "url": "https://www.etgproject.org", "explorers": [ "https://etherscan.io/token/0xa96f31f1c187c28980176c3a27ba7069f48abde4", "https://ethplorer.io/address/0xa96f31f1c187c28980176c3a27ba7069f48abde4" ], "twitter": "ethereumgoldetg", "facebook": "Ethereum-Gold-ETG-127521641285006", "github_org": "EthereumGoldETG" }{ "vorgangId": "65825", "VORGANG": { "WAHLPERIODE": "18", "VORGANGSTYP": "Mündliche Frage", "TITEL": "Zuständigkeit der Bundesländerbehörden hinsichtlich der Einsatzgenehmigung der Insektizide Dipel ES und Karate Forst und Aufweichung der Einsatzbeschränkungen für Insektizidausbringung mit Luftfahrzeugen", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "PLENUM": { "PLPR_KLARTEXT": "Mündliche Frage/Mündliche Antwort", "PLPR_HERAUSGEBER": "BT", "PLPR_NUMMER": "18/93", "PLPR_SEITEN": "8831A - 8832A", "PLPR_LINK": "http://dipbt.bundestag.de:80/dip21/btp/18/18093.pdf#P.8831" }, "EU_DOK_NR": "", "SCHLAGWORT": [ "Bundesamt für Verbraucherschutz und Lebensmittelsicherheit", 
"Naturschutzgebiet", { "_fundstelle": "true", "__cdata": "Pflanzenschutzmittel" } ], "ABSTRAKT": "Originaltext der Frage(n): \r\n \r\nWelche Position vertritt die Bundesregierung bezüglich der Entscheidung des Bundesamtes für Verbraucherschutz und Lebensmittelsicherheit (BVL), die Einsatzgenehmigung der Insektizide Dipel ES und Karate Forst in Naturschutzgebieten in das Ermessen der Bundesländerbehörden zu stellen, ohne die bisher zwingende Voraussetzung einer Notfallzulassung durch das BVL vorzusehen (vgl. Gemeinsames Informationspapier von Bundesamt für Naturschutz [BfN] und Umweltbundesamt [UBA] \"Pflanzenschutz mit Luftfahrzeugen – Naturschutzfachliche Hinweise für die Genehmigungsprüfung\", März 2015), und wie bewertet die Bundesregierung die Tatsache, dass die bis zum 19. Februar 2015 gültigen Einsatzbeschränkungen für Insektizidausbringung mit Luftfahrzeugen (grundsätzlich keine Anwendung in Naturschutzgebieten, Anwendung auf maximal der Hälfte der zusammenhängenden Waldfläche) durch neu geschaffene Ausnahmetatbestände im Rahmen der Neuregelung aufgeweicht wurden (vgl. Anwendungsbestimmungen in den Anlagen zum Gemeinsamen Informationspapier sowie Präsentation von Dr. vom Mai 2014 unter http://bit.ly/1Cc7C8x)?" }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BT", "URHEBER": "Mündliche Frage ", "FUNDSTELLE": "13.03.2015 - BT-Drucksache 18/4295, Nr. 7", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/18/042/1804295.pdf" }, { "ZUORDNUNG": "BT", "URHEBER": "Mündliche Frage/Mündliche Antwort", "FUNDSTELLE": "18.03.2015 - BT-Plenarprotokoll 18/93, S. 8831A - 8832A", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btp/18/18093.pdf#P.8831", "PERSOENLICHER_URHEBER": [ { "VORNAME": "Harald", "NACHNAME": "Ebner", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Frage", "SEITE": "8831A" }, { "VORNAME": "Peter", "NACHNAME": "Bleser", "FUNKTION": "Parl. 
Staatssekr.", "RESSORT": "Bundesministerium für Ernährung und Landwirtschaft", "AKTIVITAETSART": "Antwort", "SEITE": "8831A" }, { "VORNAME": "Steffi", "NACHNAME": "Lemke", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Zusatzfrage", "SEITE": "8831D" } ] } ] } } {"meta":{"build_time":"2021-04-11T07:04:14.468Z","license":"CC-BY-4.0","version":"2.0-beta"},"data":{"date":"2020-09-10","state":"MD","meta":{"data_quality_grade":"A+","updated":"2020-09-10T14:00:00Z","tests":{"total_source":"totalTestsViral"}},"cases":{"total":114078,"confirmed":114078,"probable":null},"tests":{"pcr":{"total":2128024,"pending":null,"encounters":{"total":null},"specimens":{"total":2128024,"positive":136095,"negative":null},"people":{"total":1423727,"positive":114078,"negative":1309649}},"antibody":{"encounters":{"total":null,"positive":null,"negative":null},"people":{"total":125277,"positive":10638,"negative":114639}},"antigen":{"encounters":{"total":null,"positive":null,"negative":null},"people":{"total":null,"positive":null,"negative":null}}},"outcomes":{"recovered":7166,"hospitalized":{"total":14706,"currently":358,"in_icu":{"total":null,"currently":92},"on_ventilator":{"total":null,"currently":null}},"death":{"total":3824,"confirmed":3679,"probable":145}}}} { "name": "roomkit-collector", "version": "1.0.0", "description": "Collects PeopleCount metrics from Webex Room devices and computes weighted averages", "main": "server.js", "dependencies": { "debug": "^4.1.1", "express": "^4.17.1", "jsxapi": "^5.0.1", "mongodb": "^3.5.7" }, "devDependencies": {}, "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "author": " <>", "license": "MIT" } { "name": "hypercore-protocol.org", "description": "The hypercore-protocol.org website", "scripts": { "build": "eleventy", "serve": "eleventy --serve" }, "repository": { "type": "git", "url": "git+https://github.com/hypercore-protocol/new-website.git" }, "author": "", "license": "MIT", "bugs": { "url": "https://github.com/hypercore-protocol/new-website/issues" }, "homepage": "https://github.com/hypercore-protocol/new-website#readme", "devDependencies": { "@11ty/eleventy": "^0.11.1", "@11ty/eleventy-plugin-syntaxhighlight": "^3.0.4", "eleventy-plugin-sass": "^1.1.1" } } 1-10 { "name": "notr", "version": "1.1.2", "description": "Extremely simple notifications", "main": "notr.js", "scripts": { "prepublish": "coffee -m -c ./notr.coffee" }, "repository": { "type": "git", "url": "https://github.com/ttab/notr" }, "keywords": [ "notifications" ], "author": " <>", "license": "MIT", "bugs": { "url": "https://github.com/ttab/notr/issues" }, "homepage": "https://github.com/ttab/notr", "devDependencies": { "coffee-script": "^1.9.3" } } {"expireTime":9007200875569466000,"key":"/static/3eb0fab06bb36eec8ffa906c953f83d5/58aeb/alexander-andrews-zw07kvdahpw-unsplash.avif","val":""}{ "name": "keepa-api", "version": "1.0.0", "description": "The Keepa API offers numerous endpoints. Every request requires your API access key as a parameter. You can find and change your key in the keepa portal. All requests must be issued as a HTTPS GET and accept gzip encoding. If possible, use a Keep-Alive connection. 
Multiple requests can be made in parallel to increase throughput.", "main": "index.js", "scripts": { "prestart": "npm install", "start": "node index.js" }, "keywords": [ "openapi-tools" ], "license": "Unlicense", "private": true, "dependencies": { "connect": "^3.2.0", "js-yaml": "^3.3.0", "swagger-tools": "0.10.1" } } 0 { "link":"http://community.topcoder.com/stat?c=problem_statement&pm=3510&rd=6527", "statement":"Consider a function randomInt(integer N) that takes an integer N and returns an integer uniformly at random in the range 0 to N-1. If that function is nested, as in randomInt(randomInt(N)), the probability distribution changes, and some numbers are more likely than others. Given an int nestings defining the number of times the function is nested (1 indicates randomInt(N), 2 indicates randomInt(randomInt(N)), and so on), an int N and an int target, return the probability that the result of nestings nested calls to randomInt with the parameter N will result in target. ", "notes":[ { "order":1, "val":"Calling randomInt(0) causes an error to occur, and hence can never cause an outcome of ." }, { "order":2, "val":"Your return must have a relative or absolute error less than 1E-9." } ], "definition":[ { "definitionKey":"Class:", "definitionVal":"NestedRandomness", "order":1 }, { "definitionKey":"Method:", "definitionVal":"probability", "order":2 }, { "definitionKey":"Parameters:", "definitionVal":"int, int, int", "order":3 }, { "definitionKey":"Returns:", "definitionVal":"double", "order":4 }, { "definitionKey":"Method signature:", "definitionVal":"double probability(int N, int nestings, int target)", "order":5 } ], "examples":[ { "expect":"2", "id":0, "input":"5", "note":"There are 3 ways of ending up with a 1 after calling randomInt(randomInt(5)). The inner call can result in a 4, 3, or a 2. Each of these possibilities occurs with a probability of 1/5. The probabilities of achieving a 1 with each of those results are 1/4, 1/3, and 1/2, respectively. This gives us a final probability of (1/5)*(1/4+1/3+1/2) = 13/60.", "order":1 }, { "expect":"4", "id":0, "input":"10", "order":2 }, { "expect":"10", "id":0, "input":"1000", "order":3 } ], "constraints":[ { "order":1, "val":"N will be between 1 and 1000, inclusive." }, { "order":2, "val":"nestings will be between 1 and 10, inclusive." }, { "order":3, "val":"target will be between 0 and N-nestings, inclusive." } ] }{ "variants": { "facing=north,ladder=wood": { "model": "harvestfestival:ladder" }, "facing=south,ladder=wood": { "model": "harvestfestival:ladder", "y": 180 }, "facing=east,ladder=wood": { "model": "harvestfestival:ladder", "y": 90 }, "facing=west,ladder=wood": { "model": "harvestfestival:ladder", "y": 270 }, "facing=north,ladder=decorative": { "model": "harvestfestival:ladder" }, "facing=south,ladder=decorative": { "model": "harvestfestival:ladder", "y": 180 }, "facing=east,ladder=decorative": { "model": "harvestfestival:ladder", "y": 90 }, "facing=west,ladder=decorative": { "model": "harvestfestival:ladder", "y": 270 } } }Ondoz/sistemprediksi [ { "Id": "198524", "ThreadId": "58641", "Html": "
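The NestedRandomness problem statement above already works one case by hand: for randomInt(randomInt(5)), the chance of ending on 1 is (1/5)*(1/4+1/3+1/2) = 13/60. A minimal dynamic-programming sketch of that calculation, as standalone JavaScript (the function name is mine; this is not the TopCoder reference solution):

```javascript
// Probability that `nestings` nested calls to randomInt, starting from N, end on `target`.
// dist[k] holds the probability that the current value equals k after each round of calls.
function nestedRandomProbability(N, nestings, target) {
  let dist = new Array(N + 1).fill(0);
  dist[N] = 1; // before any call, the argument is exactly N
  for (let step = 0; step < nestings; step++) {
    const next = new Array(N + 1).fill(0);
    for (let n = 1; n <= N; n++) {            // randomInt(0) errors, so mass at 0 never propagates
      if (dist[n] === 0) continue;
      for (let r = 0; r < n; r++) next[r] += dist[n] / n; // randomInt(n) is uniform over 0..n-1
    }
    dist = next;
  }
  return dist[target];
}

console.log(nestedRandomProbability(5, 2, 1)); // 0.21666... = 13/60, matching the worked example
```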

I tested my script's getValue() function on localhost, and it managed to work and display without any issue.

\r\n

However, I then deployed to the server, which is HP-UX running PHP 5.22 without the iconv and mbstring extensions.

\r\n

It displays ? in the browser.

\r\n

http://yfrog.com/0twphpj

\r\n

I am really stuck; when I copy the ? out, it turns out to be hidden/invisible characters.

\r\n

The values in the middle are actually dates in Excel, and those display correctly.

\r\n

Appreciate any advice on this. Thank you.

\r\n

 

", "PostedDate": "2009-06-06T05:14:47.28-07:00", "UserRole": null, "MarkedAsAnswerDate": null }, { "Id": "198525", "ThreadId": "58641", "Html": "

You will need at least the iconv or mbstring extension enabled when you read/write xls files. Sometimes it will work even without them if there are only ASCII characters, but that is another story.

The problem is that xls in general uses UTF-16 LE. If you don't have iconv or mbstring enabled, PHPExcel cannot convert the text to UTF-8.
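To make the encoding point concrete, here is a minimal Node.js sketch (not part of the original thread, and not PHPExcel's code, which delegates the conversion to iconv/mbstring) showing why UTF-16 LE bytes look like "?" or hidden characters when they are read without conversion:

```javascript
// XLS (BIFF) strings are stored as UTF-16 LE; Node's Buffer shows the effect directly.
const utf16leBytes = Buffer.from('Hello', 'utf16le');   // the bytes roughly as an xls file stores them
console.log(utf16leBytes);                     // <Buffer 48 00 65 00 6c 00 6c 00 6f 00>
console.log(utf16leBytes.toString('utf8'));    // "H\u0000e\u0000l\u0000l\u0000o\u0000" – the NUL bytes show up as ? or invisible characters
console.log(utf16leBytes.toString('utf16le')); // "Hello" – decoded with the correct encoding
```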

\r\n

 

", "PostedDate": "2009-06-06T05:26:47.697-07:00", "UserRole": null, "MarkedAsAnswerDate": null }, { "Id": "198534", "ThreadId": "58641", "Html": "

Thank you for the fast reply. By the way, that HP-UX machine is big-endian RISC; does it matter?

", "PostedDate": "2009-06-06T06:00:17.827-07:00", "UserRole": null, "MarkedAsAnswerDate": null }, { "Id": "198549", "ThreadId": "58641", "Html": "

>> By the way, that HP-UX machine is big-endian RISC; does it matter?

It doesn't matter; iconv/mbstring takes care of this properly.

", "PostedDate": "2009-06-06T07:15:17.99-07:00", "UserRole": null, "MarkedAsAnswerDate": null } ]imbhargav5/new-website {"angular-hal.js":"sha256-kdVdjx3RD+jt/hKiE+OA2c/lYg2mu9/ZsPZqIvJezHQ=","angular-hal.map.js":"sha256-ueRpozG3/LxUIUQpYxYtUfuksJOG8erbx8bjSl+bsXY=","angular-hal.map.min.js":"sha256-n7Z0sFD9no9hcrDTHObeKwO8SgVaA3M664gAbe9buI8=","angular-hal.min.js":"sha256-POI1DD/qABGlCOW7C2RmAFPV/85PMGk8EKox/c0NMes="}{ "_args": [ [ "@teuteuf/react-pwa-install@1.1.2", "/home/mb4718/Desktop/flaggle" ] ], "_from": "@teuteuf/react-pwa-install@1.1.2", "_id": "@teuteuf/react-pwa-install@1.1.2", "_inBundle": false, "_integrity": "sha512-c8vfvUmGSj3HCXyQ7hLlPKiiNeLxTgpRDkpHFSevHKJ8KpPzQEwJt24YArgYgEYBa9Alzx6pDGzc8OjSgrhDlA==", "_location": "/@teuteuf/react-pwa-install", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, "raw": "@teuteuf/react-pwa-install@1.1.2", "name": "@teuteuf/react-pwa-install", "escapedName": "@teuteuf%2freact-pwa-install", "scope": "@teuteuf", "rawSpec": "1.1.2", "saveSpec": null, "fetchSpec": "1.1.2" }, "_requiredBy": [ "/" ], "_resolved": "https://registry.npmjs.org/@teuteuf/react-pwa-install/-/react-pwa-install-1.1.2.tgz", "_spec": "1.1.2", "_where": "/home/mb4718/Desktop/flaggle", "author": "", "bugs": { "url": "https://github.com/teuteuf/react-pwa-install/issues" }, "dependencies": { "mobile-device-detect": "^0.4.3" }, "description": "Install support for your PWA on several platforms", "devDependencies": { "@material-ui/core": "^4.9.12", "nwb": "0.24.x", "react": "^16.13.1", "react-dom": "^16.13.1" }, "files": [ "css", "es", "lib", "umd", "index.d.ts" ], "homepage": "https://github.com/teuteuf/react-pwa-install#readme", "keywords": [ "react-component", "react", "PWA", "install", "A2HS", "add to home screen" ], "license": "MIT", "main": "lib/index.js", "module": "es/index.js", "name": "@teuteuf/react-pwa-install", "peerDependencies": { "react": "17.x" }, "repository": { "type": "git", "url": "git+https://github.com/teuteuf/react-pwa-install.git" }, "scripts": { "build": "nwb build-react-component", "clean": "nwb clean-module && nwb clean-demo", "prepublishOnly": "npm run build", "start": "nwb serve-react-demo", "test": "nwb test-react", "test:coverage": "nwb test-react --coverage", "test:watch": "nwb test-react --server" }, "version": "1.1.2" } DOREMUS-ANR/recommender http://data.doremus.org/expression/2f31bc90-eb13-3af7-bc95-469fbcb4afba http://data.doremus.org/expression/8720d9f3-5975-325a-a0d9-cabc7599cc13 http://data.doremus.org/expression/0905545b-a976-3ceb-8019-a16ded830fe0 http://data.doremus.org/expression/3ade281b-df13-3048-8ed4-58e34315ff56 http://data.doremus.org/expression/a630bb68-b776-3d05-b31b-fa2d608ec400 http://data.doremus.org/expression/92077ae6-21b4-3a7a-92e5-b4e8201ee016 http://data.doremus.org/expression/34f922b6-44e7-38ee-9bf4-8f22d4af724f http://data.doremus.org/expression/98c69d01-743f-385e-a872-37a5da27ce35 http://data.doremus.org/expression/26bba20a-29a7-30c0-bbd9-9b099af32318 http://data.doremus.org/expression/cbc41400-dac5-3751-a542-0d1431807313 http://data.doremus.org/expression/3d455ba2-65db-31f6-a175-5fef99106e35 http://data.doremus.org/expression/3237e116-4e71-3d01-86dc-f9702ac18fca http://data.doremus.org/expression/dbc98c81-86b4-383f-b6f5-1c33005665d5 http://data.doremus.org/expression/07c1edb1-5b1b-3c6e-b8ac-d23c5e898a4c http://data.doremus.org/expression/b17d9e85-3e78-328b-b5b6-e99bac6d0182 http://data.doremus.org/expression/ff4d8741-2e83-3db2-ab6c-1c9482f51bf5 
http://data.doremus.org/expression/afbcc626-db64-3a82-9b76-d556412022c9 http://data.doremus.org/expression/9e2a0fc6-ca12-3b22-b77f-a4fa59fe3ffd http://data.doremus.org/expression/a4d7be9a-0a02-3850-9822-ea27d76b1a6c http://data.doremus.org/expression/f610c8f7-65e0-333a-990c-54e85528545e http://data.doremus.org/expression/2bce42ad-4dd4-34c8-bb8c-c27213c7df50 http://data.doremus.org/expression/a96d909c-b0d7-3c5f-b5c8-f84a5560c6ec http://data.doremus.org/expression/bb52d2aa-4d0d-370e-a7a4-2f75f0204743conjugacao/atirar.json1-10 {"presente":{"eu":"atiro","tu":"atiras","ele":"atira","nos":"atiramos","vos":"atirais","eles":"atiram"},"preterito_imperfeito":{"eu":"atirava","tu":"atiravas","ele":"atirava","nos":"atirávamos","vos":"atiráveis","eles":"atiravam"},"preterito_perfeito":{"eu":"atirei","tu":"atiraste","ele":"atirou","nos":"atiramos","vos":"atirastes","eles":"atiraram"},"preterito_mais_que_perfeito":{"eu":"atirara","tu":"atiraras","ele":"atirara","nos":"atiráramos","vos":"atiráreis","eles":"atiraram"},"futuro_do_presente":{"eu":"atirarei","tu":"atirarás","ele":"atirará","nos":"atiraremos","vos":"atirareis","eles":"atirarão"},"futuro_do_preterito":{"eu":"atiraria","tu":"atirarias","ele":"atiraria","nos":"atiraríamos","vos":"atiraríeis","eles":"atirariam"}}{"database":"json-path1","resource":"shredded","old-revision":2,"new-revision":5,"diffs":[{"insert":{"nodeKey":8,"insertPositionNodeKey":1,"insertPosition":"asFirstChild","deweyID":"1.17.5","depth":2,"type":"jsonFragment","data":"{\"data\":\"data\"}"}},{"insert":{"nodeKey":5,"insertPositionNodeKey":8,"insertPosition":"asRightSibling","deweyID":"1.17.9","depth":2,"type":"jsonFragment","data":"{\"data\":\"data\"}"}},{"delete":{"nodeKey":2,"deweyID":"1.17.17","depth":2}}]}{ "name": "", "description": "A small backpack.", "url": "https://www.awaytravel.com/au/en/travel-bags/daypack" } FLOOD/ca8eb0f17640a867d1fe97f22902df4f/3a0ca0c5-af40-4242-ac94-693baa197e3b.json {"id":"3a0ca0c5-af40-4242-ac94-693baa197e3b","playerTags":["efafe75e-2f00-4418-914c-9b6675d39264"],"teamTags":["ca3f1c8c-c025-4d8e-8eef-5be6accbeb16","747b8e4a-7e50-4638-a973-ea7950a3e739"],"gameTags":["bdc58e76-1b80-4528-b3eb-5ce3cfa2ee51"],"metadata":{"play":231,"subPlay":-1},"created":"2021-03-19T08:19:35.255Z","season":13,"tournament":-1,"type":62,"day":87,"phase":6,"category":2,"description":"A surge of Immateria rushes up from Under!\nBaserunners are swept from play!\nAldon Cashmoney's Ego keeps them on base!","nuts":0}HappyShare.Api/appsettings.json { "Connections" : { "MySql" : "Server=localhost;Database=happy_share;user=root;pwd=@@-" }, "Logging": { "LogLevel": { "Default": "Warning" } }, "AllowedHosts": "*" } felixzapata/rebel-tag-input1-10 { "name": "rebel-tag-input", "version": "0.0.1", "description": "A tag input written as a ES2015 web component", "author": " @revillweb", "main": "compiled/rebel-repeater.js", "dependencies": { "webcomponentsjs": "*" } }external/Wang2017/tweets/573619430284005378.json {"content": "\u00a3100bn for Trident over 35 years. \u00a32bn a year is NOTHING in government spending terms. The NHS gets through that in a WEEK! 
#bbcqt", "entities": [{"offset": 11, "type": "topic keyword", "id": 4, "entity": "trident"}, {"offset": 91, "type": "topic keyword", "id": 5, "entity": "nhs"}, {"entity": "government spending", "type": "topic keyword", "id": 6, "offset": 60}], "topics": [{"topic": "nhs", "id": 1}, {"topic": "defence", "id": 2}, {"topic": "public spending", "id": 3}], "tweet_id": "573619430284005378"}{ "name": "@graphql-cli/loaders", "description": "Internal usage", "version": "4.1.0", "license": "MIT", "main": "dist/index.js", "publishConfig": { "access": "public" }, "scripts": { "build": "tsc" }, "peerDependencies": { "graphql": "15.5.0" }, "dependencies": { "@graphql-tools/apollo-engine-loader": "6.2.5", "@graphql-tools/code-file-loader": "6.3.1", "@graphql-tools/git-loader": "6.2.6", "@graphql-tools/github-loader": "6.2.5", "@graphql-tools/prisma-loader": "6.2.7", "@graphql-tools/url-loader": "6.8.1", "@graphql-tools/utils": "7.5.1" } }profiles/mariy_vitaliy_mihaylovich.json {"Department":"Генеральна Прокуратура України","Name":"","Position":"Слідчий в особливо важливих справах Генеральної прокуратури України","Region":"Загальнодержавний","analytics":[{"c":1,"fh":75.6,"fha":1,"fl":940,"fla":1,"i":111415,"y":2015},{"fi":11455,"i":162563,"y":2016}],"declarationsLinks":[{"id":"nacp_d5889301-8948-4be5-b641-1ddfe2386377","provider":"declarations.com.ua.opendata","year":2015},{"id":"nacp_0ec3d7f7-e71a-470c-b19f-f9e199065c95","provider":"declarations.com.ua.opendata","year":2016}],"key":"mariy_vitaliy_mihaylovich","type":"prosecutor","Декларації 2013":"","Декларації 2014":"","Декларації 2015":"https://public.nazk.gov.ua/declaration/d5889301-8948-4be5-b641-1ddfe2386377","Декларації 2016":"https://public.nazk.gov.ua/declaration/0ec3d7f7-e71a-470c-b19f-f9e199065c95","Декларації доброчесності":"http://www.gp.gov.ua/integrity_profile/files/34ef443a95976e34cde17678d001e6cd.pdf","Фото":"","Як живе":""}{ "id": 2831, "api_model": "exhibitions", "api_link": "https://api.artic.edu/api/v1/exhibitions/2831", "title": "Taoism and the Arts of China", "is_featured": false, "description": "Overview\nThe exhibition Taoism and the Arts of China is on view at The Art Institute of Chicago from November 4, 2000, to January 7, 2001, and at the Asian Art Museum of San Francisco from February 21 to May 13, 2001. This is the first major exhibition of Taoist art in the United States, showcasing 151 works of art illustrating many facets of the Taoist religion. The exhibition includes paintings, calligraphy, sculpture, porcelain, lacquer, and ritual robes and implements from museums and private collections in the United States, Europe, Japan, China, Taiwan, and Hong Kong. These items date from the Warring States period to the Qing dynasty and demonstrate the development of Taoism and Taoist art from its earliest precedents to its \"renaissance\" in the late imperial age.\n\nAdmission to the exhibition is free with museum admission. The Art Institute will publish a catalogue of the exhibition, with essays by prominent scholars of Taoism and Chinese art history, and sponsor a symposium in the museum's Rubloff Auditorium on December 2 and 3, 2000. There will also be public lectures and other educational events relating to Taoism during the run of the exhibition. Families visiting the exhibition with children should pick up the free Family Self-Guide at the exhibition entrance or download it as a .pdf file. You will need Adobe Acrobat Reader to read and print the document. 
This site provides an overview of the exhibition using 28 works that address the themes of Taoism and the Arts of China.", "short_description": "The exhibition\u00a0Taoism and the Arts of China\u00a0is on view at The Art Institute of Chicago from November 4, 2000, to January 7, 2001, and at the Asian Art Museum of San Francisco from February 21 to May 13, 2001. This is the first major exhibition of Taoist art in the United States, showcasing 151 works of art illustrating many facets of the Taoist religion. The exhibition includes paintings, calligraphy, sculpture, porcelain, lacquer, and ritual robes and implements from museums and private collections in the United States, Europe, Japan, China, Taiwan, and Hong Kong.", "web_url": "https://nocache.www.artic.edu/exhibitions/2831/taoism-and-the-arts-of-china", "image_url": "https://artic-web.imgix.net/f27467df-773b-4476-990f-a3fb4104eef4/110823.jpg?auto=compress%2Cformat&fit=min&fm=jpg&q=80&rect=0%2C226%2C1015%2C571", "type": "AIC & Other Venues", "status": "Closed", "aic_start_at": "2000-11-04T00:00:00-06:00", "aic_end_at": "2001-01-07T00:00:00-06:00", "date_display": null, "department_display": "Asian Art", "gallery_id": 2147475902, "gallery_title": null, "artwork_ids": [ 60877 ], "artwork_titles": [ "Vestment (For a First-degree Taoist Priest)" ], "artist_ids": [], "site_ids": [], "image_id": null, "alt_image_ids": [], "document_ids": [], "suggest_autocomplete_all": { "input": [ "Taoism and the Arts of China" ], "contexts": { "groupings": [ "title" ] } }, "last_updated_source": "1976-09-02T11:20:00-05:00", "last_updated": "2021-01-13T23:27:53-06:00", "timestamp": "2021-01-14T17:13:45-06:00" }{ "technologyInfrastructureInstanceReport" : { "technologyInfrastructureInstanceReport" : {}, "technologyInfrastructureInstanceReportType" : "technologyInfrastructureInstanceReportType", "technologyInfrastructureInstanceReportRecord" : {}, "technologyInfrastructureInstanceReportParameters" : "technologyInfrastructureInstanceReportParameters" }, "technologyInfrastructureRetrieveActionTaskReference" : "TIRATR735317", "technologyInfrastructureSpecificationSchedule" : "technologyInfrastructureSpecificationSchedule", "technologyInfrastructureServiceDescription" : "technologyInfrastructureServiceDescription", "technologyInfrastructureServiceType" : "technologyInfrastructureServiceType", "technologyInfrastructureRetrieveActionTaskRecord" : {}, "technologyInfrastructureRetrieveActionResponse" : "technologyInfrastructureRetrieveActionResponse", "technologyInfrastructurePreconditions" : "technologyInfrastructurePreconditions", "technologyInfrastructureServiceWorkProduct" : "technologyInfrastructureServiceWorkProduct", "technologyInfrastructureVersionNumber" : "technologyInfrastructureVersionNumber", "technologyInfrastructureServiceInputsandOuputs" : "technologyInfrastructureServiceInputsandOuputs", "technologyInfrastructureInstanceAnalysis" : { "technologyInfrastructureInstanceAnalysisReportType" : "technologyInfrastructureInstanceAnalysisReportType", "technologyInfrastructureInstanceAnalysisRecord" : {}, "technologyInfrastructureInstanceAnalysisParameters" : "technologyInfrastructureInstanceAnalysisParameters", "technologyInfrastructureInstanceAnalysisReport" : {} } }packages/manager/modules/pci/src/projects/new/error/translations/Messages_pl_PL.json { "pci_error_button_action_pay_debt": "Ureguluj należności", "pci_error_button_action_contact_support": "Skontaktuj się z pomocą techniczną", "pci_error_account_not_eligible": "Utworzenie nowego projektu Public 
Cloud nie jest możliwe przy użyciu Twojego konta. Prosimy o kontakt z pomocą techniczną.", "pci_error_max_projects_limit_reached": "Niestety, nie możesz utworzyć dodatkowego projektu Public Cloud, ponieważ osiągnąłeś maksymalny limit projektów. Aby zwiększyć limity, skontaktuj się z pomocą techniczną.", "pci_error_paypal_account_not_verified": "Niestety, nie możesz utworzyć projektu Public Cloud, używając Twojego konta PayPal, ponieważ nie zostało ono zweryfikowane. Prosimy o wykonanie kroków niezbędnych do zatwierdzenia konta PayPal i podjęcie nowej próby utworzenia projektu. Możesz również zmienić Twój domyślny sposób płatności.", "pci_error_unpaid_debts": "Niestety, nie możesz utworzyć projektu Public Cloud, ponieważ masz zadłużenie w OVHcloud. Ureguluj należność i spróbuj ponownie.", "pci_error_invalid_payment_mean": "Utworzenie nowego projektu Public Cloud nie jest możliwe przy użyciu Twojego konta. Prosimy o zmianę Twojego domyślnego sposobu płatności." } src/main/resources/json/PS4206.json {"acadYear":"2019/2020","description":"The module introduces the trends, approaches, and limitations of security studies in the Asia-Pacific. It explores major institutional arrangements of regional security and linkages between these regional arrangements and international security structures. It also analyses contemporary changes in the issues and priorities of security and the newly emerging security concerns in the Asia-Pacific. The implications of domestic political changes for regional security are also considered. The module can be read by honours and postgraduate students in Political Science.","title":"Regional Security in the Asia Pacific","department":"Political Science","faculty":"Arts and Social Science","workload":[0,3,0,2,7.5],"prerequisite":"Completed 80 MCs, including 28 MCs in PS or 28 MCs in GL/GL recognised non-language modules, with a minimum CAP of 3.20 or be on the Honours track.","moduleCredit":"5","moduleCode":"PS4206","semesterData":[]} skywalking-ui/node_modules/.cache/vue-loader/aa55cc8c385445a7056ae5c38a078eb6.json {"remainingRequest":"/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/vue-loader/lib/loaders/templateLoader.js??vue-loader-options!/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/cache-loader/dist/cjs.js??ref--0-0!/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/vue-loader/lib/index.js??vue-loader-options!/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/src/views/components/common/trace-chart-table/trace-item.vue?vue&type=template&id=1ff67081&scoped=true&","dependencies":[{"path":"/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/src/views/components/common/trace-chart-table/trace-item.vue","mtime":1592485041000},{"path":"/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/cache-loader/dist/cjs.js","mtime":499162500000},{"path":"/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/vue-loader/lib/loaders/templateLoader.js","mtime":499162500000},{"path":"/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/cache-loader/dist/cjs.js","mtime":499162500000},{"path":"/Users/mimu/github/apache-skywalking-8.0.1-copy/skywalking-ui/node_modules/vue-loader/lib/index.js","mtime":499162500000}],"contextDependencies":[],"result":["var render = function () {var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;return _c('div',[_c('div',{ref:\"traceItem\",class:['trace-item', 'level' + 
(_vm.data.level - 1)],on:{\"click\":_vm.showSelectSpan}},[_c('div',{class:['method', 'level' + (_vm.data.level - 1)],style:({ 'text-indent': (_vm.data.level - 1) * 10 + 'px' })},[(_vm.data.children && _vm.data.children.length)?_c('svg',{staticClass:\"icon vm cp trans\",style:(!_vm.displayChildren ? 'transform: rotate(-90deg);' : ''),on:{\"click\":function($event){$event.stopPropagation();return _vm.toggle($event)}}},[_c('use',{attrs:{\"xlink:href\":\"#arrow-down\"}})]):_vm._e(),_c('span',{directives:[{name:\"tooltip\",rawName:\"v-tooltip:bottom\",value:({ content: _vm.data.endpointName, popperCls: ['trace-table-tooltip'] }),expression:\"{ content: data.endpointName, popperCls: ['trace-table-tooltip'] }\",arg:\"bottom\"}]},[_vm._v(\"\\n \"+_vm._s(_vm.data.endpointName)+\"\\n \")])]),_c('div',{staticClass:\"start-time\"},[_vm._v(\"\\n \"+_vm._s(_vm._f(\"dateformat\")(_vm.data.startTime))+\"\\n \")]),_c('div',{staticClass:\"exec-ms\"},[_vm._v(\"\\n \"+_vm._s(_vm.data.endTime - _vm.data.startTime ? _vm.data.endTime - _vm.data.startTime : '0')+\"\\n \")]),_c('div',{staticClass:\"exec-percent\"},[_c('div',{staticClass:\"outer-progress_bar\",style:({ width: _vm.outterPercent })},[_c('div',{staticClass:\"inner-progress_bar\",style:({ width: _vm.innerPercent })})])]),_c('div',{staticClass:\"self\"},[_vm._v(\"\\n \"+_vm._s(_vm.data.dur ? _vm.data.dur + '' : '0')+\"\\n \")]),_c('div',{staticClass:\"api\"},[_c('span',{directives:[{name:\"tooltip\",rawName:\"v-tooltip:bottom\",value:(_vm.data.component || '-'),expression:\"data.component || '-'\",arg:\"bottom\"}]},[_vm._v(_vm._s(_vm.data.component || '-'))])]),_c('div',{staticClass:\"application\"},[_c('span',{directives:[{name:\"tooltip\",rawName:\"v-tooltip:bottom\",value:(_vm.data.serviceCode || '-'),expression:\"data.serviceCode || '-'\",arg:\"bottom\"}]},[_vm._v(_vm._s(_vm.data.serviceCode))])]),_c('div',{directives:[{name:\"show\",rawName:\"v-show\",value:(_vm.type === 'profile'),expression:\"type === 'profile'\"}],staticClass:\"application\"},[_c('span',{on:{\"click\":_vm.viewSpanDetail}},[_vm._v(_vm._s(this.$t('view')))])])]),_c('div',{directives:[{name:\"show\",rawName:\"v-show\",value:(_vm.data.children && _vm.data.children.length > 0 && _vm.displayChildren),expression:\"data.children && data.children.length > 0 && displayChildren\"}],staticClass:\"children-trace\"},_vm._l((_vm.data.children),function(item,index){return _c('item',{key:index,attrs:{\"data\":item,\"type\":_vm.type}})}),1)])}\nvar staticRenderFns = []\n\nexport { render, staticRenderFns }"]}v1/states/ak/20200706.json {"date":20200706,"state":"AK","positive":1138,"negative":122615,"pending":null,"hospitalizedCurrently":19,"hospitalizedCumulative":null,"inIcuCurrently":null,"inIcuCumulative":null,"onVentilatorCurrently":3,"onVentilatorCumulative":null,"recovered":548,"dataQualityGrade":"A","lastUpdateEt":"7/5/2020 00:00","dateModified":"2020-07-05T00:00:00Z","checkTimeEt":"07/04 
20:00","death":16,"hospitalized":null,"dateChecked":"2020-07-05T00:00:00Z","totalTestsViral":123753,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":1138,"deathConfirmed":16,"deathProbable":null,"totalTestEncountersViral":null,"totalTestsPeopleViral":null,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"02","positiveIncrease":0,"negativeIncrease":0,"total":123753,"totalTestResultsSource":"posNeg","totalTestResults":123753,"totalTestResultsIncrease":0,"posNeg":123753,"deathIncrease":0,"hospitalizedIncrease":0,"hash":"21d05ee48d4cc599e1d2ea0bdb5e75fb8157df13","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} [ { "query": "LG", "display_query": "LG 宣布退出智能手机业务" }, { "query": "泰山", "display_query": "景区回应游客挤厕所过夜" }, { "query": "北部白犀牛", "display_query": "地球上仅存 2 只北部白犀牛" }, { "query": "盗墓笔记", "display_query": "盗墓笔记动漫开播" }, { "query": "ig 夺冠", "display_query": "刀塔 2 决赛 iG 夺冠" }, { "query": "快船", "display_query": "快船轻取湖人" }, { "query": "极限挑战", "display_query": "极限挑战第七季首播" }, { "query": "欧元", "display_query": "欧元之父蒙代尔去世" }, { "query": "司藤", "display_query": "《司藤》大爆的原因是什么?" }, { "query": "第十一回", "display_query": "《第十一回》解析" }, { "query": "中山大学", "display_query": "中山大学强制清算管理学院楼" }, { "query": "hm", "display_query": "HM 官网涉问题地图被约谈" }, { "query": "茶颜悦色", "display_query": "深圳茶颜悦色开业排队 3 万号" }, { "query": "苏伊士运河", "display_query": "「长赐号」搁浅系船长错误操作" }, { "query": "征婚", "display_query": "男子月薪 5 万征婚被骂过于自信" }, { "query": "致敬英烈", "display_query": "我国有约 2000 万名烈士" }, { "query": "学术造假", "display_query": "南京艺术学院毕业生学术造假" }, { "query": "烈士寻亲", "display_query": "烈士寻亲公共服务平台开通" }, { "query": "人体经络", "display_query": "研究发现人体经络存在的证据" }, { "query": "朋友去世微信要删除吗", "display_query": "朋友去世了,微信要删除吗?" } ] qingqibing/esri-leaflet-webpack { "title": "Esri Leaflet Example", "subtitle": "Testing a simple boilerplate for esri-leaflet.", "author": "Robert-W", "appVersion": "1.0.0" } { "citations" : [ { "textCitation" : "[See exmid on Metamath](http://us.metamath.org/mpegif/exmid.html)" } ], "names" : [ "exmid" ], "language" : "METAMATH_SET_MM", "lookupTerms" : [ "#T_wph", "#T_wo", "#T_wn", "#T_wph" ], "metaLanguage" : "METAMATH", "remarks" : " Law of excluded middle, also called the principle of _tertium non datur_. Theorem *2.11 of [WhiteheadRussell] p. 101. It says that something is either true or not true; there are no in-between values of truth. This is an essential distinction of our classical logic and is not a theorem of intuitionistic logic. In intuitionistic logic, if this statement is true for some ` ph ` , then ` ph ` is decideable. (Contributed by NM, 29-Dec-1992.) ", "statement" : "exmid $p |- ( ph \\/ -. ph ) $." }ssjumbreon378/YDM2-DB1-10 { "id": 75347539, "name": "", "is_illegal": false, "is_custom": false, "text": "Cannot be Normal Summoned/Set. Must first be Special Summoned (from your hand) by Tributing 1 \"Alpha The Magnet Warrior\", \"Beta The Magnet Warrior\", and \"Gamma The Magnet Warrior\" from your hand and/or field. 
You can Tribute this card, then target 1 \"Alpha The Magnet Warrior\", \"Beta The Magnet Warrior\", and \"Gamma The Magnet Warrior\" in your Graveyard; Special Summon them.", "images": [ "https://storage.googleapis.com/ygoprodeck.com/pics/75347539.jpg" ], "type": "Monster", "attribute": "EARTH", "atk": 3500, "species": "Rock", "monster_type": "", "is_pendulum": false, "ability": "", "has_effect": true, "def": 3850, "level": 8, "is_tuner": false }{ "author": { "id": "t2_3omfg", "name": "dysonlogos" }, "date": { "day": 1561939200, "full": 1562015770, "month": 1561939200, "week": 1561852800 }, "id": "t3_c809tf", "misc": { "postHint": "image" }, "picture": { "filesize": 96144, "fullUrl": "https://preview.redd.it/rd1bzlxadr731.jpg?auto=webp&s=08a19f12bcfd0987f90c3bda5d4b99cba09afc9e", "hash": "4d3cf9e38b", "height": 571, "lqip": "data:image/jpg;base64,/9j/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAAOABADASIAAhEBAxEB/8QAFwAAAwEAAAAAAAAAAAAAAAAABAUGB//EACMQAAEEAQQBBQAAAAAAAAAAAAECAwQRBQASEyFBBjFRYZH/xAAUAQEAAAAAAAAAAAAAAAAAAAAA/8QAFBEBAAAAAAAAAAAAAAAAAAAAAP/aAAwDAQACEQMRAD8A2/LTHGp+RZS7KckblUhp9SShIQlVgX156qz3qhbfiiVjGw85vWFqZHOVbkkXarNn60N6iwUic9ywpKWAvbzJNgKKT0evNWPz40vxmAkwJjDkh9laN4sNgpspB22Pbom9B//Z", "thumbnailUrl": "https://b.thumbs.redditmedia.com/RDbTvGIpL9UoYuqxthRm904u5_OJ-qqjZEeS1Vgn-xo.jpg", "url": "https://preview.redd.it/rd1bzlxadr731.jpg?width=640&crop=smart&auto=webp&s=15b866fcc73f0597c40f771a1f3c799753213103", "width": 640 }, "score": { "comments": 1, "downs": 0, "isCurated": false, "ratio": 1, "ups": 106, "value": 106 }, "subreddit": { "id": "t5_3isai", "name": "dndmaps" }, "tags": ["Building"], "title": "The Court of Summer Wines", "url": "https://www.reddit.com/r/dndmaps/comments/c809tf/the_court_of_summer_wines/" } {"word":"montre","definition":"1. (Organ Building) A stop, usually the open diapason, having its pipes \"shown\" as part of the organ case, or otherwise specially mounted. 2. A hole in the wall of a pottery kiln, by which the state of the pieces within can be judged."}en/pansophy.json {"word":"pansophy","definition":"Universal wisdom; esp., a system of universal knowledge proposed by Comenius (1592 -- 1671), a Moravian educator. [R.] 
Hartlib."}Teal-labs-developer/react-native-google-signinandroid/build/intermediates/blame/res/release/single/layout-v21.json [ { "merged": "/Users/panda/Desktop/Nido/NidoTeacher/js/nodeModules/react-native-google-signin/android/build/intermediates/res/merged/release/layout-v21/notification_template_custom_big.xml", "source": "/Users/panda/.android/build-cache/afd84280824d239370af12a94668b77cbe3fd376/output/res/layout-v21/notification_template_custom_big.xml" }, { "merged": "/Users/panda/Desktop/Nido/NidoTeacher/js/nodeModules/react-native-google-signin/android/build/intermediates/res/merged/release/layout-v21/notification_action.xml", "source": "/Users/panda/.android/build-cache/afd84280824d239370af12a94668b77cbe3fd376/output/res/layout-v21/notification_action.xml" }, { "merged": "/Users/panda/Desktop/Nido/NidoTeacher/js/nodeModules/react-native-google-signin/android/build/intermediates/res/merged/release/layout-v21/notification_template_icon_group.xml", "source": "/Users/panda/.android/build-cache/afd84280824d239370af12a94668b77cbe3fd376/output/res/layout-v21/notification_template_icon_group.xml" }, { "merged": "/Users/panda/Desktop/Nido/NidoTeacher/js/nodeModules/react-native-google-signin/android/build/intermediates/res/merged/release/layout-v21/notification_action_tombstone.xml", "source": "/Users/panda/.android/build-cache/afd84280824d239370af12a94668b77cbe3fd376/output/res/layout-v21/notification_action_tombstone.xml" } ]{"relations":[{"id":16256,"relationtype":{"id":4,"rtype":"Subentry","role_from":"Main Entry of","role_to":"Subentry of","symmetrical":false},"basket":{"id":28565,"display_name":"abuse"},"direction":"source"},{"id":132836,"relationtype":{"id":6,"rtype":"Containment","role_from":"contained by","role_to":"contains","symmetrical":false},"basket":{"id":14034,"display_name":"Freud, Sigmund"},"direction":"source"},{"id":22240,"relationtype":{"id":6,"rtype":"Containment","role_from":"contained by","role_to":"contains","symmetrical":false},"basket":{"id":38078,"display_name":"Sexual abuse -- and Freud"},"direction":"destination"}],"basket":{"id":37213,"topic_hits":[{"id":38841,"name":"Abuse -- Freud and","scope":{"id":2,"scope":"Generic"},"bypass":false,"hidden":false,"preferred":false}],"occurs":[{"id":92500,"location":{"id":24989,"document":{"title":"Faith Born of Seduction","author":""},"localid":"page_11","sequence_number":25},"basket":37213}],"display_name":"Abuse -- Freud and","description":"","review":{"reviewer":"Alex","time":"2016-12-22T16:09:36.754839Z","reviewed":true,"changed":false},"weblinks":[],"types":[]}}amaajemyfren/data { "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "Real-world JIT compilers like PyPy and Numba are complex and advanced.\nHowever, the basic ideas behind JIT compilers are easy to understand, as\nthis talk aim to show.\n\nThis is a live-coding exercise: we will start from a blank page and\nwrite a working (albeit simple and limited) JIT compiler from scratch.", "duration": 2440, "language": "eng", "recorded": "2019-07-11", "related_urls": [ { "label": "Conference schedule", "url": "https://ep2019.europython.eu/schedule/" } ], "speakers": [ "" ], "tags": [ "Compiler and Interpreters", "Development", "TDD" ], "thumbnail_url": "https://i.ytimg.com/vi/DKns_rH8rrg/maxresdefault.jpg", "title": "How to write a JIT compiler in 30 minutes", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=DKns_rH8rrg" } ] } {"ast":null,"code":"export var EthChainType;\n\n(function (EthChainType) {\n 
EthChainType[\"Harmony\"] = \"HARMONY\";\n})(EthChainType || (EthChainType = {}));","map":{"version":3,"sources":["../../../src/modules/rpc-provider-types.ts"],"names":[],"mappings":"AAEA,OAAA,IAAY,YAAZ;;AAAA,CAAA,UAAY,YAAZ,EAAwB;AACtB,EAAA,YAAA,CAAA,SAAA,CAAA,GAAA,SAAA;AACD,CAFD,EAAY,YAAY,KAAZ,YAAY,GAAA,EAAA,CAAxB","sourceRoot":"","sourcesContent":["export var EthChainType;\n(function (EthChainType) {\n EthChainType[\"Harmony\"] = \"HARMONY\";\n})(EthChainType || (EthChainType = {}));\n//# sourceMappingURL=rpc-provider-types.js.map"]},"metadata":{},"sourceType":"module"}0 { "Samples": [ { "Title": "Image Classification", "Description": "Pick an image and classify the scene into 1000 categories such as keyboard, mouse, pencil, and many animals.", "Icon": "\uE155", "Tag": "ImageClassifier", "XAMLGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/user/sheilk/winml-samples-gallery/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/ImageClassifier/ImageClassifier.xaml", "CSharpGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/user/sheilk/winml-samples-gallery/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/ImageClassifier/ImageClassifier.xaml.cs", "Docs": [] }, { "Title": "Batching", "Description": "Infer multiple inputs at once to increase performance.", "Icon": "\uE155", "Tag": "Batching", "XAMLGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/user/sheilk/winml-samples-gallery/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/Batching/Batching.xaml", "CSharpGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/user/sheilk/winml-samples-gallery/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/Batching/Batching.xaml.cs", "Docs": [ { "name": "BatchSizeOverride", "link": "https://docs.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodelsessionoptions.batchsizeoverride?view=winrt-20348" } ] }, { "Title": "Image Effects", "Description": "Pick an image and apply a variety of effects powered by Windows AI MachineLearning like Blur, Sharpen, Contrast, and many more.", "Icon": "\uE155", "Tag": "ImageEffects", "XAMLGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/user/sheilk/winml-samples-gallery/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/ImageEffects/ImageEffects.xaml", "CSharpGithubLink": "https://github.com/microsoft/Windows-Machine-Learning/blob/user/sheilk/winml-samples-gallery/Samples/WinMLSamplesGallery/WinMLSamplesGallery/Samples/ImageEffects/ImageEffects.xaml.cs", "Docs": [] } ] }{ "classes": [ "SlotEngineMain", "controllers/SlotEngineController", "controllers/ReelsViewController", "controllers/WinVisualizationViewController", "controllers/UserInterfaceViewController", "controllers/VideoViewController", "controllers/FreespinsAnimationViewController", "controllers/CustomResultViewController", "controllers/BonusSpinsInfoController", "views/ReelView", "views/WinVisualizationView", "views/UserInterfaceView", "views/LinesView", "views/CustomResultView", "views/BonusSpinsInfoView", "models/GameSettings", "models/CurrentState", "models/SpinAnalyzer", "models/LinesViewConfig", "models/WinVisualizationViewConfig", "models/BonusSpinsModel", "messages/BaseMessage", "messages/SpinMessage", "messages/CollectMessage", "messages/GambleMessage", "messages/JackpotMessage", "StateMachine/StateMachineContext", "StateMachine/StateIdle", "StateMachine/StateWin", "StateMachine/StateGamble", "StateMachine/StateJackpot", 
"StateMachine/StateWaitingForSpinResult", "StateMachine/StateWaitingForCollectResult", "StateMachine/StateWaitingForGambleResult", "StateMachine/StateWaitingForJackpotResult", "StateMachine/StateWaitingForCustomResult", "../common/commonJS/controllers/GambleViewController", "../common/commonJS/controllers/JackpotViewController", "../common/commonJS/controllers/PaytableViewController", "../common/commonJS/controllers/SettingsViewController", "../common/commonJS/views/GambleView", "../common/commonJS/views/JackpotView", "../common/commonJS/views/PaytableView", "../common/commonJS/views/SettingsView", "../common/commonJS/views/LinesSelectView", "../common/commonJS/views/WaysToPayView", "../common/commonJS/views/BetButton", "../common/commonJS/views/DenomButton", "../common/commonJS/views/SettingsPaytableCloseButton", "../common/commonJS/views/JackpotBoxes", "../common/commonJS/views/JackpotCard", "../common/commonJS/views/PopUp", "../common/commonJS/views/TotalsInfoView", "../common/commonJS/CommonSettings", "../common/commonJS/MoneyAnimation" ] }0 {"ast":null,"code":"var toPositiveInteger = require('../internals/to-positive-integer');\n\nmodule.exports = function (it, BYTES) {\n var offset = toPositiveInteger(it);\n if (offset % BYTES) throw RangeError('Wrong offset');\n return offset;\n};","map":null,"metadata":{},"sourceType":"script"}{ "name": "knowledge_base_search", "version": "0.1.0", "private": true, "license": "MIT", "dependencies": { "classnames": "^2.2.5", "node-sass-chokidar": "0.0.3", "phantomjs-polyfill-find": "0.0.1", "phantomjs-polyfill-find-index": "^1.0.1", "prop-types": "^15.5.10", "react": "^15.6.1", "react-dom": "^15.6.1", "react-infinite-scroller": "^1.0.15", "react-inlinesvg": "^0.6.2", "react-scroll": "^1.5.4", "react-stickynode": "^1.3.1", "react-transition-group": "^1.1.3", "watson-react-components": "^0.6.10", "whatwg-fetch": "^2.0.3" }, "devDependencies": { "babel-cli": "^6.26.0", "babel-core": "^6.26.0", "babel-preset-es2015": "^6.24.1", "casperjs": "^1.1.4", "cross-env": "^5.1.4", "enzyme": "^2.9.1", "eslint": "^4.6.1", "eslint-config-airbnb": "^15.1.0", "eslint-plugin-import": "^2.7.0", "eslint-plugin-jsx-a11y": "^5.1.1", "eslint-plugin-react": "^7.3.0", "npm-run-all": "^4.1.1", "react-scripts": "^1.0.13", "react-test-renderer": "^15.6.1" }, "scripts": { "start": "npm-run-all -p watch-css start-js", "build": "npm run build-css && react-scripts build", "start-js": "cross-env REACT_APP_SERVER=\"http://localhost:5000\" react-scripts start", "build-css": "node-sass-chokidar --include-path ./src --include-path ./node_modules src/ -o src/", "watch-css": "npm run build-css && node-sass-chokidar --include-path ./src --include-path ./node_modules src/ -o src/ --watch --recursive", "test": "npm run test-unit && npm run build && npm run test-integration", "test-unit": "npm run build-css && npm run lint && react-scripts test --env=jsdom", "build-test-integration": "rm -rf spec_dest && babel spec --out-dir spec_dest --presets=es2015", "test-integration": "npm run build-test-integration && casperjs test --includes=spec_dest/support/setup_server.js spec_dest/scenarios", "eject": "react-scripts eject", "lint": "eslint --ext .jsx --ext .js src spec" } } {"app":"auth0-gitlab-deploy.ui.2.4.0.js","style":"auth0-gitlab-deploy.ui.2.4.0.css","vendors":"auth0-gitlab-deploy.ui.vendors.2.4.0.js"}inugroho/pemilu-2019-scraper [{"namaKab":"","originalFilename":"Didi Mahardika.jpg","namaPartai":"PARTAI GERAKAN INDONESIA 
RAYA","id":124035,"noUrut":1,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"","originalFilename":"Foto Endro Hermono.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":113858,"noUrut":2,"nama":", MBA","stringJenisKelamin":"Laki-Laki"},{"namaKab":"K","originalFilename":"foto amida hanna_1.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":178051,"noUrut":3,"nama":". ","stringJenisKelamin":"Perempuan"},{"namaKab":"","originalFilename":"foto samsul hair_1.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":201371,"noUrut":4,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"","originalFilename":"Sigit Permadi.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":107750,"noUrut":5,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"BLITAR","originalFilename":"foto vellya dapil 6_1.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":196259,"noUrut":6,"nama":"","stringJenisKelamin":"Perempuan"},{"namaKab":"BLITAR","originalFilename":"foto hj nesti.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":201701,"noUrut":7,"nama":"","stringJenisKelamin":"Perempuan"},{"namaKab":"KEDIRI","originalFilename":"foto Budi santoso Dapil 6.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":201528,"noUrut":8,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"BOGOR","originalFilename":"Sarehwiyono.jpg","namaPartai":"PARTAI GERAKAN INDONESIA RAYA","id":110579,"noUrut":9,"nama":"Dr. , M.S.H., MH","stringJenisKelamin":"Laki-Laki"}]{"generated_at": "2017-01-27T09:37:31.519807", "required_by": ["robotframework-ipmilibrary"], "requires": ["markdown", "future"], "package": "python-ipmi"}["jquery-nor-rest","nor-api-registration","nor-api-session","nor-api-upload","nor-data-view","nor-flags","nor-interface","nor-mvc","nor-nopg","nor-passport","nor-rest","nor-rest-jquery","nor-routes-json","nor-spec","nor-stack"]variant/pli/ms/sutta/kn/tha-ap/tha-ap197_variant-pli-ms.json10-100 { "tha-ap197:1.2": "mālāvaraṁ sakosakaṁ → mālāvarasamogadhaṁ (sya1ed, sya2ed); māhāsārasamotataṁ (pts-vp-pli1)", "tha-ap197:3.4": "Pupphadānassidaṁ → buddhapūjāyidaṁ (bj, sya1ed, sya2ed, pts-vp-pli1)" }static/page-data/en/reddit/r/interestingasfuck/comments/k560zq/the_sheer_size_of_this_eagle/page-data.json0 {"componentChunkName":"component---node-modules-gatsby-theme-buzzing-src-gatsby-theme-blog-core-templates-post-query-js","path":"/en/reddit/r/interestingasfuck/comments/k560zq/the_sheer_size_of_this_eagle/","result":{"data":{"site":{"siteMetadata":{"title":"Reddit 热门","author":"Buzzing.cc","description":"用中文浏览Reddit热门内容","keywords":["buzzing","reddit","reddit中文","reddit热门"],"siteUrl":"https://reddit.buzzing.cc","telegram":"@reddit_zh","iconUrl":"https://reddit.buzzing.cc/avatar.png","defaultSocialImageUrl":null,"social":[{"name":"Reddit","url":"https://www.reddit.com","external":true},{"name":"Buzzing","url":"https://www.buzzing.cc/","external":true}],"menuLinks":[{"name":"每周精选","url":"/issues","external":null}],"disqus":null,"utterances":null,"localize":[{"title":"Buzzing on Reddit","description":"See what's buzzing on Reddit in your native language","keywords":["buzzing","reddit","reddit top"],"locale":"en","social":{"name":null,"url":null,"external":null},"menuLinks":[{"name":"Weekly Selection","url":"/en/issues","external":null}]},{"title":"Reddit 熱門","description":"用中文瀏覽Reddit熱門內容","keywords":["buzzing","reddit","reddit中文","reddit熱門"],"locale":"zh-Hant","social":null,"menuLinks":[{"name":"每週精選","url":"/zh-Hant/issues","external":null}]},{"title":"Reddit 
人気の記事","description":"人気のReddit記事を日本語で閲覧","keywords":["buzzing","Reddit"],"locale":"ja","social":null,"menuLinks":[]}]}},"blogPost":{"id":"RedditPost-k560zq","excerpt":"","body":"","slug":"/reddit/r/interestingasfuck/comments/k560zq/the_sheer_size_of_this_eagle/","title":"The sheer size of this eagle","tags":["interestingasfuck","reddit"],"date":"December 02, 2020","dateISO":"2020-12-02T18:20:02.000Z","datetime":"2020-12-02 18:20","image":null,"imageAlt":null,"socialImage":null,"__typename":"SocialMediaPost","thirdPartyId":"k560zq","provider":"Reddit","url":"https://www.reddit.com/r/interestingasfuck/comments/k560zq/the_sheer_size_of_this_eagle/","originalUrl":"https://www.reddit.com/r/interestingasfuck/comments/k560zq/the_sheer_size_of_this_eagle/","imageRemote":null,"video":{"url":"https://v.redd.it/zxr5mqvgqq261/DASH_720.mp4","embed":null,"width":405,"height":720},"channel":"interestingasfuck","channelUrl":"https://www.reddit.com/r/interestingasfuck","author":"Palifaith","authorUrl":"https://www.reddit.com/user/Palifaith","authorImage":null,"authorImageRemote":null,"authorSlug":"Palifaith","score":88661,"views":null,"sharedCount":null,"likeCount":null,"sharedContent":null,"parent":{"localize":[{"title":"这只鹰的巨大体型","the_new_excerpt":null,"locale":"zh"},{"title":"這只鷹的巨大體型","the_new_excerpt":null,"locale":"zh-Hant"}]}},"previous":{"id":"RedditPost-k54vqc","excerpt":"","slug":"/reddit/r/MadeMeSmile/comments/k54vqc/in_the_who_cares_news_i_am_400_days_sober_today/","title":"In the \"Who cares\" news I am 400 days sober today and 323 smoke free. I have no one to share with so I am celebrating with you guys!","date":"December 02, 2020","__typename":"SocialMediaPost","provider":"Reddit","parent":{"localize":[{"title":"在“谁在乎”的新闻里,我今天戒酒400天,戒烟323天。我没有可以分享的人,所以我和你们一起庆祝!","the_new_excerpt":null,"locale":"zh"},{"title":"在“誰在乎”的新聞裏,我今天戒酒400天,戒煙323天。我沒有可以分享的人,所以我和妳們壹起慶祝!","the_new_excerpt":null,"locale":"zh-Hant"}]}},"next":{"id":"RedditPost-k56yfj","excerpt":"","slug":"/reddit/r/pics/comments/k56yfj/happiness_in_western_australia_home_of_the_quokka/","title":"Happiness in Western Australia, home of the Quokka","__typename":"SocialMediaPost","date":"December 02, 2020","provider":"Reddit","parent":{"localize":[{"title":"魁卡人的故乡西澳大利亚的幸福","the_new_excerpt":null,"locale":"zh"},{"title":"魁卡人的故鄉西澳大利亞的幸福","the_new_excerpt":null,"locale":"zh-Hant"}]}}},"pageContext":{"basePath":"/","pageType":"detail","id":"RedditPost-k560zq","previousId":"RedditPost-k54vqc","nextId":"RedditPost-k56yfj","maxWidth":1024,"siteMetadata":null,"locale":"en","hrefLang":"en-US","originalPath":"/reddit/r/interestingasfuck/comments/k560zq/the_sheer_size_of_this_eagle/","dateFormat":"MM/DD/YYYY"}},"staticQueryHashes":["1239077767","2744905544","3280999885"]}vercel.json { "env": { "GOOGLE_PRIVATE_KEY": "@google-private-key", "GOOGLE_CLIENT_EMAIL": "@google-client-email", "GOOGLE_CLIENT_ID": "@google-client-id" } } app/content/lexicons/strongs/entries/H4497.json1-10 {"derivation": "from H5125 (\u05e0\u05d5\u05bc\u05df);", "pron": "maw-nohn'", "outline": "
  1. grief, progeny, thankless one
    1. meaning uncertain
", "kjv_def": "son.", "lemma": "\u05de\u05b8\u05e0\u05d5\u05b9\u05df", "frequency": 1, "strongs_def": "a continuator, i.e. heir", "xlit": "m\u00e2n\u00f4wn"}{ "_from": "@contentful/rich-text-types", "_id": "@contentful/rich-text-types@15.10.1", "_inBundle": false, "_integrity": "sha512-OeAYewiCQnHJlT6Dlqlsm645AuVSRLQHttyvg/Tru0quQZxDOTUC+hyA6m9NxgGjhynixVI2zqr2GfMpEm+Cjg==", "_location": "/@contentful/rich-text-types", "_phantomChildren": { "fast-deep-equal": "3.1.3", "require-from-string": "2.0.2", "uri-js": "4.4.1" }, "_requested": { "type": "tag", "registry": true, "raw": "@contentful/rich-text-types", "name": "@contentful/rich-text-types", "escapedName": "@contentful%2frich-text-types", "scope": "@contentful", "rawSpec": "", "saveSpec": null, "fetchSpec": "latest" }, "_requiredBy": [ "#USER", "/" ], "_resolved": "https://registry.npmjs.org/@contentful/rich-text-types/-/rich-text-types-15.10.1.tgz", "_shasum": "e8f2028b80031f909f8b5afa3816222012253988", "_spec": "@contentful/rich-text-types", "_where": "/home/azmat/Desktop/cnc/JamStack/learn-jamstack/blog/gatsby-starter-blog", "bugs": { "url": "https://github.com/contentful/rich-text/issues" }, "bundleDependencies": false, "dependencies": { "ajv": "^8.8.2" }, "deprecated": false, "description": "Type definitions and constants for the Contentful rich text field type.", "devDependencies": { "@types/jest": "^27.0.1", "@types/node": "^14.17.14", "faker": "^4.1.0", "jest": "^27.1.0", "rimraf": "^2.6.3", "rollup": "^1.32.1", "rollup-plugin-commonjs": "^9.3.4", "rollup-plugin-copy": "^2.0.0", "rollup-plugin-json": "^4.0.0", "rollup-plugin-node-resolve": "^4.2.4", "rollup-plugin-sourcemaps": "^0.6.3", "rollup-plugin-typescript2": "^0.30.0", "ts-jest": "^27.0.5", "ts-node": "^10.2.1", "tslib": "^2.3.1", "tslint": "^6.1.3", "typescript": "^4.4.2", "typescript-json-schema": "^0.50.1" }, "engines": { "node": ">=6.0.0" }, "files": [ "dist" ], "gitHead": "f4fe4b43c9b233a56fd802b7995beacb6bcb3ddc", "homepage": "https://github.com/contentful/rich-text#readme", "license": "MIT", "main": "dist/index.js", "name": "@contentful/rich-text-types", "publishConfig": { "access": "public" }, "repository": { "type": "git", "url": "git+https://github.com/contentful/rich-text.git" }, "scripts": { "build": "npm run generate-json-schema && tsc --module commonjs && rollup -c rollup.config.js", "generate-json-schema": "ts-node -O '{\"module\": \"commonjs\"}' ./tools/jsonSchemaGen", "lint": "tslint -t codeFrame '@(src|bin)/*.ts'", "prebuild": "rimraf dist", "start": "tsc && rollup -c rollup.config.js -w" }, "typings": "dist/types/index.d.ts", "version": "15.10.1" } Muigai/Inventory { "name": "inventory", "version": "1.0.0", "description": "inventory sample app using rahisi.", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "author": "", "license": "ISC", "devDependencies": { "typescript": "^3.6.4" }, "dependencies": { "rahisi": "^1.1.23" } } WebArtWork/ngx-kanbanmodule.json { "repo": ":WebArtWork/kanban.git", "dependencies": { "ngx-drag-scroll": "8.0.0-beta.2", "ng2-dragula": "2.1.1" } }fuadi-star/Offers {"id": 9249, "date": "2013-03-29 19:42:10", "user": "24khost", "post": "So I got crazy and forgot I had all the extra\r\neaster egg's available So I decided to put some\r\ngoody baskets together!\r\n\r\n\r\nOur Server 
Technology\r\n\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\r\nEnterprise Grade Cloud Solution\r\nHigh availability\r\nAuto-failover\r\nData mirroring\r\nLoad balancing to keep your server running at peak performance\r\nSSD caching for high performance I/O, up to 5000 IOPS\r\n1 gbps port\r\nOpenVZ\r\nVpsgrid Control Panel\r\n\r\nBasket 1 - 15 Available\r\n\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\r\n\u20222 vCPU\r\n\u20221024 MB Ram Gauranteed\r\n\u202280 GB Space\r\n\u20221000 GB bandwidth\r\n\u20221 IPv4\r\n\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\r\n$4.50 - Monthly\r\nhttps://billing.24khost.com/cart.php?a=add&pid=167\r\n\r\n\r\n\r\nBasket 2 - 15 Available\r\n\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\r\n\u20224 vCPU\r\n\u20222048 MB Ram Gauranteed\r\n\u2022160 GB Space\r\n\u20222000 GB bandwidth\r\n\u20221 IPv4\r\n\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\u00bb\r\n$6.50 - Monthly\r\nhttps://billing.24khost.com/cart.php?a=add&pid=166\r\n"}nttu2188/near-sdk-as { "name": "near-sdk-simulator", "version": "3.2.0", "description": "Mock Near Runtime for integration testing.", "license": "(MIT AND Apache-2.0)", "main": "dist/index.js", "types": "dist/index.d.ts", "author": " <>", "scripts": { "build:release": "node asconfig.js", "build": "tsc && node asconfig.js --target debug --wat", "test": "yarn jest", "build:tsc": "tsc", "pretest": "yarn build" }, "devDependencies": { "near-sdk-core": "^3.2.0" } } node_modules/vuepress/node_modules/.cache/vuepress/7fd50f1a26f9d823984f5684e91a4a4c.json {"remainingRequest":"/Users/chenyu/Documents/GitHub/yhd/node_modules/vue-loader/lib/index.js??ref--1-1!/Users/chenyu/Documents/GitHub/yhd/node_modules/vuepress/lib/webpack/markdownLoader.js??ref--1-2!/Users/chenyu/Documents/GitHub/yhd/docs/zh/recommend/4Sushi fish fillet.md?vue&type=template&id=f1be9ed2&","dependencies":[{"path":"/Users/chenyu/Documents/GitHub/yhd/docs/zh/recommend/4Sushi fish 
fillet.md","mtime":1533900890000},{"path":"/Users/chenyu/Documents/GitHub/yhd/node_modules/cache-loader/dist/cjs.js","mtime":1533935433692},{"path":"/Users/chenyu/Documents/GitHub/yhd/node_modules/vue-loader/lib/loaders/templateLoader.js","mtime":1533935983956},{"path":"/Users/chenyu/Documents/GitHub/yhd/node_modules/cache-loader/dist/cjs.js","mtime":1533935433692},{"path":"/Users/chenyu/Documents/GitHub/yhd/node_modules/vue-loader/lib/index.js","mtime":1533935983956},{"path":"/Users/chenyu/Documents/GitHub/yhd/node_modules/vuepress/lib/webpack/markdownLoader.js","mtime":1533935985033}],"contextDependencies":[],"result":["\n
\n",null]}1-10 { "name": "known-origin-contracts-next-gen", "version": "1.0.0", "description": "KO Next Generation Smart Contracts", "main": "index.js", "repository": ":knownorigin/known-origin-contracts-next-gen.git", "author": "", "license": "MIT", "private": true, "scripts": { "compile": "npx hardhat compile --show-stack-traces", "coverage": "NODE_OPTIONS=--max-old-space-size=4096 npx hardhat coverage", "test": "npx hardhat test", "test-fast": "npx hardhat test --parallel", "clean": "rm -rf artifacts/ && rm -rf cache/", "clean-node-modules": "rm -rf node_modules/", "clean-test": "npm run clean && npm run compile && npm run test", "contract-size": "npx hardhat size-contracts", "gas": "REPORT_GAS=true npx hardhat test" }, "devDependencies": { "@nomiclabs/hardhat-ethers": "2.0.5", "@nomiclabs/hardhat-etherscan": "2.1.8", "@nomiclabs/hardhat-solhint": "2.0.0", "@nomiclabs/hardhat-truffle5": "2.0.4", "@nomiclabs/hardhat-web3": "^2.0.0", "@openzeppelin/hardhat-upgrades": "1.16.1", "@openzeppelin/test-helpers": "0.5.15", "@pinata/sdk": "^1.1.23", "axios": "^0.21.4", "chai": "^4.3.0", "commander": "^7.0.0", "dotenv": "^8.2.0", "ethereum-waffle": "^3.2.2", "ethereumjs-util": "^7.0.8", "ethers": "5.3.1", "hardhat": "2.9.1", "hardhat-abi-exporter": "^2.2.1", "hardhat-gas-reporter": "1.0.4", "lodash": "4.17.21", "solidity-coverage": "^0.7.16", "ts-node": "^8.5.4", "typescript": "^3.7.3" }, "dependencies": { "@openzeppelin/contracts": "4.2.0", "@openzeppelin/contracts-upgradeable": "^4.4.2", "hardhat-contract-sizer": "^2.0.3", "moment": "^2.29.1", "prompt-sync": "^4.2.0" } } dnouri/cloud-custodian { "body": { "eventTrigger": { "eventType": "providers/cloud.pubsub/eventTypes/topic.publish", "resource": "projects/custodian-1291/topics/custodian-auto-audit-topic-created", "service": "pubsub.googleapis.com", "failurePolicy": {} }, "status": "DEPLOY_IN_PROGRESS", "updateTime": "2018-08-05T15:36:16Z", "name": "projects/custodian-1291/locations/us-central1/functions/topic-created", "availableMemoryMb": 512, "labels": { "deployment-tool": "custodian" }, "sourceUploadUrl": "https://storage.googleapis.com/gcf-upload-us-central1-03ceb6f5-053a-4792-bd46-c6c945939065/3d25964d-3454-46bc-8d8e-34f7f840523f.zip?GoogleAccessId=&Expires=1533485174&Signature=A553GqjawIc8p%2BS8KQ8QmDCJzJFP8jZHuWeS14y31zMFfh2xmxvwGVg7VhNN1V5X0AW7Bmcq%2FXMkUpbc1GytvPwXFxXf0Iftb8z7l8okFnPECQJ6vfMxGM%2FSdjd51miAGI0SAYmu6orYE3ti3idBZgG1zdZrg%2BKHEh2rfROzEgVcEPEKLky1qR%2FWS0wHsszym9lrZcXNL0a9GYlvslK8%2FPpAMwavzsxUrtLZf4md9bwT%2B0CnzX60%2BAGd3JBqk2vWiKszz9GHvmhpfbveuqL7pu1IUIzCUosjGxAkGTZ4g3%2FvL7pZkqaKfD%2BauPjB%2BZUQ%2FK%2BP9tgfW89ai1IJPH7sBw%3D%3D", "versionId": "1", "entryPoint": "run", "serviceAccountEmail": "", "timeout": "60s", "runtime": "python37" }, "headers": { "status": "200", "content-length": "1279", "x-xss-protection": "1; mode=block", "content-location": "https://cloudfunctions.googleapis.com/v1/projects/custodian-1291/locations/us-central1/functions/topic-created?alt=json", "x-content-type-options": "nosniff", "transfer-encoding": "chunked", "vary": "Origin, X-Origin, Referer", "server": "ESF", "-content-encoding": "gzip", "cache-control": "private", "date": "Sun, 05 Aug 2018 15:36:18 GMT", "x-frame-options": "SAMEORIGIN", "alt-svc": "quic=\":443\"; ma=2592000; v=\"44,43,39,35\"", "content-type": "application/json; charset=UTF-8" } }{ "name": "certlogic-validation", "version": "0.7.5", "description": "Validators for CertLogic", "main": "dist/index.js", "scripts": { "postinstall": "npm link certlogic-js", "build": "tsc", 
"build-watch": "tsc --watch --incremental", "pretest": "npm run build", "test": "mocha dist/test", "test-watch": "mocha --watch dist/test", "prestart": "npm run test", "start": "node dist/test/validate-testSuite.js", "clean": "rm -rf dist/ && rm -rf node_modules/" }, "bin": { "certlogic-validate": "dist/cli.js" }, "author": "", "license": "Apache-2.0", "dependencies": { "@types/json-schema": "^7.0.7", "certlogic-js": "../certlogic-js", "deep-equal": "^2.0.5" }, "devDependencies": { "@types/chai": "^4.2.18", "@types/deep-equal": "^1.0.1", "@types/mocha": "^8.2.2", "@types/node": "^15.12.1", "chai": "^4.3.4", "mocha": "^8.4.0" } } 100-1000 { "name": "videoinu", "desc": "Create and edit screen recordings and other videos online", "url": "https://videoinu.com", "tags": ["Misc"], "maintainers": ["inu"], "addedAt": "2020-05-20" } 1-10 { "id": "138190", "key": "AC-1173", "fields": { "issuetype": { "id": "1", "description": "A problem which impairs or prevents the functions of the product.", "name": "Bug", "subtask": false }, "project": { "id": "12217", "key": "AC", "name": "Appcelerator - INBOX", "projectCategory": { "id": "10000", "description": "", "name": "Customer Service" } }, "resolution": { "id": "8", "description": "", "name": "Needs more info" }, "resolutiondate": "2014-12-20T01:06:00.000+0000", "created": "2014-10-16T13:05:49.000+0000", "labels": [ "TCSupportTriage" ], "versions": [], "issuelinks": [], "assignee": { "name": "mpmiranda", "key": "mpmiranda", "displayName": "", "active": true, "timeZone": "America/Mexico_City" }, "updated": "2016-03-08T07:37:32.000+0000", "status": { "description": "The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.", "name": "Closed", "id": "6", "statusCategory": { "id": 3, "key": "done", "colorName": "green", "name": "Done" } }, "components": [], "description": "h3. Steps to Reproduce\nLogin with the cloud user and confirm that you are logged in. Then call the Cloud.statu.create and pass the photo as the file and observer the output. \n\nHere is the code that i have written and i have tried everything from the community to the documentation available. \n\nh3. Actual Result\nThe actual result is the 'Unable to identify the photo' and error code 400. No other information is provided. BTW, when i use curl it work just fine. \n\nh3. Expected Result\nThe image should have been accepted and i get the result as the success. 
", "attachment": [ { "id": "51995", "filename": ".log", "author": { "name": "ankush19902", "key": "ankush19902", "displayName": "", "active": true, "timeZone": "America/Los_Angeles" }, "created": "2014-10-16T13:06:19.000+0000", "size": 1191481, "mimeType": "text/plain" }, { "id": "51996", "filename": "diagnostic1478573847473378483.log", "author": { "name": "ankush19902", "key": "ankush19902", "displayName": "", "active": true, "timeZone": "America/Los_Angeles" }, "created": "2014-10-16T13:07:40.000+0000", "size": 45246, "mimeType": "text/plain" } ], "flagged": false, "summary": "Cloud.status.create not accepting the photo and giving error code 400", "creator": { "name": "ankush19902", "key": "ankush19902", "displayName": "", "active": true, "timeZone": "America/Los_Angeles" }, "subtasks": [], "reporter": { "name": "ankush19902", "key": "ankush19902", "displayName": "", "active": true, "timeZone": "America/Los_Angeles" }, "environment": "Titanium Studio 3.4.0", "comment": { "comments": [ { "id": "328397", "author": { "name": "mpmiranda", "key": "mpmiranda", "displayName": "", "active": true, "timeZone": "America/Mexico_City" }, "body": "Hello [~ankush19902]! \r\n\r\nPlease add the testcase that you mentioned in your bug report. \r\n\r\nBest Regards!", "updateAuthor": { "name": "mpmiranda", "key": "mpmiranda", "displayName": "", "active": true, "timeZone": "America/Mexico_City" }, "created": "2014-10-16T23:50:31.000+0000", "updated": "2014-10-16T23:50:31.000+0000" }, { "id": "337244", "author": { "name": "mpmiranda", "key": "mpmiranda", "displayName": "", "active": true, "timeZone": "America/Mexico_City" }, "body": "Please add a test case with Titanium Classic. ", "updateAuthor": { "name": "mpmiranda", "key": "mpmiranda", "displayName": "", "active": true, "timeZone": "America/Mexico_City" }, "created": "2014-12-20T01:06:00.000+0000", "updated": "2014-12-20T01:06:00.000+0000" } ], "maxResults": 2, "total": 2, "startAt": 0 } } }{ "id": 36832567, "name": "electron-es6-angular", "fullName": "yukihir0/electron-es6-angular", "owner": { "login": "yukihir0", "id": 1769420, "avatarUrl": "https://avatars0.githubusercontent.com/u/1769420?v=3", "gravatarId": "", "url": "https://api.github.com/users/yukihir0", "htmlUrl": "https://github.com/yukihir0", "followersUrl": "https://api.github.com/users/yukihir0/followers", "subscriptionsUrl": "https://api.github.com/users/yukihir0/subscriptions", "organizationsUrl": "https://api.github.com/users/yukihir0/orgs", "reposUrl": "https://api.github.com/users/yukihir0/repos", "receivedEventsUrl": "https://api.github.com/users/yukihir0/received_events", "type": "User" }, "private": false, "htmlUrl": "https://github.com/yukihir0/electron-es6-angular", "description": "electron sample using es6 and angular. 
", "fork": false, "url": "https://api.github.com/repos/yukihir0/electron-es6-angular", "forksUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/forks", "teamsUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/teams", "hooksUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/hooks", "eventsUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/events", "tagsUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/tags", "languagesUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/languages", "stargazersUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/stargazers", "contributorsUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/contributors", "subscribersUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/subscribers", "subscriptionUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/subscription", "mergesUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/merges", "downloadsUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/downloads", "deploymentsUrl": "https://api.github.com/repos/yukihir0/electron-es6-angular/deployments", "createdAt": "2015-06-03T21:49:59.000Z", "updatedAt": "2015-06-03T21:58:02.000Z", "pushedAt": "2015-07-18T06:36:42.000Z", "gitUrl": "git://github.com/yukihir0/electron-es6-angular.git", "sshUrl": "git@github.com:yukihir0/electron-es6-angular.git", "cloneUrl": "https://github.com/yukihir0/electron-es6-angular.git", "svnUrl": "https://github.com/yukihir0/electron-es6-angular", "homepage": null, "size": 128, "stargazersCount": 0, "watchersCount": 0, "language": "JavaScript", "hasIssues": true, "hasDownloads": true, "hasWiki": true, "hasPages": false, "forksCount": 0, "mirrorUrl": null, "openIssuesCount": 0, "openIssues": 0, "watchers": 0, "defaultBranch": "master", "permissions": { "admin": false, "push": false, "pull": true }, "license": { "key": "mit", "name": "MIT License", "spdxId": "MIT", "url": "https://api.github.com/licenses/mit", "featured": true }, "networkCount": 0, "subscribersCount": 1, "status": 200, "packageJSON": { "name": "electron-es6-angular", "version": "1.0.0", "description": "electron-es6-angular", "main": "app/index.js", "scripts": { "start": "gulp; electron ./app" }, "keywords": [], "author": "yukihir0", "license": "MIT", "dependencies": { "electron-prebuilt": "^0.26.1", "feedparser": "^1.1.1", "superagent": "^1.2.0" }, "devDependencies": { "gulp": "^3.8.11", "gulp-babel": "^5.1.0", "electron-packager": "^5.0.1" } }, "packageStatus": 200, "firstCommit": { "sha": "8662add1aa43ca6911318f22c1f8c52249609597", "commit": { "author": { "name": "yukihir0", "email": "", "date": "2015-06-03T21:46:48Z" }, "committer": { "name": "yukihir0", "email": "", "date": "2015-06-03T21:46:48Z" }, "message": "initial commit", "tree": { "sha": "ce1b753936d50b3e329732303cca701ed8d9a9e7", "url": "https://api.github.com/repos/yukihir0/electron-es6-angular/git/trees/ce1b753936d50b3e329732303cca701ed8d9a9e7" }, "url": "https://api.github.com/repos/yukihir0/electron-es6-angular/git/commits/8662add1aa43ca6911318f22c1f8c52249609597", "commentCount": 0 } }, "filename": "yukihir0___electron-es6-angular.json", "hasProjects": true, "lastFetchedAt": "2017-05-04T05:05:36.670Z", "packageLastFetchedAt": "2017-05-04T22:33:30.926Z" }{ "name": "IOT on Chain", "symbol": "ITC", "id": "0x5E6b6d9aBAd9093fdc861Ea1600eBa1b355Cd940", "decimals": 18, "coingecko_url": "https://www.coingecko.com/en/coins/iot-chain", "market_cap_usd": 
2703162, "market_cap_rank": 1858, "24_hr_volume_usd": 414477, "logoURI": "https://raw.githubusercontent.com/poolsharks-protocol/token-metadata/master/blockchains/ethereum/assets/0x5E6b6d9aBAd9093fdc861Ea1600eBa1b355Cd940/logo.png" }{"MonstrousCannibalism": ["KickTheDog", "HorrorHunger", "MoralEventHorizon", "ImAHumanitarian", "CannibalTribe", "ToServeMan", "NoPartyLikeADonnerParty", "NoZombieCannibals", "ApeShallNeverKillApe", "FoodChainOfEvil", "MonsterLord", "AlwaysABiggerFish", "SortingAlgorithmOfEvil", "Bowdlerise", "CannibalismSuperpower", "BreathWeapon", "PowerCrystal", "AssimilationPlot", "Autocannibalism", "BodyBackupDrive", "CannibalismSuperpower", "InstantArmor", "HunterOfHisOwnKind", "ImAHumanitarian", "SubvertedTrope", "TheBridge", "ElementalEmbodiment", "EatingTheEnemy", "RunningGag", "CuteMonsterGirl", "LizardFolk", "HorrorHunger", "BalloonBelly", "ElementalEmbodiment", "EatingTheEnemy", "RunningGag", "CuteMonsterGirl", "LizardFolk", "HorrorHunger", "BalloonBelly", "EatsBabies", "MonsterLord", "BigBad", "PlanetEater", "MonsterLord", "Autocannibalism", "TheGreatWall", "Slurpasaur", "MixAndMatchCritters", "BigBad", "OurOrcsAreDifferent", "EvenEvilHasStandards", "ToServeMan", "ChekhovsGun", "HalfHumanHybrid", "AlienBlood", "HorrorHunger", "Autocannibalism", "BigBad", "OurDragonsAreDifferent", "DragonHoard", "ToServeMan", "BalefulPolymorph", "Mooks", "YouHaveFailedMe", "AsskickingEqualsAuthority", "PigMan", "EatsBabies", "CarnivoreConfusion", "AnthropomorphicPersonification", "GoingPostal", "OurGoblinsAreDifferent", "FantasticRacism", "HumansAreBastards", "NoPartyLikeADonnerParty", "OurOrcsAreDifferent", "MercyKill", "OurMonstersAreDifferent", "ToServeMan", "HorrorHunger", "Autocannibalism", "BigBad", "OurDragonsAreDifferent", "DragonHoard", "ToServeMan", "BalefulPolymorph", "AnthropomorphicPersonification", "GoingPostal", "OurGoblinsAreDifferent", "FantasticRacism", "HumansAreBastards", "NoPartyLikeADonnerParty", "OurOrcsAreDifferent", "MercyKill", "GoingPostal", "OurGoblinsAreDifferent", "FantasticRacism", "HumansAreBastards", "NoPartyLikeADonnerParty", "OurOrcsAreDifferent", "TheBadGuyWins", "ParanoiaFuel", "YouHaveFailedMe", "RaptorAttack", "AlwaysChaoticEvil", "YouHaveFailedMe", "UpToEleven", "NoZombieCannibals", "KlingonPromotion", "EatsBabies", "YourSoulIsMine", "MoralEventHorizon", "FoodChainOfEvil", "FoodChainOfEvil", "CannibalClan", "MythologyGag", "ShoutOut", "TrademarkFavoriteFood", "KlingonPromotion", "KlingonPromotion", "YourSoulIsMine", "MoralEventHorizon", "FoodChainOfEvil", "FoodChainOfEvil", "CannibalClan", "MythologyGag", "ShoutOut", "TrademarkFavoriteFood", "YourSoulIsMine", "MoralEventHorizon", "FoodChainOfEvil", "FoodChainOfEvil", "CannibalClan", "MythologyGag", "ShoutOut", "AllThereInTheManual", "OurTrollsAreDifferent", "GiantMook", "HeavilyArmoredMook", "EliteMook", "HordeOfAlienLocusts", "VillainProtagonist", "FinalBoss", "EatingOptional", "HorrorHunger", "HumanoidAbomination", "HumanoidAbomination", "PetMonstrosity", "BeneathTheEarth", "NonMaliciousMonster", "AlwaysChaoticEvil", "ALoadOfBull", "RoboticReveal", "TomatoInTheMirror", "NeverSmileAtACrocodile", "OffingTheOffspring", "TruthInTelevision", "FunWithHomophones", "OlderThanDirt", "TyrannosaurusRex", "FeatheredFiend", "EatsBabies", "ScienceMarchesOn", "FusionDance", "FedToPigs", "Autocannibalism", "BodyHorror", "MakeThemRot", "OffingTheOffspring", "TruthInTelevision", "FunWithHomophones"]}JefferyLukas/SRIsinferno/1.0.7.json1-10 
{"inferno-compat.js":","inferno-compat.min.js":","inferno-component.js":","inferno-component.min.js":"sha512-wZm7bJXwrmCLLdGAxEb15Tm5Uyiyi2i7ynueZuyDXXeBQx/cXA06o0ws6imuWaNI7KAAmPeu2AS6oMskac65NQ==","inferno-create-class.js":"sha512-iPog8IVPp6JxchfR6W/AOw90g3A6NaRQ3imUMODR/wVF0CC36qOBHSvNTptQRxyIMcnkghkeR884MY6pZ1PKpw==","inferno-create-class.min.js":","inferno-create-element.js":"CX278OV2o0Phml41sYWjQ==","inferno-create-element.min.js":","inferno-devtools.js":","inferno-devtools.min.js":","inferno-hyperscript.js":","inferno-hyperscript.min.js":"sha512-wOvt8ywoZ1qWgSr6TMuplWdeQydBp8fEJfTnC3rWbCHUDfgQlWzVsOPqgeqgbAFBaMGkm6OC4F572bb2Rhd6bw==","inferno-mobx.js":","inferno-mobx.min.js":"sha512-5rd/S87Uc0uu8+T5D0rOiTsqjuGSOX8IIKbMvt9DxuV/SI0b0eesapDEZVwbqb4A0crozjiMaxsnbkbtng62QQ==","inferno-redux.js":"sha512-69T2QRTu5j8SeLd2/iKLj/bcdq0Z24KCfcYyWSEw3h8SAZooZr46Sk1ihSEasNSh1XxEZ0T2eNMfcoXFcaR8EQ==","inferno-redux.min.js":","inferno-router.js":","inferno-router.min.js":","inferno-server.js":"sha512-njgQN5BimuoW/oKVAMi8e1fFjchDWcvtRxk62Z1WkrOAw3PBuJq4qzEGQNm2PStww5Se1MGca26DVOxuw2wQVA==","inferno-server.min.js":"sha512-iUkxcPP1L4VufP6g4BlhdXr5Jo0kGSwpFbTQhTNwI59RjmQI37gQ8W6ty68gCctNGRRuMd6jw2BYN3qteQbg8A==","inferno-test-utils.js":"sha512-UkoMpRr5Q9qf4GiPjgUUmdj+pFdB7Z2MuQt1/g42ZrqKD6HjDc+1KuLg8EXowWDxija0URuUIRjzSAdT5K00PA==","inferno-test-utils.min.js":","inferno-vnode-flags.js":","inferno-vnode-flags.min.js":","inferno.js":","inferno.min.js":"}{ "name": "devtools-protocol", "version": "0.0.781568", "description": "The Chrome DevTools Protocol JSON", "repository": "https://github.com/ChromeDevTools/devtools-protocol", "author": "The Chromium Authors", "license": "BSD-3-Clause", "bugs": { "url": "https://github.com/ChromeDevTools/devtools-protocol/issues" }, "types": "types/protocol.d.ts", "__npminstall_done": "Tue Sep 08 2020 12:18:01 GMT+0800 (GMT+08:00)", "_from": "devtools-protocol@0.0.781568", "_resolved": "https://registry.npm.taobao.org/devtools-protocol/download/devtools-protocol-0.0.781568.tgz?cache=0&sync_timestamp=1598498376860&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fdevtools-protocol%2Fdownload%2Fdevtools-protocol-0.0.781568.tgz" }MeatReed/LavaJS0 { "name": "@anonymousg/lavajs", "version": "3.1.0", "description": "Dynamic LavaLink wrapper for your discord bot", "main": "./dist/index.js", "types": "./typings/index.d.ts", "repository": { "type": "git", "url": "https://github.com/Projects-Me/LavaJS.git" }, "keywords": [ "Lavalink", "DiscordJS", "Discord", "Music", "LavaJS", "MusicBot" ], "author": "AnonymousG", "license": "MIT", "bugs": { "url": "https://github.com/Projects-Me/LavaJS/issues" }, "homepage": "https://github.com/Projects-Me/LavaJS#readme", "dependencies": { "events": "^3.1.0", "node-fetch": "^2.6.0", "ws": "^7.3.0" }, "devDependencies": { "@types/node-fetch": "^2.5.7", "@types/ws": "^7.2.5", "discord.js": "^12.2.0" } } { "id": "rno", "name": "Reno–Tahoe International Airport", "city": "Reno", "city2": "Tahoe", "state": "Nevada", "stateShort": "NV", "country": "USA", "description": "Built in 1929 as Hubbard Field, Reno–Tahoe International get its three-letter airport code from its home in *R*e*no*, Nevada.", "imageCredit": "", "imageCreditLink": "https://www.flickr.com/photos/kenlund/" }{ "directions": [ "Blend oats and chia seeds in a blender until powdery; add pineapple juice, chocolate soy milk, pineapple chunks, strawberries, and banana and blend until smooth." 
], "ingredients": [ "1/4 cup rolled oats", "1 tablespoon chia seeds (optional)", "1 cup pineapple juice", "1/2 cup chocolate-flavored soy milk", "8 chunks frozen pineapple", "6 frozen whole strawberries", "1 small banana" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Banana Split Smoothie", "url": "http://allrecipes.com/recipe/222059/banana-split-smoothie/" } {"url.js":"sha256-Xq+mcykxBYPeSF7z9LPOp0dKVgMzrbc3J62jKgXpevo=","url.min.js":"sha256-gkVmQnFAqf8pWVje5HYC4wn2EMLDgn3inLab2d2O3/0="} dependents/light-core.json ["light-cdn","light-deploy","light-mail","light-mq","light-pay","light-sms","light-socket","light-wechat"]{ "name": "upload", "version": "1.0.0", "description": "", "main": "index.js", "scripts": { "local-serve": "firebase serve --only hosting", "deploy": "firebase deploy --except functions" }, "author": "", "license": "ISC" } {"componentChunkName":"component---src-pages-photographers-person-1-js","path":"/en/photographers/person1/","webpackCompilationHash":"","result":{"pageContext":{"isCreatedByStatefulCreatePages":true,"intl":{"language":"en","languages":["en","ru","be"],"messages":{"todayAuthor":"Photographer of the Day","welcome":"Welcome to the portal Photo.by,dedicated to photographers of Belarus. Here you can find information about the photographer and be inspired by his work.","purpose":"The portal was developed by students of Rolling Scopes School with the purpose of education about the culture of Belarus.","developers":"Development team","authorLifispan":"Years of life: ","authorAlivespan":"Date of Birth: ","birthPlace":"Place of Birth: ","infoAboutPortal":"Portal description","footerRights":"© 2019 Photo By - Belarusian cultural portal","footerPS":"Lovingly commissioned by The Rolling Scope School","headerSubtitleSpan":"Portal","headerSubtitle":"of photographers of Belarus","headerMainLink":"Home","headerLink":"Photographers","imageGalery":"Photo Gallery","searchName":"I'm looking for a photographer","searchCity":"in the town","searchButton":"Search","biography":"Biography of the photographer","video":"Video","more":"to learn more","listWork":"List of works","dateCreate":"Date","workCreate":" Аchivment","videoButton":"play another video","toAuthorPage":"go to photographer's page","noVideo":"There is no additional video for this photographer","name1":"","name2":"","name3":"","name4":"","name5":"","name6":"","name7":"","name8":"","caption1":"Evening","caption2":"«Village»","caption3":"«Little Church»","caption4":"«Spiders»","caption5":"«Paris: Avenue du Père Lachaise»","caption6":"«»"},"routed":true,"originalPath":"/photographers/person1/","redirect":true}}}}{ "out": [ "/bin/cl-fuse-meta-fs-lisp-launcher.sh", "/lib/common-lisp/cl-fuse-meta-fs/.mtn-ignore", "/lib/common-lisp/cl-fuse-meta-fs/COPYING", "/lib/common-lisp/cl-fuse-meta-fs/README", "/lib/common-lisp/cl-fuse-meta-fs/README.md", "/lib/common-lisp/cl-fuse-meta-fs/cl-fuse-meta-fs--system.fasl", "/lib/common-lisp/cl-fuse-meta-fs/cl-fuse-meta-fs.asd", "/lib/common-lisp/cl-fuse-meta-fs/dependencies.venv", "/lib/common-lisp/cl-fuse-meta-fs/lisp-meta-fs-test.lisp", "/lib/common-lisp/cl-fuse-meta-fs/lisp-meta-fs.fasl", "/lib/common-lisp/cl-fuse-meta-fs/lisp-meta-fs.lisp", "/lib/common-lisp-settings/cl-fuse-meta-fs-path-config.sh", "/lib/common-lisp-settings/cl-fuse-meta-fs-shell-config.sh", "/nix-support/propagated-build-inputs", "/share/doc/cl-fuse-meta-fs/COPYING", "/share/doc/cl-fuse-meta-fs/README", "/share/doc/cl-fuse-meta-fs/README.md" ] }{"title": "Stories from The Detective's Album", 
"description": " is best known for The Detective's Album, the longest-running early detective serial anywhere in the world. Written under the name and narrated by detective , The Detective's Album was serialized for forty years in the Australian Journal from 1868 to 1908. (Wikipedia)

\n\nThese stories were read from scans from the University of Queensland library - there is no online Etext.", "duration": 34778, "language": "English", "authors": [{"id": "6114", "name": " "}], "coverArt": "https://archive.org/download/LibrivoxCdCoverArt31/stories_detective_album_1305.jpg", "copyright_year": "1880", "genres": ["Detective Fiction"], "supporters": [{"role": "Read by", "name": ""}, {"role": "Book Coordinator", "name": ""}, {"role": "Meta Coordinator", "name": "Annise"}, {"role": "Proof Listener", "name": "ArchaDl"}], "sections": [{"section": 1, "title": "01 - The Window Among the Willows", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_01_fortune_64kb.mp3", "duration": 3439}, {"section": 2, "title": "02 - The Murder at the Creek", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_02_fortune_64kb.mp3", "duration": 3144}, {"section": 3, "title": "03 - Checkmate and Revenge", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_03_fortune_64kb.mp3", "duration": 3727}, {"section": 4, "title": "04 - Dream", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_04_fortune_64kb.mp3", "duration": 3195}, {"section": 5, "title": "05 - \"I'll be hung for you yet.\"", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_05_fortune_64kb.mp3", "duration": 4560}, {"section": 6, "title": "06 - The Fatal Cliff", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_06_fortune_64kb.mp3", "duration": 3464}, {"section": 7, "title": "07 - The Bell of Mount Battery", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_07_fortune_64kb.mp3", "duration": 3299}, {"section": 8, "title": "08 - The Blood of the Grape", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_08_fortune_64kb.mp3", "duration": 2730}, {"section": 9, "title": "09 - The Gutter Flag", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_09_fortune_64kb.mp3", "duration": 3613}, {"section": 10, "title": "10 - The Rosary of the Dead", "readers": [5831], "path": "https://www.archive.org/download/stories_detective_album_1305_librivox/detectivesalbum_10_fortune_64kb.mp3", "duration": 3607}]}10-100 { "directions": [ "Place the ground beef in a large skillet over medium high heat. Saute for 10 to 15 minutes, or until browned and crumbly; set aside.", "In a large pot over high heat, combine the potatoes with water to cover and cook for 20 minutes, or until potatoes are almost tender.", "Add the mixed vegetables, onion, cabbage, tomato sauce, reserved ground beef and ground black pepper.", "Bring to a boil, reduce heat to low and simmer for 1 1/2 to 2 hours. Season with salt to taste." 
], "ingredients": [ "3 pounds ground beef", "6 potatoes, peeled and cubed", "water to cover", "4 (15 ounce) cans mixed vegetables, drained", "1 onion, chopped", "2 cups chopped cabbage", "1 (15 ounce) can tomato sauce", "2 tablespoons ground black pepper", "salt to taste" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "My Momma's Easy Homemade Veggie Soup", "url": "http://allrecipes.com/recipe/17636/my-mommas-easy-homemade-veggie-soup/" } package.json { "name": "barbershop", "version": "1.0.0", "description": "This repositorie is for a Barbershop/Hair Salon project called OnHair Barbershop in the centre of Málaga.", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "git+https://github.com/tomgreef/barbershop.git" }, "author": "OnHair Barbershop", "license": "MIT", "bugs": { "url": "https://github.com/tomgreef/barbershop/issues" }, "homepage": "https://github.com/tomgreef/barbershop#readme" } {"date":20200808,"state":"IL","positive":193998,"negative":2839936,"pending":null,"hospitalizedCurrently":1538,"hospitalizedCumulative":null,"inIcuCurrently":338,"inIcuCumulative":null,"onVentilatorCurrently":125,"onVentilatorCumulative":null,"recovered":null,"dataQualityGrade":"A","lastUpdateEt":"8/8/2020 00:00","dateModified":"2020-08-08T00:00:00Z","checkTimeEt":"08/07 20:00","death":7840,"hospitalized":null,"dateChecked":"2020-08-08T00:00:00Z","totalTestsViral":3032634,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":192698,"deathConfirmed":7631,"deathProbable":209,"totalTestEncountersViral":null,"totalTestsPeopleViral":null,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"17","positiveIncrease":2190,"negativeIncrease":45826,"total":3033934,"totalTestResultsSource":"posNeg","totalTestResults":3033934,"totalTestResultsIncrease":48016,"posNeg":3033934,"deathIncrease":18,"hospitalizedIncrease":0,"hash":"68b6f490bc93a2e7559c62610ab50a3cfb70203d","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} { "Browser not compatible !": "Navigateur non compatible !", "Connect to a random cam": "Se connecter à une caméra au hasard", "Connected users": "Utilisateurs connectés", "connected": "connecté", "connecting": "connextion en cours", "Copyrights 2012 by ": "© 2012 par ", "Error with cam !": "Erreur avec la caméra !", "Join/leave": "Rejoindre/quitter", "Next": "Suivant", "Nick name": "Pseudo", "Not Found": "Non trouvé", "Powered by wbp.js": "Propulsé par wbp.js", "Sorry we cannot find": "Désolé nous ne pouvons trouver", "Target cam": "Caméra cible", "Your cam": "Votre caméra", "waiting": "en attente", "You are now known as": "Vous êtes maintenant connu comme" }{ "vorgangId": "135575", "VORGANG": { "WAHLPERIODE": "13", "VORGANGSTYP": "Rechtsverordnung", "TITEL": "Verordnung zur Änderung besoldungsrechtlicher Vorschriften (Besoldungsänderungsverordnung 1998 - BesÄndV 98) (G-SIG: 13022090)", "INITIATIVE": "Bundesregierung", "AKTUELLER_STAND": "Abgeschlossen - Ergebnis siehe Vorgangsablauf", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": { "DRS_HERAUSGEBER": "BR", "DRS_NUMMER": "187/98", "DRS_TYP": "Verordnung" }, "PLENUM": { "PLPR_KLARTEXT": "BR-Sitzung", 
"PLPR_HERAUSGEBER": "BR", "PLPR_NUMMER": "725", "PLPR_SEITEN": "247A - 247B", "PLPR_LINK": "http://dipbt.bundestag.de:80/dip21/brp/725.pdf#P.247" }, "EU_DOK_NR": "", "VERKUENDUNG": "Verordnung vom 17.06.1998 - Bundesgesetzblatt Teil I 1998 Nr.3724.06.1998 S. 1378 https://www.bgbl.de/xaver/bgbl/start.xav?startbk=Bundesanzeiger_BGBl&start=//*[@attr_id='bgbl198s1378.pdf']", "SCHLAGWORT": [ { "_fundstelle": "true", "__cdata": "Besoldung" }, "Erschwerniszulagenverordnung" ], "ABSTRAKT": "Aufhebung der Verordnung zur vorläufigen Regelung von Erschwerniszulagen in besonderen Fällen vom 22.03.1974; Änderung der Erschwerniszulagenverordnung vom 13.03.1992, der Verordnung über die Gewährung von Mehrarbeitsvergütung für Beamte vom 13.02.1992, der Vollstreckungsvergütungsverordnung vom 08.07.1976, der Anwärtersonderzuschlags-Verordnung vom 11.06.1990 und der Zweiten Besoldungsübergangsverordnung vom 27.11.1997; Umsetzung des Versorgungsberichts der BRg (BT-Drs 13/5840) auf der Grundlage der von der BRg am 18.06.1997 beschlossenen Eckpunkte " }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BR", "URHEBER": "Verordnung, Urheber : Bundesregierung, Bundesministerium des Innern (federführend)", "FUNDSTELLE": "20.02.1998 - BR-Drucksache 187/98", "ZUWEISUNG": [ { "AUSSCHUSS_KLARTEXT": "Ausschuss für Innere Angelegenheiten", "FEDERFUEHRUNG": "federführend" }, { "AUSSCHUSS_KLARTEXT": "Finanzausschuss" } ] }, { "ZUORDNUNG": "BR", "URHEBER": "Empfehlungen der Ausschüsse, Urheber : Ausschuss für Innere Angelegenheiten, Finanzausschuss ", "FUNDSTELLE": "28.04.1998 - BR-Drucksache 187/1/98", "VP_ABSTRAKT": "Zustimmung/Änderungen" }, { "ZUORDNUNG": "BR", "URHEBER": "Plenarantrag, Urheber : Hamburg ", "FUNDSTELLE": "06.05.1998 - BR-Drucksache 187/2/98", "VP_ABSTRAKT": "Änderungen" }, { "ZUORDNUNG": "BR", "URHEBER": "BR-Sitzung", "FUNDSTELLE": "08.05.1998 - BR-Plenarprotokoll 725, S. 247A - 247B", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/brp/725.pdf#P.247", "BESCHLUSS": { "BESCHLUSSSEITE": "247B", "BESCHLUSSTENOR": "Zustimmung/Änderungen - gem. 
Art.80 Abs.2 GG" } }, { "ZUORDNUNG": "BR", "URHEBER": "Beschlussdrucksache, Urheber : Bundesrat ", "FUNDSTELLE": "08.05.1998 - BR-Drucksache 187/98(B)" } ] } } {"leaflet-src.js":","leaflet.css":","leaflet.js":"}{ "id": 45654, "info": { "name": "GlassFox - dimmer on/off", "description": "Use this together with the theme GlassFox http://userstyles.org/styles/45089/glassfox-a-clean-theme", "additionalInfo": "By enable/disable this style you can turn on extra \"light\" on the inactive tabs to make them more visible.\r\n\r\nCan be useful in combination with GlassFox http://userstyles.org/styles/45089/glassfox-a-clean-theme \r\n\r\nTry also GlassFox - change text color http://userstyles.org/styles/45746/glassfox-change-text-color\r\nand GlassFox - stamped tabs http://userstyles.org/styles/46157/glassfox-stamped-tabs?r=1302031937", "format": "uso", "category": "browser", "createdAt": "2011-03-26T01:49:12.000Z", "updatedAt": "2014-10-11T07:38:30.000Z", "license": "CC-BY-NC-SA-4.0", "author": { "id": 84197, "name": "SuperPutte" } }, "stats": { "installs": { "total": 863, "weekly": 0 } }, "screenshots": { "main": { "name": "45654_after.jpeg", "archived": false } }, "discussions": { "stats": { "discussionsCount": 0, "commentsCount": 0 }, "data": [] }, "style": { "css": "@namespace url(http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul);\r\n\r\n/* Used together with my theme GlassFox you can, by enable/disable this style, switch between more or less \"light-effect\" on your inactive tabs. */\r\n\r\n.tabbrowser-tab:not([pinned=\"true\"]){\r\nbackground-image: -moz-linear-gradient(top, rgba(255,255,255,0) 20%, rgba(255,255,255,0.65)) !important;\r\n}\r\n\r\n.tabbrowser-tab[selected=\"true\"], .tabbrowser-tab:hover{\r\n background-image: -moz-linear-gradient(rgba(0,0,0,0.05), rgba(255,255,255,.0) 95%),\r\n -moz-linear-gradient(rgba(0,255,255,0.05), rgba(255,255,255,.0) 15%) !important;\r\n}" } }{"PREVALENCE_BY_GENDER_AGE_YEAR":{"TRELLIS_NAME":"60-69","SERIES_NAME":"MALE","X_CALENDAR_YEAR":2017,"Y_PREVALENCE_1000PP":0.03438},"PREVALENCE_BY_MONTH":{"X_CALENDAR_MONTH":[],"Y_PREVALENCE_1000PP":[]},"OBS_FREQUENCY_DISTRIBUTION":{"Y_NUM_PERSONS":0,"X_COUNT":1},"OBSERVATIONS_BY_TYPE":{"CONCEPT_NAME":"Observation recorded from EHR","COUNT_VALUE":63},"AGE_AT_FIRST_OCCURRENCE":{"CATEGORY":["FEMALE","MALE"],"MIN_VALUE":[39,52],"P10_VALUE":[39,56],"P25_VALUE":[45,60],"MEDIAN_VALUE":[49,64],"P75_VALUE":[72,69],"P90_VALUE":[87,71],"MAX_VALUE":[87,73]}} 10-100 { "name": "hyper-omni-theme", "version": "1.0.0", "main": "index.js", "homepage": "https://github.com/getomni/hyper#readme", "description": "Omni theme for Hyper", "repository": { "type": "git", "url": "https://github.com/getomni/hyper.git" }, "keywords": [ "hyperterm", "hyper", "hyper.app", "hyper omni", "hyper theme", "omni-theme" ], "author": " <>", "license": "MIT" } docs/asset-manifest.json { "files": { "main.js": "/weekly25/static/js/main.ac5a1b9c.chunk.js", "main.js.map": "/weekly25/static/js/main.ac5a1b9c.chunk.js.map", "runtime-main.js": "/weekly25/static/js/runtime-main.29298dbb.js", "runtime-main.js.map": "/weekly25/static/js/runtime-main.29298dbb.js.map", "static/js/2.0e1a0b37.chunk.js": "/weekly25/static/js/2.0e1a0b37.chunk.js", "static/js/2.0e1a0b37.chunk.js.map": "/weekly25/static/js/2.0e1a0b37.chunk.js.map", "index.html": "/weekly25/index.html", "static/js/2.0e1a0b37.chunk.js.LICENSE.txt": "/weekly25/static/js/2.0e1a0b37.chunk.js.LICENSE.txt" }, "entrypoints": [ "static/js/runtime-main.29298dbb.js", "static/js/2.0e1a0b37.chunk.js", 
"static/js/main.ac5a1b9c.chunk.js" ] }{ "id": 7986, "source": "calvin", "verse_id": 18883, "verse_count": 1, "reference": "63:16", "title": "", "html": "

Surely thou art our Father. <\/i> God permits us to reveal our hearts familiarly before him; for prayer is nothing else than the opening up of our heart before God; as the greatest alleviation is, to pour our cares, distresses, and anxieties into his bosom. \u201cRoll thy cares on the Lord,\u201d says David. (Psalms 37:5<\/a>.) After having enumerated God\u2019s benefits, from which his goodness and power are clearly seen, so that it is evident that it is nothing else than the sins of men that hinder them from feeling it as formerly, he returns to this consideration, that the goodness of God is nevertheless so great as to exceed the wickedness of men. He calls God a Father in the name of the Church; for all cannot call him thus, but it is the peculiar privilege of the Church to address him by a father\u2019s name. Hence it ought to be inferred that Christ, as the first-born, or rather the only-begotten Son of God, always governed his Church; for in no other way than through him can God be called Father. And here we again see that believers do not contend with God, but draw an argument from his nature, that, by conquering temptation, they may strive to cherish good hope. <\/p> \n

Though Abraham do not know us. <\/i> Here a question arises, Why does he say that the patriarch does not know the people? Jerome thinks that this is done because they were degenerated, and therefore were unworthy of so high an honor; but that interpretation appears to me to be exceedingly unnatural. The true meaning is, \u201cThough our fathers deny us, yet God will reckon us as children, and will act toward us as a Father.\u201d <\/p> \n

They who say that Abraham and other believers care no more about the affairs of men, torture by excessive ingenuity the words of the Prophet. I do not speak of the fact itself, but I say that those words do not prove that the saints have no care about us. The natural and true meaning is, \u201cO Lord, that thou art our Father will be so sure and so firmly established, that even though all parentage and all relationship should cease among men, yet thou wilt not fail to be our Father. Sooner shall the rights of nature perish than thou shalt not act toward us as a Father, or the sacred adoption shall be infringed, which was founded on thy unchangeable decree, and ratified by the death of thine only-begotten Son.\u201d <\/p> \n

Yet we may infer from this that holy men present themselves before God, and pray to him, in such a manner as not to look at any intercessions of others; for they are commanded to pray so as to rely on God\u2019s fatherly kindness, and to lay aside every other confidence. And if the Prophet did not instruct the Jews, in order that God might listen to them, to turn their mind to Abraham and Jacob, to whom promises so numerous and so great had been given, assuredly much less ought we to resort, to Peter, and Paul, and others; for this is not a private prayer offered by a single individual or by a few persons, but the public and universal prayer of the whole Church, as if the Prophet laid down a general form. Besides, our confidence ought to be founded on God\u2019s favor and kindness as a Father, so as to shut our eyes on all the intercessions of men, whether living or dead. In a word, believers profess that they do not gaze around in all directions, but rely on God alone. <\/p> \n

It comes now to a question, Why did he pass by Isaac and mention in a special manner Abraham and Jacob? The reason is, that with those two persons the covenant was more solemnly ratified. Isaac was, indeed, a partaker of the covenant, but did not receive promises so large and so numerous. <\/p> \n

Our Redeemer. <\/i> Redemption is here described as a testimony of that adoption; for by this proof God manifested himself to be the Father of the people; and therefore boldly and confidently do believers call on God as their Father, because he gave a remarkable testimony of his fatherly kindness toward them, which encouraged them to confidence. But redemption alone would, not have been enough, if a promise had not likewise been added; and therefore, as he once redeemed them, he promised that he would always be their Father. <\/p> \n

From everlasting is thy name. <\/i> By the word \u201ceverlasting\u201d is pointed out the stability and continuance of his fatherly name, for we did not deserve the name of children; but his will, by which he once adopted us to be children, is unchangeable. Since, therefore, the Lord has an eternal name, it follows that the title and favor which are connected with that eternity and flow from it, shall be durable and eternal. <\/p> ", "audit": null }0 { "citations" : [ { "textCitation" : "[See pm2.01d on Metamath](http://us.metamath.org/mpegif/pm2.01d.html)" } ], "names" : [ "pm2.01d" ], "language" : "METAMATH_SET_MM", "lookupTerms" : [ "#T_wph", "#T_wi", "#T_wps", "#T_wi", "#T_wn", "#T_wps", "#T_wph", "#T_wi", "#T_wn", "#T_wps" ], "metaLanguage" : "METAMATH", "remarks" : " Deduction based on reductio ad absurdum. (Contributed by NM, 18-Aug-1993.) (Proof shortened by , 5-Mar-2013.) \n\n---\n\n Deduction based on reductio ad absurdum. (Contributed by NM, 18-Aug-1993.) (Revised by , 31-Jan-2015.) ", "statement" : "pm2.01d.1 $e |- ( ph -> ( ps -> -. ps ) ) $.\npm2.01d $p |- ( ph -> -. ps ) $." }Dataforsyningen/veje.aws.dk1-10 { "name": "veje.aws.dk", "version": "1.0.0", "description": "Find en navngiven vej på et kort", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1", "env": "env", "lint": "jshint index.js", "devbuild": "npm install && cd public && npm run devbuild", "build": "npm install --production && cd public && npm install --production" }, "repository": { "type": "git", "url": "git+https://github.com/DanmarksAdresser/veje.aws.dk.git" }, "author": "", "license": "MIT", "bugs": { "url": "https://github.com/DanmarksAdresser/veje.aws.dk/issues" }, "homepage": "https://github.com/DanmarksAdresser/veje.aws.dk#readme", "dependencies": { "express": "^4.15.4", "kf-getticket": "^1.0.0", "request": "^2.81.0", "request-promise": "^4.2.1" }, "devDependencies": { "jshint": "^2.9.5" } } FranciscoFornell/Feature-Yourself { "PROFILES": "Perfiles", "CREATE_PROFILE": "Crear nuevo perfil", "VIEW_PROFILE": "Ver perfil", "PROFILE_UPDATE_SUCCESS": "Perfil actualizado correctamente" }{ "id": 86276, "name": "Tumblr Animated Little Busters Komari Home Button", "description": "Bored again, so I made an animated Komari home button. 
I wanted a Kud home button but couldn't find any animated sprites for her.", "user": { "id": 185822, "name": "kiteku", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2013-04-19T14:43:16.000Z", "weekly_install_count": 0, "total_install_count": 207, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/86276_after.gif?r=1615104373", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": null, "created": "2013-04-19T14:43:16.000Z", "category": "site", "raw_subcategory": "tumblr", "subcategory": "tumblr", "additional_info": null, "style_tags": [], "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n\r\n\r\n@-moz-document url-prefix('http://www.tumblr.com/'), url-prefix('https://www.tumblr.com/') {\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#home_button {\r\n\r\n\r\n\r\n padding-right: 17px !important;\r\n\r\n\r\n\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#home_button a {\r\n\r\n\r\n\r\n height: 0 !important;\r\n\r\n\r\n\r\n width: 0 !important;\r\n\r\n\r\n\r\n top: -2px !important;\r\n\r\n\r\n\r\n padding-left: 31px !important;\r\n\r\n\r\n\r\n padding-top: 43px !important;\r\n\r\n\r\n\r\n background: url('http://s24.postimg.org/lejdtbf3l/avatar96005_17.gif') !important;\r\n\r\n\r\n\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#home_button .tab_notice {\r\n\r\n\r\n\r\n left: 45px !important;\r\n\r\n\r\n\r\n right: auto !important;\r\n\r\n\r\n\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n}\r\n", "discussions": [ { "id": 35450, "name": " it", "rating": 3, "created": "2013-04-20T13:41:53.000Z", "author_name": "Pacolind", "author_id": 185992 } ], "discussionsCount": 1, "commentsCount": 1, "userjs_url": "/styles/userjs/86276/tumblr-animated-little-busters-komari-home-button.user.js", "style_settings": [] }{ "name": "tv-scheduler", "version": "0.0.1", "description": "A web based recording scheduler for T.V. tuners", "author": { "name": "Aston" }, "repository": { "type": "git", "url": "git://github.com/astondg/tv-scheduler.git" }, "devDependencies": { "grunt": "^0.4.5", "grunt-contrib-concat": "^0.5.1", "grunt-contrib-copy": "^0.8.0", "grunt-contrib-uglify": "^0.9.1", "grunt-contrib-watch": "^0.6.1" } } Aleks0509111/Server "Beau travail ! 
D'autres viendront, c'est certain, mais au moins ils réfléchiront à deux fois avant de piller l'endroit."{ "user": "THEjoezack", "repos": 1, "login": "THEjoezack", "id": 81006, "avatar_url": "https://avatars3.githubusercontent.com/u/81006?v=3", "url": "https://api.github.com/users/THEjoezack", "html_url": "https://github.com/THEjoezack", "followers_url": "https://api.github.com/users/THEjoezack/followers", "following_url": "https://api.github.com/users/THEjoezack/following{/other_user}", "gists_url": "https://api.github.com/users/THEjoezack/gists{/gist_id}", "starred_url": "https://api.github.com/users/THEjoezack/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/THEjoezack/subscriptions", "organizations_url": "https://api.github.com/users/THEjoezack/orgs", "repos_url": "https://api.github.com/users/THEjoezack/repos", "events_url": "https://api.github.com/users/THEjoezack/events{/privacy}", "received_events_url": "https://api.github.com/users/THEjoezack/received_events", "type": "User", "site_admin": false, "name": "", "company": null, "blog": "http://joezack.com", "location": "Central Florida", "email": "", "hireable": null, "bio": null, "public_repos": 33, "public_gists": 1, "followers": 31, "following": 9, "created_at": "2009-05-05T01:27:26Z", "updated_at": "2017-02-05T11:07:32Z" }100-1000 {"nom":"Antran","circ":"4ème circonscription","dpt":"Vienne","inscrits":933,"abs":435,"votants":498,"blancs":0,"nuls":12,"exp":486,"res":[{"nuance":"MDM","nom":"","voix":152},{"nuance":"UDI","nom":"Mme ","voix":87},{"nuance":"ECO","nom":"Mme ","voix":74},{"nuance":"FN","nom":"M. ","voix":71},{"nuance":"FI","nom":"Mme ","voix":48},{"nuance":"COM","nom":"Mme ","voix":21},{"nuance":"ECO","nom":"M. ","voix":12},{"nuance":"DIV","nom":"M. ","voix":8},{"nuance":"EXG","nom":"M. ","voix":6},{"nuance":"DIV","nom":"Mme ","voix":4},{"nuance":"DIV","nom":"Mme ","voix":3},{"nuance":"EXD","nom":"M. 
","voix":0}]}[{"pk": 1, "model": "enc.node", "fields": {"excluded_groups": [], "hostname": "testnode", "description": "testnode_description", "groups": []}}, {"pk": 1, "model": "enc.nodeclass", "fields": {"node": 1, "classname": "barclass", "classparams": null}}, {"pk": 2, "model": "enc.nodeparameter", "fields": {"node": 1, "paramkey": "foo_param", "paramvalue": "{\"foo\":\"bar\"}"}}]{"0":{"shardId":7,"payload":{"t":"GUILD_CREATE","s":199,"op":0,"d":{"rules_channel_id":null,"features":[],"lazy":true,"discovery_splash":null,"name":"NNKCCOJDR","mfa_level":0,"description":null,"emojis":[],"default_message_notifications":0,"owner_id":"895136766541815613","vanity_url_code":null,"system_channel_id":"891447114716143193","banner":null,"premium_subscription_count":0,"splash":null,"region":"qondjqn","embedded_activities":[],"joined_at":"2021-10-10T08:16:06.728Z","channels":[{"type":4,"position":0,"permission_overwrites":[],"name":"","id":"885867777277419728"},{"type":0,"topic":null,"rate_limit_per_user":0,"position":0,"permission_overwrites":[],"parent_id":"239265262563126963","name":"bhcfxzk","last_message_id":"286015079188259411","id":"568103154909613914"},{"type":4,"position":0,"permission_overwrites":[],"name":"Dhnwb Jouufwku","id":"440153742219409935"},{"user_limit":0,"type":2,"rtc_region":null,"position":0,"permission_overwrites":[],"parent_id":"359852899109935110","name":"Xudwsds","id":"141564144306800481","bitrate":64000}],"presences":[],"large":false,"premium_tier":0,"system_channel_flags":0,"explicit_content_filter":0,"roles":[{"position":0,"permissions":"9052420954480","name":"toxxmxyvl","mentionable":false,"managed":false,"id":"511849453496904673","hoist":false,"color":0},{"tags":{"bot_id":"316113679413923786"},"position":1,"permissions":"7046112318","name":"K5G6A","mentionable":false,"managed":true,"id":"182697208631507618","hoist":false,"color":0}],"nsfw_level":0,"max_members":250000,"threads":[],"application_command_count":7,"public_updates_channel_id":null,"preferred_locale":"ezlZH","stage_instances":[],"application_id":null,"icon":"eae5905ad2d18d7c8deca20478b088b5","unavailable":false,"id":"910044030245434226","premium_progress_bar_enabled":false,"verification_level":0,"afk_timeout":300,"afk_channel_id":null,"nsfw":false,"stickers":[],"max_video_channel_users":25,"members":[{"user":{"username":"Zvsqn","public_flags":65536,"id":"175382683978305711","discriminator":"2688","bot":true,"avatar":"eae5905ad2d18d7c8deca20478b088b5"},"roles":["364817118471899589"],"mute":false,"joined_at":"2021-10-10T08:16:06.728Z","hoisted_role":null,"deaf":false}],"voice_states":[],"member_count":4,"application_command_counts":{"1":7,"2":0,"3":0},"guild_hashes":{"version":1,"roles":{"omitted":false,"hash":"leyvZluwyrX"},"metadata":{"omitted":false,"hash":"sfCBKgBI6oD"},"channels":{"omitted":false,"hash":"IQNqOh6fmNh"}},"guild_scheduled_events":[]}}},"id":"7768"}0 
{"name":"right","subject":1005,"date":"612010-045940","paths":{"Pen":{"strokes":[{"x":-1287,"y":83,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":-1301,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":-1301,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":-1285,"y":81,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":-1265,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":-1224,"y":82,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-1178,"y":81,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-1111,"y":85,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-1037,"y":83,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-938,"y":86,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-838,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-715,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-598,"y":66,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-465,"y":62,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-339,"y":52,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-204,"y":46,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-80,"y":41,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":42,"y":40,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":147,"y":41,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":241,"y":46,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":325,"y":53,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":390,"y":62,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":442,"y":70,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":477,"y":76,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":501,"y":83,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":511,"y":84,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":511,"y":84,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":511,"y":84,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":511,"y":84,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":494,"y":76,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":475,"y":72,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":461,"y":73,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":442,"y":73,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}10-100 { "directions": [ "Arrange roast beef slices onto the flour tortilla; top with lettuce leaves, red 
bell pepper leaves, blue cheese, and blue cheese salad dressing. Roll tortilla around ingredients." ], "ingredients": [ "4 ounces thinly sliced deli roast beef", "1 (8 inch) flour tortilla", "2 romaine lettuce leaves", "1/4 cup red bell pepper strips", "2 tablespoons crumbled blue cheese", "2 tablespoons blue cheese salad dressing, or to taste" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Beef and Blue Cheese Wrap", "url": "http://allrecipes.com/recipe/242009/beef-and-blue-cheese-wrap/" } maps/gsiv/rooms/27437.json { "id": 27437, "title": [ "[Carnival, Visibly Vicious]" ], "description": [ "The accessible area here is limited, surrounded on all sides by wire fences, save for one narrow gap occupied by a loose-swinging door. Behind the fences are large animal enclosures, each containing various, though odd, inhabitants." ], "paths": [ "Obvious paths: none" ], "location": "Wehnimer's Landing", "wayto": { "27436": "go door" }, "timeto": { "27436": 0.2 } }[ { "appId": "491530", "title": "Notruf 112", "timestamp": 1535208271, "rating": "Silver", "notes": "Some missing textures (players / some parts of vehicles) but working", "os": "Manjaro Stable 64-bit (Kernel 4.18.3)", "gpuDriver": "NVIDIA 396.51", "specs": "FX-4300 / GTX 1050 Ti", "protonVersion": "Default" }, { "appId": "491530", "cpu": "AMD A8-3870 APU with Radeon(tm) HD Graphics", "duration": "aboutAnHour", "gpu": "NVIDIA Corporation GeForce GTS 450/PCIe/SSE2", "gpuDriver": "4.6.0 NVIDIA 390.87", "kernel": "4.15.0-34-generic", "notes": "Missing doors and other textures, including player and NPC characters, making it hard to play the game. ", "os": "Linux Mint 19 Tara (64 bit)", "protonVersion": "Default", "ram": "9467 Mb", "rating": "Bronze", "timestamp": 1538402071, "title": "Notruf 112 | Emergency Call 112" } ]1-10 { "Description": "You mimic a target that acted just before you. When activating this move, you use the identical move of the creature whose turn came immediately before yours.", "Duration": "Instantaneous", "Move Time": "1 action", "PP": 10, "Range": "Self", "Type": "Normal" }0 { "twitter": "skinnyfoetusboy", "fr": { "bio": { "short": "Développeur front-end chez Altima°", "long": [ "Développeur front-end chez Altima°. ", "Faux hipster aux obédiences incertaines." ] } }, "en": { "bio": { "short": "Front-end developer @ Altima°", "long": [ "Front-end developer @ Altima°. ", "Falsely hipsterish with no known allegiances." 
] } } } { "id": 14708, "name": "", "incomplete": false, "members": false, "tradeable": true, "tradeable_on_ge": false, "stackable": false, "stacked": null, "noted": false, "noteable": false, "linked_id_item": 1355, "linked_id_noted": null, "linked_id_placeholder": null, "placeholder": true, "equipable": false, "equipable_by_player": false, "equipable_weapon": false, "cost": 1, "lowalch": null, "highalch": null, "weight": 1.133, "buy_limit": null, "quest_item": false, "release_date": "2001-01-04", "duplicate": true, "examine": "A powerful axe.", "icon": "PHY", "wiki_name": "Mithril axe", "wiki_url": "https://oldschool.runescape.wiki/w/Mithril_axe", "wiki_exchange": null, "equipment": null, "weapon": null }hensm/reloader1-10 { "page_action_idle_title": { "message": "Huidige pagina vernieuw ($1)" } , "page_action_busy_title": { "message": "Het laden van deze pagina stoppen ($1)" } , "page_action_context_normal_reload_title": { "message": "Normaal opnieuw laden" } , "page_action_context_hard_reload_title": { "message": "Geforceerd opnieuw laden" } , "page_action_context_empty_cache_and_hard_reload_title": { "message": "Cache wissen en geforceerd opnieuw laden" } } DOREMUS-ANR/recommender http://data.doremus.org/artist/ed7eb4d4-1e91-3a02-8e07-cbf857031127 http://data.doremus.org/artist/4802a043-23bb-3b8d-a443-4a3bd22ccc63 http://data.doremus.org/artist/b82c0771-5280-39af-ad2e-8ace2f4ebda3 http://data.doremus.org/artist/e432aa13-207f-34f8-9792-d1d15fcd4711 http://data.doremus.org/artist/a4e8f64d-3411-3eb4-8e09-59e24bc26bf3 http://data.doremus.org/artist/bb190de9-54d7-3e08-8ec0-d02fc701573f http://data.doremus.org/artist/45356ff6-c1a1-33e1-a4af-e005e6362351bootbox.js/2.1.1.json {"bootbox.js":","bootbox.min.js":"}0 {"expireTime":9007200878798617000,"key":"transformer-remark-markdown-ast-4e93b4d04ba21e3699ddb860fb92e05c-gatsby-remark-external-linksgatsby-remark-imagesgatsby-remark-code-titlesgatsby-remark-prismjs-","val":{"type":"root","children":[{"type":"paragraph","children":[{"type":"text","value":"When I arrive in the company, the website was dark and doesn't have a integrated ecommerce. The Website has a responsive version with many landing pages for main exams, like ","position":{"start":{"line":2,"column":1,"offset":1},"end":{"line":2,"column":175,"offset":175}}},{"type":"link","title":null,"url":"https://www.genomika.com.br/covid19/","children":[{"type":"text","value":"covid19 tests","position":{"start":{"line":2,"column":176,"offset":176},"end":{"line":2,"column":189,"offset":189}}}],"position":{"start":{"line":2,"column":175,"offset":175},"end":{"line":2,"column":228,"offset":228}},"data":{"hProperties":{"target":"_blank","rel":"nofollow noopener noreferrer"}}},{"type":"text","value":", ","position":{"start":{"line":2,"column":228,"offset":228},"end":{"line":2,"column":230,"offset":230}}},{"type":"link","title":null,"url":"https://www.genomika.com.br/testes-para-risco-hereditario-de-cancer/","children":[{"type":"text","value":"hereditary cancer","position":{"start":{"line":2,"column":231,"offset":231},"end":{"line":2,"column":248,"offset":248}}}],"position":{"start":{"line":2,"column":230,"offset":230},"end":{"line":2,"column":319,"offset":319}},"data":{"hProperties":{"target":"_blank","rel":"nofollow noopener noreferrer"}}},{"type":"text","value":" and others. 
Now is possible the user buy a genetic test by the ecommerce integrated into the website.","position":{"start":{"line":2,"column":319,"offset":319},"end":{"line":2,"column":421,"offset":421}}}],"position":{"start":{"line":2,"column":1,"offset":1},"end":{"line":2,"column":421,"offset":421}}},{"type":"paragraph","children":[{"type":"text","value":"I interviewed employees, doctors and give some users informations from Hotjar. Benchmarks and competitor analysis was done frequently. I prototyped all screens at Adobe XD. I comunicated the user and company needs to the programmer and managed the activities in Scrum Sprints of a week. For the front-end, I worked with HTML5, CSS3 and jQuery in Django templates.","position":{"start":{"line":4,"column":1,"offset":423},"end":{"line":4,"column":364,"offset":786}}}],"position":{"start":{"line":4,"column":1,"offset":423},"end":{"line":4,"column":364,"offset":786}}}],"position":{"start":{"line":1,"column":1,"offset":0},"end":{"line":4,"column":364,"offset":786}}}}{"resourceType":"ValueSet","id":"bundle-type","extension":[{"url":"http://hl7.org/fhir/StructureDefinition/valueset-oid","valueUri":"urn:oid:2.16.840.1.113883.4.642.2.327"}],"url":"http://hl7.org/fhir/ValueSet/bundle-type","version":"1.4.0","name":"BundleType","status":"draft","experimental":false,"publisher":"HL7 (FHIR Project)","contact":[{"telecom":[{"system":"other","value":"http://hl7.org/fhir"},{"system":"email","value":""}]}],"date":"2016-03-31T08:01:25+11:00","description":"Indicates the purpose of a bundle - how it was intended to be used.","compose":{"include":[{"system":"http://hl7.org/fhir/bundle-type"}]},"expansion":{"identifier":"urn:uuid:38e53a34-449a-4e33-9ee9-695819297d43","timestamp":"2016-03-30T06:59:59+11:00","contains":[{"system":"http://hl7.org/fhir/bundle-type","code":"document","display":"Document"},{"system":"http://hl7.org/fhir/bundle-type","code":"message","display":"Message"},{"system":"http://hl7.org/fhir/bundle-type","code":"transaction","display":"Transaction"},{"system":"http://hl7.org/fhir/bundle-type","code":"transaction-response","display":"Transaction Response"},{"system":"http://hl7.org/fhir/bundle-type","code":"batch","display":"Batch"},{"system":"http://hl7.org/fhir/bundle-type","code":"batch-response","display":"Batch Response"},{"system":"http://hl7.org/fhir/bundle-type","code":"history","display":"History List"},{"system":"http://hl7.org/fhir/bundle-type","code":"searchset","display":"Search Results"},{"system":"http://hl7.org/fhir/bundle-type","code":"collection","display":"Collection"}]}}gabriellewp/vegapackages/vega/test/specs-valid.json [ "airports", "arc", "arc-diagram", "area", "autosize-fit", "autosize-fit-x", "autosize-fit-y", "bar", "bar-hover-label", "bar-rangestep", "bar-time", "barley", "budget-forecasts", "chart", "chart-rangestep", "choropleth", "contour-map", "contour-scatter", "corner-radius", "crossfilter", "crossfilter-multi", "density", "dimpvis", "dot-plot", "driving", "dynamic-format", "dynamic-url", "error", "falkensee", "flush-axis-labels", "font-size-steps", "force-network", "force-beeswarm", "gapminder", "gradient", "grouped-bar", "heatmap", "heatmap-image", "heatmap-lines", "heatmap-sinusoids", "histogram", "hops", "horizon", "images", "images-inline", "isocontour-airports", "isocontour-precipitation", "isocontour-volcano", "jobs", "kde", "layout-facet", "layout-hconcat", "layout-vconcat", "layout-splom", "layout-wrap", "legends", "legends-continuous", "legends-discrete", "legends-ordinal", "legends-symbol", "lifelines", "map", 
"map-area-compare", "map-bind", "map-fit", "map-point-radius", "matrix-reorder", "movies-sort", "nested-plot", "nulls-histogram", "nulls-scatter-plot", "overview-detail", "panzoom", "parallel-coords", "playfair", "population", "quantile-dot-plot", "quantile-quantile-plot", "regression", "scales-bin", "scales-discretize", "scatter-brush-filter", "scatter-brush-panzoom", "scatter-plot", "scatter-plot-contours", "scatter-plot-guides", "scatter-plot-heatmap", "shift-select", "splom-inner", "splom-outer", "stacked-area", "stacked-bar", "stocks-index", "symbol-angle", "text-multiline", "titles", "tree-cluster", "tree-nest", "tree-radial", "tree-radial-bundle", "treemap", "violin-plot", "weather", "window", "wordcloud" ]{"cpexcel.js":","jszip.js":","xlsx.core.min.js":","xlsx.full.min.js":","xlsx.js":","xlsx.min.js":"}{ "schema_version": "1.2.0", "id": "GHSA-mfhm-pr46-c4c3", "modified": "2022-05-17T00:01:26Z", "published": "2022-05-17T00:01:26Z", "aliases": [ "CVE-2022-1349" ], "details": "The WPQA Builder Plugin WordPress plugin before 5.2, used as a companion plugin for the Discy and Himer , does not validate that the value passed to the image_id parameter of the ajax action wpqa_remove_image belongs to the requesting user, allowing any users (with privileges as low as Subscriber) to delete the profile pictures of any other user.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2022-1349" }, { "type": "WEB", "url": "https://wpscan.com/vulnerability/7ee95a53-5fe9-404c-a77a-d1218265e4aa" } ], "database_specific": { "cwe_ids": [ "CWE-287" ], "severity": null, "github_reviewed": false } }{"dependencies":[{"name":"/home/msdimos/桌面/workspace/doxjs/tsconfig.json","includedInParent":true,"mtime":1518708038618},{"name":"./index","loc":{"line":3,"column":22}}],"generated":{"js":"\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar index_1 = require(\"./index\");\nvar actions = {\n inc: function (store, value) {\n if (value === void 0) { value = 1; }\n store.value += value;\n },\n dec: function (store, value) {\n if (value === void 0) { value = 1; }\n store.value -= value;\n }\n};\nvar listeners = {\n display: function (store) {\n console.log(store.value);\n console.log(this);\n }\n};\nvar dox = new index_1.DoxJS({\n value: 0\n});\ndox.bindActions(actions);\ndox.bindListeners(listeners);\ndox.subscribe(\"display\", false, {\n word: \"Hello wolrd!\"\n});\ndox.dispatch(\"inc\", 11);\ndox.dispatch(\"dec\", 3);\n","map":{"version":3,"file":"test.js","sourceRoot":"","sources":["src/test.ts"],"names":[],"mappings":";;AAAA,iCAAgC;AAGhC,IAAM,OAAO,GAAG;IACZ,GAAG,EAAE,UAAC,KAAK,EAAE,KAAS;QAAT,sBAAA,EAAA,SAAS;QAClB,KAAK,CAAC,KAAK,IAAI,KAAK,CAAA;IACxB,CAAC;IACD,GAAG,EAAE,UAAC,KAAK,EAAE,KAAS;QAAT,sBAAA,EAAA,SAAS;QAClB,KAAK,CAAC,KAAK,IAAI,KAAK,CAAC;IACzB,CAAC;CACJ,CAAA;AAED,IAAM,SAAS,GAAG;IACd,OAAO,EAAE,UAAS,KAAK;QACnB,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;QACzB,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;IACtB,CAAC;CACJ,CAAA;AAED,IAAI,GAAG,GAAG,IAAI,aAAK,CAAC;IAChB,KAAK,EAAE,CAAC;CACX,CAAC,CAAC;AAEH,GAAG,CAAC,WAAW,CAAC,OAAO,CAAC,CAAC;AACzB,GAAG,CAAC,aAAa,CAAC,SAAS,CAAC,CAAC;AAE7B,GAAG,CAAC,SAAS,CAAC,SAAS,EAAE,KAAK,EAAE;IAC5B,IAAI,EAAE,cAAc;CACvB,CAAC,CAAC;AAEH,GAAG,CAAC,QAAQ,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;AACxB,GAAG,CAAC,QAAQ,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC","sourcesContent":["import { DoxJS } from './index';\n\n\nconst actions = {\n inc: (store, value = 1) => {\n store.value += value\n },\n dec: (store, value = 1) => {\n 
store.value -= value;\n }\n}\n\nconst listeners = {\n display: function(store) {\n console.log(store.value);\n console.log(this);\n }\n}\n\nlet dox = new DoxJS({\n value: 0\n});\n\ndox.bindActions(actions);\ndox.bindListeners(listeners);\n\ndox.subscribe(\"display\", false, {\n word: \"Hello wolrd!\"\n});\n\ndox.dispatch(\"inc\", 11);\ndox.dispatch(\"dec\", 3);\n"]}},"hash":"9442b72a80789e75cc0c0add7258bc40","cacheData":{"env":{}}}swapnilpatil8289/career { "accept_payment": 0, "allow_comments": 1, "allow_delete": 0, "allow_edit": 1, "allow_incomplete": 0, "allow_multiple": 1, "allow_print": 0, "amount": 0.0, "amount_based_on_field": 0, "creation": "2016-09-10 02:53:16.598314", "doc_type": "Job Applicant", "docstatus": 0, "doctype": "Web Form", "idx": 0, "introduction_text": "", "is_standard": 1, "login_required": 0, "max_attachment_size": 0, "modified": "2019-06-15 16:12:43.060240", "modified_by": "Administrator", "module": "Career Portal", "name": "job-application", "owner": "Administrator", "published": 1, "route": "job_application", "route_to_success_link": 0, "show_attachments": 0, "show_in_grid": 0, "show_sidebar": 1, "sidebar_items": [], "success_message": "Thank you for applying.", "success_url": "/jobs", "title": "Job Application", "web_form_fields": [ { "allow_read_on_all_link_options": 0, "fieldname": "job_title", "fieldtype": "Select", "hidden": 0, "label": "Job Opening", "max_length": 0, "max_value": 0, "options": "", "read_only": 1, "reqd": 0, "show_in_filter": 0 }, { "allow_read_on_all_link_options": 0, "fieldname": "applicant_name", "fieldtype": "Data", "hidden": 0, "label": "Applicant Name", "max_length": 0, "max_value": 0, "read_only": 0, "reqd": 1, "show_in_filter": 0 }, { "allow_read_on_all_link_options": 0, "fieldname": "email_id", "fieldtype": "Data", "hidden": 0, "label": "Email Address", "max_length": 0, "max_value": 0, "options": "Email", "read_only": 0, "reqd": 1, "show_in_filter": 0 }, { "allow_read_on_all_link_options": 0, "fieldname": "cover_letter", "fieldtype": "Text", "hidden": 0, "label": "Cover Letter", "max_length": 0, "max_value": 0, "read_only": 0, "reqd": 0, "show_in_filter": 0 }, { "allow_read_on_all_link_options": 0, "fieldname": "resume_attachment", "fieldtype": "Attach", "hidden": 0, "label": "Resume Attachment", "max_length": 0, "max_value": 0, "read_only": 0, "reqd": 0, "show_in_filter": 0 }, { "allow_read_on_all_link_options": 0, "fieldname": "interview_city", "fieldtype": "Link", "hidden": 0, "label": "Interview Schedule", "max_length": 0, "max_value": 0, "options": "My Test Schedule", "read_only": 0, "reqd": 0, "show_in_filter": 0 }, { "allow_read_on_all_link_options": 0, "fieldname": "interview_slot", "fieldtype": "Link", "hidden": 0, "label": "Interview Slot", "max_length": 0, "max_value": 0, "options": "Interview Slot Group", "read_only": 0, "reqd": 0, "show_in_filter": 0 } ] }{ "authors": [ { "homepage": "https://github.com/creativecommons/sre-salt-prime", "name": "Creative Commons" } ], "config": { "vendor-dir": "wp-content/vendor" }, "description": "Creative Commons Open COVID Pledge WordPress Site via Composer", "extra": { "wordpress-install-dir": "wp" }, "name": "creativecommons/wordpress-opencovid", "repositories": [ { "type": "composer", "url": "https://wpackagist.org" } ], "require": { "composer/installers": "1.9", "johnpbloch/wordpress": "5.4.2", "wpackagist-plugin/a3-lazy-load": "2.4.0", "wpackagist-plugin/all-in-one-wp-migration": "7.26", "wpackagist-plugin/all-in-one-wp-security-and-firewall": "4.4.4", 
"wpackagist-plugin/antispam-bee": "2.9.2", "wpackagist-plugin/broken-link-checker": "1.11.13", "wpackagist-plugin/classic-editor": "1.6", "wpackagist-plugin/contact-form-7": "5.2.1", "wpackagist-plugin/duplicate-post": "3.2.5", "wpackagist-plugin/elementor": "2.9.14", "wpackagist-plugin/header-footer-elementor": "1.5.2", "wpackagist-plugin/limit-login-attempts-reloaded": "2.15.1", "wpackagist-plugin/ml-slider": "3.17.0", "wpackagist-plugin/molongui-authorship": "4.1.2", "wpackagist-plugin/mystickymenu": "2.4.3", "wpackagist-plugin/statify": "1.8.0", "wpackagist-plugin/tablepress": "1.12", "wpackagist-plugin/the-events-calendar": "5.1.5", "wpackagist-plugin/themeisle-companion": "2.9.16", "wpackagist-plugin/tinymce-advanced": "5.5.0", "wpackagist-plugin/wordpress-seo": "14.7", "wpackagist-plugin/wp-mail-smtp": "2.2.1", "wpackagist-plugin/wpforms-lite": "1.6.2.2", "wpackagist-theme/neve": "2.7.6" }, "require-dev": { "wpackagist-plugin/debug-bar": "1.0.1", "wpackagist-plugin/debug-bar-actions-and-filters-addon": "1.5.4" }, "type": "project" } 1-10 {"ast":null,"code":"import { combineReducers } from \"redux\";\nimport authReducer from \"./authReducer\";\nimport errorReducer from \"./errorReducer\";\nexport default combineReducers({\n auth: authReducer,\n errors: errorReducer\n});","map":{"version":3,"sources":["/home/megha/mernapp/frontend/src/reducers/index.js"],"names":["combineReducers","authReducer","errorReducer","auth","errors"],"mappings":"AAAA,SAASA,eAAT,QAAgC,OAAhC;AACA,OAAOC,WAAP,MAAwB,eAAxB;AACA,OAAOC,YAAP,MAAyB,gBAAzB;AACA,eAAeF,eAAe,CAAC;AAC3BG,EAAAA,IAAI,EAAEF,WADqB;AAE3BG,EAAAA,MAAM,EAAEF;AAFmB,CAAD,CAA9B","sourcesContent":["import { combineReducers } from \"redux\";\nimport authReducer from \"./authReducer\";\nimport errorReducer from \"./errorReducer\";\nexport default combineReducers({\n auth: authReducer,\n errors: errorReducer\n});"]},"metadata":{},"sourceType":"module"}{"nom":"Allichamps","circ":"2ème circonscription","dpt":"Haute-Marne","inscrits":287,"abs":156,"votants":131,"blancs":6,"nuls":0,"exp":125,"res":[{"nuance":"FN","nom":"","voix":76},{"nuance":"LR","nom":"","voix":49}]}{"word":"rustic","definition":"1. Of or pertaining to the country; rural; as, the rustic gods of antiquity. Milton. And many a holy text around she strews, That teach the rustic moralist to die. Gray. She had a rustic, woodland air. Wordsworth. 2. Rude; awkward; rough; unpolished; as, rustic manners. \"A rustic muse.\" Spenser. 3. Coarse; plain; simple; as, a rustic entertainment; rustic dress. 4. Simple; artless; unadorned; unaffected. Pope. Rustic moth (Zoöl.), any moth belonging to Agrotis and allied genera. Their larvæ are called cutworms. See Cutworm. -- Rustic work. (a) (Arch.) Cut stone facing which has the joints worked with grooves or channels, the face of each block projecting beyond the joint, so that the joints are very conspicuous. (b) (Arch. & Woodwork) Summer houses, or furniture for summer houses, etc., made of rough limbs of trees fancifully arranged. Syn. -- Rural; rude; unpolished; inelegant; untaught; artless; honest. See Rural.\n\n1. An inhabitant of the country, especially one who is rude, coarse, or dull; a clown. Hence to your fields, you rustics! hence, away. Pope. 2. A rural person having a natural simplicity of character or manners; an artless, unaffected person. 
[Poetic]"}{ "name": "simplyk-import", "description": "", "main": "", "authors": [ "ThibautJ <>" ], "license": "MIT", "homepage": "https://github.com/ThibautJ/simplyk-import", "private": true, "ignore": [ "**/.*", "node_modules", "public/bower_components", "test", "tests" ], "dependencies": { "bootstrap": "^3.3.7" } } package.json { "name": "onoffcanvas", "version": "2.3.0", "description": "An offcanvas plugin", "types": "dist/types/index.d.ts", "style": "dist/onoffcanvas.css", "sass": "scss/onoffcanvas.scss", "main": "dist/onoffcanvas.js", "module": "dist/onoffcanvas.esm.js", "browser": "dist/onoffcanvas.min.js", "unpkg": "dist/onoffcanvas.min.js", "jsdelivr": "dist/onoffcanvas.min.js", "files": [ "dist", "src", "scss", "README.md" ], "scripts": { "css": "run-p css-compile css-minify", "css-compile": "sass scss:dist", "css-minify": "sass -s compressed scss/onoffcanvas.scss dist/onoffcanvas.min.css", "js": "npm-run-all --parallel ts-compile --sequential js-compile js-minify", "lint": "eslint src/**/*.ts", "tsc": "tsc", "ts-compile": "npm-run-all --parallel lint --sequential tsc", "js-compile": "rollup -c", "js-minify": "terser --compress typeofs=false --mangle --comments \"/^!/\" --source-map \"content=dist/onoffcanvas.js.map,includeSources,url=onoffcanvas.min.js.map\" --output dist/onoffcanvas.min.js dist/onoffcanvas.js", "build": "run-p css js", "watch": "run-p watch-css watch-js", "watch-css": "nodemon --ignore docs/ --ignore js --ignore dist/ -e scss -x \"npm run css\"", "watch-js": "nodemon --ignore docs/ --ignore scss/ --ignore dist/ -e ts -x \"npm run js\"", "css:dev": "sass scss:docs/assets/css -w", "js:dev": "rollup -c -w" }, "repository": { "type": "git", "url": "git+https://github.com/onokumus/onoffcanvas.git" }, "keywords": [ "offcanvas", "canvas", "events", "EventEmitter" ], "author": " <> (https://onokumus.com)", "license": "MIT", "bugs": { "url": "https://github.com/onokumus/onoffcanvas/issues" }, "homepage": "https://github.com/onokumus/onoffcanvas", "devDependencies": { "@rollup/plugin-commonjs": "^19.0.0", "@rollup/plugin-node-resolve": "^13.0.0", "@rollup/plugin-typescript": "^8.2.1", "@typescript-eslint/eslint-plugin": "^4.23.0", "@typescript-eslint/parser": "^4.23.0", "eslint": "^7.26.0", "eslint-config-airbnb-base": "^14.2.1", "eslint-config-airbnb-typescript": "^12.3.1", "eslint-config-prettier": "^8.3.0", "eslint-plugin-import": "^2.23.2", "nodemon": "^2.0.7", "npm-run-all": "^4.1.5", "prettier": "^2.3.0", "rollup": "^2.48.0", "sass": "^1.32.13", "terser": "^5.7.0", "tslib": "^2.2.0", "typescript": "^4.2.4" } } uk-gov-mirror/hmcts.probate-frontend0 { "title": "Copïau - DU", "question": "Faint o gopïau swyddogol ychwanegol o’r grant sydd eu hangen arnoch i’w defnyddio yn y DU? ", "paragraph1": "Byddwch yn cael copi swyddogol am ddim o grant profiant gyda’ch ffi gwneud cais.", "paragraph2": "Archebwch gopïau swyddogol ychwanegol o’r grant os oes angen i chi eu hanfon at wahanol ddeiliaid asedau, er enghraifft, copi ar gyfer banciau, polisïau yswiriant, cyfranddaliadau ac eiddo. ", "paragraph3": "Mae copïau swyddogol ychwanegol yn costio £1.50 yr un ", "copies": "Nifer y copïau swyddogol ychwanegol", "questionOld": "Faint o gopïau swyddogol ychwanegol o’r grant sydd eu hangen arnoch i’w defnyddio yn y DU?", "paragraph1Old": "Byddwch yn cael copi swyddogol am ddim o grant profiant gyda’ch ffi gwneud cais. 
", "paragraph2Old": "Archebwch gopïau swyddogol ychwanegol o’r grant os oes angen i chi eu hanfon at wahanol ddeiliaid asedau, er enghraifft, copi ar gyfer banciau, polisïau yswiriant, cyfranddaliadau ac eiddo. ", "paragraph3Old": "Mae copïau swyddogol ychwanegol yn costio £1.50 yr un ", "copiesOld": "Nifer y copïau swyddogol ychwanegol", "detailTitle": "Beth yw copi swyddogol?", "detailText1": "Mae'n gopi o’r grant gyda sêl holograffig lliw arian ar ei flaen. Gall banc ofyn i chi am gopi ardystiedig i ryddhau arian i chi.", "detailText2": "Defnyddir copïau swyddogol ar gyfer asedau yn y DU yn unig.", "errors": { "uk": { "invalid": "Nodwch rif dilys", "required": "Nodwch y nifer o gopïau DU rydych eu hangen. Rhowch ‘0’ os nad ydych angen unrhyw gopïau." } } } {"appid": 335830, "name": "Distant Star: Revenant Fleet", "windows": true, "mac": false, "linux": false, "early_access": false, "lookup_time": 1490980022}{"name":"chevron_up","subject":1008,"date":"11122009-112517","paths":{"Pen":{"strokes":[{"x":-743,"y":645,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":-742,"y":657,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":-742,"y":657,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":-742,"y":657,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":-736,"y":638,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":-716,"y":622,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-705,"y":583,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-680,"y":539,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-658,"y":476,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-621,"y":407,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-585,"y":324,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-539,"y":235,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-495,"y":136,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-446,"y":31,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-396,"y":-77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-343,"y":-184,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-291,"y":-288,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-245,"y":-382,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":-204,"y":-466,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":-168,"y":-536,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":-140,"y":-594,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":-117,"y":-630,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":-99,"y":-652,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":-88,"y":-650,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":-81,"y":-633,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":-74,"y":-596,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":-69,"y":-544,"w":null,"z":null,"alpha":null,"b
eta":null,"gamma":null,"t":26,"stroke_id":0},{"x":-62,"y":-479,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":-55,"y":-401,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":-42,"y":-313,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":-27,"y":-218,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":-8,"y":-118,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":15,"y":-19,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0},{"x":38,"y":75,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":74,"y":177,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0},{"x":99,"y":261,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":35,"stroke_id":0},{"x":131,"y":340,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":36,"stroke_id":0},{"x":156,"y":410,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":37,"stroke_id":0},{"x":184,"y":476,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":38,"stroke_id":0},{"x":207,"y":533,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":39,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}Application/build/intermediates/blame/res/debug/single/color-v11.json0 [ { "merged": "/Users/takahashimasatsugu/AndroidStudioProjects/RepeatingAlarm/Application/build/intermediates/res/merged/debug/color-v11/abc_background_cache_hint_selector_material_dark.xml", "source": "/Users/takahashimasatsugu/.android/build-cache/c61a0f6b0c1ef494a4113cc3ec1603b82c35955b/output/res/color-v11/abc_background_cache_hint_selector_material_dark.xml" }, { "merged": "/Users/takahashimasatsugu/AndroidStudioProjects/RepeatingAlarm/Application/build/intermediates/res/merged/debug/color-v11/abc_background_cache_hint_selector_material_light.xml", "source": "/Users/takahashimasatsugu/.android/build-cache/c61a0f6b0c1ef494a4113cc3ec1603b82c35955b/output/res/color-v11/abc_background_cache_hint_selector_material_light.xml" } ]1-10 { "id": 20242, "citation_title": "The Eurocrisis: Muddling Through, or On the Way to a More Perfect Euro Union?", "citation_author": [ "" ], "citation_publication_date": "2014-06-19", "issue_date": "2014-06-19", "revision_date": "None", "topics": [ "International Economics", "International Finance", "International Macroeconomics" ], "program": [ "International Finance and Macroeconomics" ], "projects": null, "working_groups": null, "abstract": "\n\nThis paper looks at the short history of the Eurozone through the lens of an evolutionary approach to forming new institutions. The euro has operated as a currency without a state, under the dominance of Germany. This has so far allowed the euro to achieve a number of design objectives, and this may continue, as long as Germany does not shirk its growing responsibility for the euro's future. Germany's resilience and dominant size within the EU may explain its \"muddling-through\" approach towards the Eurozone crisis. We review several manifestations of this muddling through process. 
Greater mobility of labor and lower mobility of under-regulated capital may be the costly \"second best\" adjustment until the arrival of more mature institutions in the Eurozone.\n\n", "acknowledgement": "\nInsightful comments by , , , and the 20th Dubrovnik Economic Conference participants are gratefully acknowledged. is grateful for the support provided by the Dockson Chair in Economics and International Relations, USC. The views expressed herein are those of the author and do not necessarily reflect the views of the National Bureau of Economic Research.\n\n\n" }luosichengx/myplace {"title": "Agnostic Bayesian Learning of Ensembles.", "fields": ["probabilistic logic", "finite set", "linear combination", "baseline", "model selection"], "abstract": "We propose a method for producing ensembles of predictors based on holdout estimations of their generalization performances. This approach uses a prior directly on the performance of predictors taken from a finite set of candidates and attempts to infer which one is best. Using Bayesian inference, we can thus obtain a posterior that represents our uncertainty about that choice and construct a weighted ensemble of predictors accordingly. This approach has the advantage of not requiring that the predictors be probabilistic themselves, can deal with arbitrary measures of performance and does not assume that the data was actually generated from any of the predictors in the ensemble. Since the problem of finding the best (as opposed to the true) predictor among a class is known as agnostic PAC-learning, we refer to our method as agnostic Bayesian learning. We also propose a method to address the case where the performance estimate is obtained from k-fold cross validation. While being efficient and easily adjustable to any loss function, our experiments confirm that the agnostic Bayes approach is state of the art compared to common baselines such as model selection based on k-fold crossvalidation or a learned linear combination of predictor outputs.", "citation": "Citations (8)", "year": "2014", "departments": ["Laval University", "Laval University", "Laval University", "Universit\u00e9 de Sherbrooke"], "conf": "icml", "authors": [".....http://dblp.org/pers/hd/l/Lacoste:Alexandre", ".....http://dblp.org/pers/hd/m/Marchand:Mario", "Fran\u00e7ois Laviolette.....http://dblp.org/pers/hd/l/Laviolette:Fran=ccedil=ois", ".....http://dblp.org/pers/hd/l/Larochelle:Hugo"], "pages": 9}package.json { "name": "node-pokedex", "private": true, "version": "1.0.0", "description": "Pokédex made with Node, using Axios and Inquirer", "main": "src/index.js", "dependencies": { "axios": "^0.19.0", "inquirer": "^7.0.0" }, "scripts": { "start": "node src/index" }, "repository": { "type": "git", "url": "git+https://github.com/mauricio-chavez/node-pokedex.git" }, "keywords": [ "pokémon", "pokédex", "node", "axios", "inquirer" ], "author": "", "license": "MIT", "bugs": { "url": "https://github.com/mauricio-chavez/node-pokedex/issues" }, "homepage": "https://github.com/mauricio-chavez/node-pokedex#readme" } src/main/resources/static/mas_json/2013_popl_6044404722080163277.json {"title": "Inductive data flow graphs.", "fields": ["correctness", "control flow graph", "data flow analysis", "concurrency", "thread"], "abstract": "The correctness of a sequential program can be shown by the annotation of its control flow graph with inductive assertions. 
We propose inductive data flow graphs, data flow graphs with incorporated inductive assertions, as the basis of an approach to verifying concurrent programs. An inductive data flow graph accounts for a set of dependencies between program actions in interleaved thread executions, and therefore stands as a representation for the set of concurrent program traces which give rise to these dependencies. The approach first constructs an inductive data flow graph and then checks whether all program traces are represented. The size of the inductive data flow graph is polynomial in the number of data dependencies (in a sense that can be made formal); it does not grow exponentially in the number of threads unless the data dependencies do. The approach shifts the burden of the exponential explosion towards the check whether all program traces are represented, i.e., to a combinatorial problem (over finite graphs).", "citation": "Citations (66)", "departments": ["University of Toronto", "University of Toronto", "University of Freiburg"], "authors": [".....http://dblp.org/pers/hd/f/Farzan:Azadeh", ".....http://dblp.org/pers/hd/k/Kincaid:Zachary", ".....http://dblp.org/pers/hd/p/Podelski:Andreas"], "conf": "popl", "year": "2013", "pages": 14}{"collections":[{"fullName":"System.Security.Principal.IPrincipal","methods":[{"name":"System.Security.Principal.IPrincipal.Identity","type":"PROP","count":2010},{"name":"System.Security.Principal.IPrincipal.IsInRole(System.String role)","type":"SIMPLE","count":766},{"name":"(System.Security.Principal.IPrincipal) object","type":"CAST","count":4}]}]}{"path":"vanilla/api/loot/conditions/crafttweaker/True.md","ownerModId":"crafttweaker","zenCodeName":"crafttweaker.api.loot.conditions.crafttweaker.True","searchTerms":["crafttweaker.api.loot.conditions.ILootConditionTypeBuilder"]} hapi-fhir-structures-r4/src/test/resources/rdf-test-input/valueset-instance-availability.json { "resourceType": "ValueSet", "id": "instance-availability", "meta": { "lastUpdated": "2019-11-01T09:29:23.356+11:00", "profile": [ "http://hl7.org/fhir/StructureDefinition/shareablevalueset" ] }, "text": { "status": "extensions", "div": "\u003cdiv xmlns\u003d\"http://www.w3.org/1999/xhtml\"\u003e\u003ch2\u003eInstanceAvailability\u003c/h2\u003e\u003cdiv\u003e\u003cp\u003eAvailability of the resource.\u003c/p\u003e\n\u003c/div\u003e\u003cp\u003e\u003cb\u003eCopyright Statement:\u003c/b\u003e\u003c/p\u003e\u003cdiv\u003e\u003cp\u003eThese codes are excerpted from Digital Imaging and Communications in Medicine (DICOM) Standard, Part 16: Content Mapping Resource, Copyright 2011 by the National Electrical Manufacturers Association\u003c/p\u003e\n\u003c/div\u003e\u003cp\u003eThis value set includes codes from the following code systems:\u003c/p\u003e\u003cul\u003e\u003cli\u003eInclude these codes as defined in \u003ca href\u003d\"http://dicom.nema.org/resources/ontology/DCM\"\u003e\u003ccode\u003ehttp://dicom.nema.org/resources/ontology/DCM\u003c/code\u003e\u003c/a\u003e\u003ctable class\u003d\"none\"\u003e\u003ctr\u003e\u003ctd style\u003d\"white-space:nowrap\"\u003e\u003cb\u003eCode\u003c/b\u003e\u003c/td\u003e\u003ctd\u003e\u003cb\u003eDisplay\u003c/b\u003e\u003c/td\u003e\u003ctd\u003e\u003cb\u003eDefinition\u003c/b\u003e\u003c/td\u003e\u003c/tr\u003e\u003ctr\u003e\u003ctd\u003e\u003ca href\u003d\"codesystem-dicom-dcim.html#dicom-dcim-ONLINE\"\u003eONLINE\u003c/a\u003e\u003c/td\u003e\u003ctd\u003eOnline\u003c/td\u003e\u003ctd\u003eResources are immediately 
available.\u003c/td\u003e\u003c/tr\u003e\u003ctr\u003e\u003ctd\u003e\u003ca href\u003d\"codesystem-dicom-dcim.html#dicom-dcim-OFFLINE\"\u003eOFFLINE\u003c/a\u003e\u003c/td\u003e\u003ctd\u003eOffline\u003c/td\u003e\u003ctd\u003eResources need to be retrieved by manual intervention.\u003c/td\u003e\u003c/tr\u003e\u003ctr\u003e\u003ctd\u003e\u003ca href\u003d\"codesystem-dicom-dcim.html#dicom-dcim-NEARLINE\"\u003eNEARLINE\u003c/a\u003e\u003c/td\u003e\u003ctd\u003eNearline\u003c/td\u003e\u003ctd\u003eResources need to be retrieved from relatively slow media.\u003c/td\u003e\u003c/tr\u003e\u003ctr\u003e\u003ctd\u003e\u003ca href\u003d\"codesystem-dicom-dcim.html#dicom-dcim-UNAVAILABLE\"\u003eUNAVAILABLE\u003c/a\u003e\u003c/td\u003e\u003ctd\u003eUnavailable\u003c/td\u003e\u003ctd\u003eResources cannot be retrieved.\u003c/td\u003e\u003c/tr\u003e\u003c/table\u003e\u003c/li\u003e\u003c/ul\u003e\u003c/div\u003e" }, "extension": [ { "url": "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg", "valueCode": "ii" } ], "url": "http://hl7.org/fhir/ValueSet/instance-availability", "identifier": [ { "system": "urn:ietf:rfc:3986", "value": "urn:oid:1.2.840.10008.6.​1.​811" } ], "version": "4.0.1", "name": "InstanceAvailability", "title": "InstanceAvailability", "status": "draft", "experimental": false, "date": "2019-11-01T09:29:23+11:00", "publisher": "HL7 (FHIR Project)", "contact": [ { "telecom": [ { "system": "url", "value": "http://hl7.org/fhir" }, { "system": "email", "value": "" } ] } ], "description": "Availability of the resource.", "copyright": "These codes are excerpted from Digital Imaging and Communications in Medicine (DICOM) Standard, Part 16: Content Mapping Resource, Copyright 2011 by the National Electrical Manufacturers Association", "compose": { "include": [ { "system": "http://dicom.nema.org/resources/ontology/DCM", "concept": [ { "extension": [ { "url": "http://hl7.org/fhir/StructureDefinition/valueset-concept-definition", "valueString": "Resources are immediately available." } ], "code": "ONLINE" }, { "extension": [ { "url": "http://hl7.org/fhir/StructureDefinition/valueset-concept-definition", "valueString": "Resources need to be retrieved by manual intervention." } ], "code": "OFFLINE" }, { "extension": [ { "url": "http://hl7.org/fhir/StructureDefinition/valueset-concept-definition", "valueString": "Resources need to be retrieved from relatively slow media." } ], "code": "NEARLINE" }, { "extension": [ { "url": "http://hl7.org/fhir/StructureDefinition/valueset-concept-definition", "valueString": "Resources cannot be retrieved." 
} ], "code": "UNAVAILABLE" } ] } ] } }1-10 [ { "className" : "file.pagefactory.json.JsonFileProcessorTest$InValidJsonWrongFieldByFieldPage", "fieldBy" : [ { "fie" : "inValidFileStructure", "how" : "ID", "using" : "inValidFileStructure" } ] } ]{"content": "I write for @ParlStreet on the key talking points from Osborne's final Budget before #GE2015 http://t.co/FVXik4NlWX #Budget2015", "entities": [{"offset": 12, "type": "ne", "id": 2, "entity": "@ParlStreet"}, {"entity": "budget2015", "type": "topic keyword", "id": 3, "offset": 117}, {"entity": "osborne", "type": "topic keyword", "id": 4, "offset": 55}], "topics": [{"topic": "economy", "id": 1}], "tweet_id": "578214908606709760"}pyganflor/facturacion { "/js/app.js": "/js/app.js", "/css/app.scss": "/css/app.scss", "/css/custom.css": "/css/custom.css" } codystanfield/foundry-vtt--pathfinder-2e { "_id": "7WBZ2kkhZ7JorWu2", "data": { "actionCategory": { "value": "" }, "actionType": { "value": "passive" }, "actions": { "value": "" }, "description": { "chat": "", "unidentified": "", "value": "

The touch of undeath runs through your blood. Your family tree might contain powerful undead, like a vampire, or perhaps you died and returned a bit different.


Spell List divine
Bloodline Skills Intimidation, Religion
Granted Spells cantrip @Compendium[pf2e.spells-srd.mAMEt4FFbdqoRnkN]{Chill Touch}, 1st: @Compendium[pf2e.spells-srd.wdA52JJnsuQWeyqz]{Harm}, 2nd: @Compendium[pf2e.spells-srd.8ViwItUgwT4lOvvb]{False Life}, 3rd: @Compendium[pf2e.spells-srd.GUeRTriJkMlMlVrk]{Bind Undead}, 4th: @Compendium[pf2e.spells-srd.FM3SmEW8N1FCRjqt]{Talking Corpse}, 5th: @Compendium[pf2e.spells-srd.MlpbeZ61Euhl0d60]{Cloudkill}, 6th: @Compendium[pf2e.spells-srd.fd31tAHSSGXyOxW6]{Vampiric Exsanguination}, 7th: @Compendium[pf2e.spells-srd.Z9OrRXKgAPv6Hn5l]{Finger of Death}, 8th: @Compendium[pf2e.spells-srd.M0jQlpQYUr0pp2Sv]{}, 9th: @Compendium[pf2e.spells-srd.FEsuyf203wTNE2et]{Wail of the Banshee}
Bloodline Spells initial: @Compendium[pf2e.spells-srd.FedTjedva2rYk33r]{Undeath's Blessing}, advanced: @Compendium[pf2e.spells-srd.cqdmSmQnM0q6wbWG]{Drain Life}, greater: @Compendium[pf2e.spells-srd.2YIr0S2Gt14PMMQp]{Grasping Grave}
Blood Magic Necromantic energy flows through you or one target. Either you gain temporary Hit Points equal to the spell’s level for 1 round, or a target takes 1 negative damage per spell level (if the spell already deals initial negative damage, combine this with the spell’s initial damage before determining weaknesses and resistances).

" }, "featType": { "value": "classfeature" }, "level": { "value": "1" }, "prerequisites": { "value": "" }, "rarity": { "value": "common" }, "source": { "value": "" }, "traits": { "custom": "", "value": [ "sorcerer" ] }, "usage": { "value": "held-in-one-hand" } }, "flags": {}, "img": "systems/pf2e/icons/features/classes/undead.jpg", "name": "Bloodline: Undead", "permission": { "default": 0 }, "type": "feat" } 10-100 { "directions": [ "In a saucepan over medium heat, combine water, sugar, vanilla and ground coffee. Bring to a boil, reduce heat to low, and simmer for 10 minutes, stirring occasionally. Allow to cool, then remove grounds through a strainer.", "When cool, stir in vodka. Pour into a liquor bottle, and keep in a cool place." ], "ingredients": [ "2 cups water", "1 1/4 cups white sugar", "2 tablespoons vanilla extract", "2 tablespoons fresh ground coffee beans", "2 1/2 cups vodka" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Coffee Flavored Liqueur III", "url": "http://allrecipes.com/recipe/34761/coffee-flavored-liqueur-iii/" } app/pages/sections/commencement-date/manifest.json { "backLinkText": "Go back", "title": "Commencement date for", "description": "Enter the date that this Call-off Agreement will begin.", "questions": { "commencementDate": { "id": "commencementDate", "type": "date", "mainAdvice": "Commencement date", "additionalAdvice": "For example 14 01 2020" } }, "errorMessages": { "CommencementDateRequired": "Enter a commencement date", "CommencementDateInvalid": "Enter a commencement date in a valid format", "CommencementDateGreaterThan": "Commencement date must be in the future or within the last 60 days", "CommencementDateNotReal": "Commencement date must be a real date", "CommencementDateDayRequired": "Commencement date must include a day", "CommencementDateMonthRequired": "Commencement date must include a month", "CommencementDateYearRequired": "Commencement date must include a year", "CommencementDateYearLength": "Year must be four numbers" }, "saveButtonText": "Save and return" } 1-10 {"appid": 438140, "name": "", "windows": true, "mac": true, "linux": false, "early_access": false, "lookup_time": 1490991580}0 { "extends": ["tslint-eslint-rules"], "rulesDirectory": ["node_modules/tslint-eslint-rules/dist/rules"], "rules": { "no-constant-condition": true, "no-unnecessary-type-assertion": true, "prefer-const": true, "prefer-for-of": true, "curly": true, "forin": true, "no-duplicate-variable": true, "no-eval": true, "no-invalid-template-strings": true, "no-return-await": true, "no-string-literal": true, "no-string-throw": true, "no-var-keyword": true, "eofline": true, "indent": [true, "spaces", 2], "linebreak-style": [true, "LF"], "no-duplicate-imports": true, "prefer-object-spread": true, "object-literal-sort-keys": false, "trailing-comma": [true, {"typeLiterals": "never", "arrays": "always", "objects": "always"}], "array-type": [true, "generic"], "arrow-parens": true, "arrow-return-shorthand": true, "class-name": true, "import-spacing": true, "no-consecutive-blank-lines": true, "no-console": { "severity": "warning", "options": ["debug", "info", "log", "time", "timeEnd", "trace"] }, "type-literal-delimiter": true, "semicolon": [true, "always"], "quotemark": [true, "single"] } } plugins/plugin-core-support/i18n/resources_it.json { "versionUsageTitle": "Informazioni sulla versione", "versionUsageHeader": "Stampa la versione corrente", "versionUsageOptionalDocs": "verifica anche la presenza di aggiornamenti", "helpUsageTitle": "Introduzione", 
"helpUsageHeader": "Un riepilogo della struttura di comandi di primo livello", "screenshotUsageTitle": "Cattura schermata", "screenshotUsageHeader": "Catturare una schermata, specificando facoltativamente l'area della finestra da catturare.", "screenshotSidecarUsageDocs": "cattura i contenuti collaterali", "screenshotReplUsageDocs": "cattura i contenuti di riga comandi", "screenshotLastUsageDocs": "cattura l'output dell'ultimo comando", "screenshotFullUsageDocs": "cattura l'intera pagina, inclusa l'intestazione", "screenshotUsageDocs": "cattura l'intera pagina, tranne l'intestazione", "screenshotWhichUsageDocs": "l'area da catturare", "screenshotNUsageDocs": "l'ennesima area da catturare", "screenshotREPLError": "È stato richiesto di catturare la schermata dell'ultimo output di riga comandi, ma questo è il primo comando", "screenshotSidecarNotOpen": "È stato richiesto di catturare la schermata del sidecar, ma attualmente non è aperto", "screenshotInternalError": "Impossibile identificare l'area della schermata da catturare", "currentTheme": "Si sta utilizzando il tema predefinito", "themesUsageDocs": "Elenca temi disponibili", "themeResetUsageDocs": "Reimposta il tema predefinito", "themeSetUsageDocs": "Imposta il tema corrente", "themeSetUsageRequiredDocs": "Il nome di un tema da utilizzare", "pleaseConfirm": "Confermare", "areYouSure": "Continuare l'operazione?", "aboutToExecute": "Si sta per eseguire il comando.", "confirmationMessage": "Messaggio di conferma", "commandToBeExecuted": "Comando da eseguire", "operationCancelled": "Operazione annullata", "operationConfirmed": "Operazione confermata", "yesIAmSure": "Si, continua", "cancel": "Annulla", "notSupportedInBrowser": "Il comando non è ancora supportato per l'esecuzione in un browser" } BlogService/Properties/launchSettings.json { "iisSettings": { "windowsAuthentication": false, "anonymousAuthentication": true, "iisExpress": { "applicationUrl": "http://localhost:6213", "sslPort": 44383 } }, "$schema": "http://json.schemastore.org/launchsettings.json", "profiles": { "IIS Express": { "commandName": "IISExpress", "launchBrowser": true, "launchUrl": "api/posts", "environmentVariables": { "ASPNETCORE_ENVIRONMENT": "Development" } }, "BlogService": { "commandName": "Project", "launchBrowser": true, "launchUrl": "api/posts", "environmentVariables": { "ASPNETCORE_ENVIRONMENT": "Development" }, "applicationUrl": "https://localhost:5001;http://localhost:5000" }, "Docker": { "commandName": "Docker", "launchBrowser": true, "launchUrl": "{Scheme}://localhost:{ServicePort}/api/posts" }, "Azure Dev Spaces": { "commandName": "AzureDevSpaces", "launchBrowser": true, "resourceGroup": "BlogServiceKube", "aksName": "MyAKS", "subscriptionId": "acc66ff6-71a9-44ba-8e67-cc79737b06ce" } } }{ "name": "magic", "description": "Cookbook helpers and other magical things", "long_description": "Cookbook helpers and other magical things", "maintainer": "", "maintainer_email": "", "license": "ISC", "platforms": { }, "dependencies": { }, "recommendations": { }, "suggestions": { }, "conflicting": { }, "providing": { }, "replacing": { }, "attributes": { }, "groupings": { }, "recipes": { }, "version": "1.5.0", "source_url": "", "issues_url": "", "privacy": false } { "name": "@pando/parse-js-examples", "version": "4.3.6", "private": true, "description": "fast-csv parsing examples", "scripts": { "list": "run-examples list", "all-examples": "run-examples all", "example": "run-examples run" }, "dependencies": { "@pando/parse": "4.3.6", "example-runner": "4.3.6" } } 
package.json { "name": "closure-externs-mocha", "version": "1.0.1", "description": "Mocha externs for Closure Compiler.", "repository": { "type": "git", "url": "git://github.com/teppeis/closure-externs-mocha.git" }, "keywords": [ "mocha", "closure" ], "author": " <>", "license": "MIT", "readmeFilename": "README.md", "gitHead": "0eb7853f740df460f63159cc63ab456464850d27" } { "basic_info": { "description_header": "Hola!", "description": "👋 Soy . Disfruto conectar con la gente y resolver problemas complejos. Me encanta el ajedrez y los cubos Rubik. Actualmente soy agente de bienes raices y desarrollador web.", "section_name": { "about": "Sobre mi", "projects": "Projectos", "skills": "Talentos", "experience": "Experiencia" } }, "projects": [ { "title": "Salud", "startDate": "2020", "description": "Para mi 2022, mi salud sera mi primera prioridad. Esto incluye todo desde mi salud fisica hasta mental. Nutricion, ejercicio y dormir seran los puntos de enfoque en los que estare trabajando mas que nada", "images": [ "images/portfolio/animal-shelter/p1.jpg", "images/portfolio/animal-shelter/p2.jpg" ], "url": "", "technologies": [ { "class": "devicon-angularjs-plain", "name": "Angular" }, { "class": "devicon-typescript-plain", "name": "TypeScript" }, { "class": "devicon-csharp-plain", "name": "C#" } ] }, { "title": "Ajedrez", "startDate": "2018", "description": "Me encantaria combertirme en Maestro Nacional (NM) en la liga de Estados Unidos, USCF. Es una meta que consumira mucho de mi tiempo y esfuerzo, pero estoy emocionado de ver lo que puedo lograr. Sin embargo, continuare con mi formacion en el ajedrez.", "images": [ "images/portfolio/photography/p1.jpg", "images/portfolio/photography/p2.jpg" ] }, { "title": "Jiu-Jitsu", "startDate": "2015", "description": "Desde que comenze a entrenar jiu-jitsu a principios de este año, me enamore del arte. 
Me gustaria volver a entrenar y conseguir mi cinta azul.", "images": [ "images/portfolio/adventure/p1.jpg", "images/portfolio/adventure/p2.jpg" ], "url": "https://github.com", "technologies": [ { "class": "devicon-angularjs-plain", "name": "Angular" }, { "class": "devicon-typescript-plain", "name": "TypeScript" }, { "class": "devicon-csharp-plain", "name": "C#" } ] } ], "experience": [ { "company": "CTS Construction", "title": "Front-End Developer", "years": "2021", "mainTech": ["React, Styled Components"], "technologies": ["React", "JavaScript", "C# API by "] }, { "company": "Amazon", "title": "Warehouse Associate", "years": "2020", "mainTech": [""], "technologies": [""] } ] } {"topic_27": {"num_tags": 1, "name": "topic_27", "full_name": "topic_27", "num_included_tokens": 1}, "topic_55": {"num_tags": 27, "name": "topic_55", "full_name": "topic_55", "num_included_tokens": 27}, "topic_41": {"num_tags": 15, "name": "topic_41", "full_name": "topic_41", "num_included_tokens": 15}, "topic_51": {"num_tags": 16, "name": "topic_51", "full_name": "topic_51", "num_included_tokens": 16}, "topic_29": {"num_tags": 6, "name": "topic_29", "full_name": "topic_29", "num_included_tokens": 6}, "topic_12": {"num_tags": 1, "name": "topic_12", "full_name": "topic_12", "num_included_tokens": 1}, "topic_21": {"num_tags": 50, "name": "topic_21", "full_name": "topic_21", "num_included_tokens": 50}, "topic_80": {"num_tags": 3, "name": "topic_80", "full_name": "topic_80", "num_included_tokens": 3}, "topic_79": {"num_tags": 3, "name": "topic_79", "full_name": "topic_79", "num_included_tokens": 3}, "topic_64": {"num_tags": 5, "name": "topic_64", "full_name": "topic_64", "num_included_tokens": 5}, "topic_95": {"num_tags": 11, "name": "topic_95", "full_name": "topic_95", "num_included_tokens": 11}, "topic_0": {"num_tags": 6, "name": "topic_0", "full_name": "topic_0", "num_included_tokens": 6}, "topic_62": {"num_tags": 21, "name": "topic_62", "full_name": "topic_62", "num_included_tokens": 21}, "topic_74": {"num_tags": 3, "name": "topic_74", "full_name": "topic_74", "num_included_tokens": 3}, "topic_99": {"num_tags": 3, "name": "topic_99", "full_name": "topic_99", "num_included_tokens": 3}, "topic_73": {"num_tags": 4, "name": "topic_73", "full_name": "topic_73", "num_included_tokens": 4}}{ "contract": "0x66c16e9524e4143c63fca51a2cbe3cc18944c794", "tool": "mythril", "start": 1563768868.801559, "end": 1563768873.0485477, "duration": 4.246988773345947, "analysis": { "success": false, "error": "Solc experienced a fatal error (code 1).\n\n/unique_chucks/45/0x66c16e9524e4143c63fca51a2cbe3cc18944c794.sol:1:1: Error: Source file requires different compiler version (current compiler is 0.4.25+commit.59dbf8f1.Linux.g++ - note that nightly builds are considered to be strictly less than the released version\npragma solidity 0.4.24;\r\n^---------------------^\n/unique_chucks/45/0x66c16e9524e4143c63fca51a2cbe3cc18944c794.sol:34:1: Error: Source file requires different compiler version (current compiler is 0.4.25+commit.59dbf8f1.Linux.g++ - note that nightly builds are considered to be strictly less than the released version\npragma solidity 0.4.24;\r\n^---------------------^\n/unique_chucks/45/0x66c16e9524e4143c63fca51a2cbe3cc18944c794.sol:164:1: Error: Source file requires different compiler version (current compiler is 0.4.25+commit.59dbf8f1.Linux.g++ - note that nightly builds are considered to be strictly less than the released version\npragma solidity 
0.4.24;\r\n^---------------------^\n/unique_chucks/45/0x66c16e9524e4143c63fca51a2cbe3cc18944c794.sol:209:1: Error: Source file requires different compiler version (current compiler is 0.4.25+commit.59dbf8f1.Linux.g++ - note that nightly builds are considered to be strictly less than the released version\npragma solidity 0.4.24;\r\n^---------------------^\n/unique_chucks/45/0x66c16e9524e4143c63fca51a2cbe3cc18944c794.sol:143:5: Warning: Functions in interfaces should be declared external.\n function balanceOf(address _owner) public view returns (uint balance);\r\n ^--------------------------------------------------------------------^\n/unique_chucks/45/0x66c16e9524e4143c63fca51a2cbe3cc18944c794.sol:144:5: Warning: Functions in interfaces should be declared external.\n function transfer(address _to, uint _value) public returns (bool success);\r\n ^------------------------------------------------------------------------^\n", "issues": [] } }{ "url": "https://static.wikia.nocookie.net/griftlands_gamepedia_en/images/a/a8/Initial_Packrat.png/revision/latest?cb=20210808160005", "sha1": "30c6d8a84fb94232a5e7bd03f3c1710fc19ec091" }nathan818fr/node-java-props { "key1": "-monotype-timesnewroman-regular-r---*-%d-*-*-p-*-iso8859-1serif.1a-monotype-timesnewroman-regular-r-normal--*-%d-*-*-p-*-iso8859-2serif.2a-b&h-LucidaBrightLat4-Normal-r-normal--*-%d-*-*-p-*-iso8859-4serif.3a-monotype-times-regular-r-normal--*-%d-*-*-p-*-iso8859-5serif.4a-monotype-timesnewromangreek-regular-r-normal--*-%d-*-*-p-*-iso8859-7serif.5a-monotype-times-regular-r-normal--*-%d-*-*-p-*-iso8859-9serif.6a-monotype-times-regular-r-normal--*-%d-*-*-p-*-iso8859-15serif.7a-hanyi-ming-medium-r-normal--*-%d-*-*-m-*-big5-1serif.8a-sun-song-medium-r-normal--*-%d-*-*-c-*-gb2312.1980-0serif.9a-ricoh-hgminchol-medium-r-normal--*-%d-*-*-m-*-jisx0201.1976-0serif.10a-ricoh-hgminchol-medium-r-normal--*-%d-*-*-m-*-jisx0208.1983-0serif.11a-ricoh-heiseimin-w3-r-normal--*-%d-*-*-m-*-jisx0212.1990-0serif.12a-hanyang-myeongjo-medium-r-normal--*-%d-*-*-m-*-ksc5601.1992-3serif.13a-urw-itczapfdingbats-medium-r-normal--*-%d-*-*-p-*-sun-fontspecificserif.14a-*-symbol-medium-r-normal--*-%d-*-*-p-*-sun-fontspecificbserif.italic.0=-monotype-timesbnewbroman-regular-i---*-%d-*-*-p-*-iso8859-1bserif.italic.1=-monotype-timesbnewbroman-regular-i-normal-italic-*-%d-*-*-p-*-iso8859-2", "key2": "-b&h-LucidaBrightLat4-normal-i-normal-Italic-*-%d-*-*-p-*-iso8859-4" } amaajemyfren/data { "abstract": "In this talk we'll first see the basic idea behind serverless and learn\nhow to deploy a very simple web application to AWS Lambda using Zappa.\nWe'll then look in detail at the \"embarrassingly parallel\" problems\nwhere serverless really shines for data scientists. In particular we'll\ntake a look at PyWren, an ultra-lightweight alternative to heavy big\ndata distributed systems such as Spark. We'll learn how PyWren uses AWS\nLambda as its computational backend to churn through huge analytics\ntasks. PyWren opens up big data to mere mortal data scientists who don't\nhave the budget or engineering support for a long- lived cluster. We'll\nfinish up by using PyWren and Zappa to train and deploy a production\nmachine learning model.\n", "copyright_text": null, "description": "Working in the cloud means you don\u2019t have to deal with hardware. The\ngoal of \"serverless\" is to also avoid dealing with operating systems. 
It\noffers instances that run for the duration of a single function call.\nThese instances have limitations, but a lot of what data scientists do\nis a perfect fit for this new world! We'll see how to train and deploy\nmachine learning using this infrastructure.\n", "duration": 2747, "language": "eng", "recorded": "2018-10-22", "related_urls": [ { "label": "schedule", "url": "https://pydata.org/la2018/schedule/" } ], "speakers": [ "" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/_wBj4PM8awM/maxresdefault.jpg", "title": "Serverless for Data Scientists", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=_wBj4PM8awM" } ] } Milan0099/Travel { "name": "fly", "version": "0.0.0", "authors": [ " <>" ], "license": "MIT", "ignore": [ "**/.*", "node_modules", "bower_components", "test", "tests", "web/assets" ], "dependencies": { "bootstrap": "latest", "components-font-awesome": "latest", "jquery-form" : "latest", "masonry" : "latest", "imagesloaded" : "latest", "ekko-lightbox" : "latest", "eonasdan-bootstrap-datetimepicker" : "latest", "select2" : "latest", "qtip2" : "latest", "bootstrap-daterangepicker" : "latest", "jquery-validation" : "latest" } } turbolent/heckel-diff { "name": "heckel-diff-items", "version": "0.1.0", "license": "MIT", "author": " <>", "description": "Paul Heckel's A Technique for Isolating Differences Between Files", "repository": { "type": "git", "url": "https://github.com/turbolent/heckel-diff-items.git" }, "main": "dist/index.js", "types": "dist/declarations/index.d.ts", "module": "src/index.js", "files": [ "README.md", "dist/", "src/", "LICENSE" ], "scripts": { "build": "webpack --mode development --progress --profile --colors", "buildProduction": "webpack --mode production --progress --profile --colors", "prepublishOnly": "yarn run buildProduction", "lint": "tslint -c tslint.json 'src/**/*.ts'", "buildTestEnv": "webpack --mode production --context tests/env --config tests/env/webpack.config.js", "test": "jest" }, "devDependencies": { "@types/jest": "22.2.3", "jest": "22.4.4", "ts-loader": "4.3.0", "tslint": "5.10.0", "typescript": "2.8.3", "webpack": "4.9.1", "webpack-cli": "2.1.4" }, "jest": { "moduleFileExtensions": [ "ts", "js", "json" ], "transform": { "^.+\\.ts$": "/tests/preprocessor.js" }, "testMatch": [ "/tests/*.spec.ts" ], "verbose": true } } 100-1000 { "id": 142691, "info": { "name": "Battle Net Forums - Hide rating and vote buttons", "description": "Also hides \"highly rated\" status tag and removes opacity effect on low rated posts.

UPD 22.05: Automatically un-buries (removes the spoiler effect from) very low rated posts
UPD 03.07: Option to hide post counter

Скрывает рейтинг и кнопки лайков/дизлайков. А так же плашку \"высокий рейтинг\" и убирает эффект прозрачности на задизлайканных постах.", "additionalInfo": "For an unbiased opinion.", "format": "uso", "category": "battle", "createdAt": "2017-05-15T22:20:02.000Z", "updatedAt": "2017-07-03T01:19:54.000Z", "license": "CC0-1.0", "author": { "id": 358606, "name": "" } }, "stats": { "installs": { "total": 115, "weekly": 0 } }, "screenshots": { "main": { "name": "142691_after.png", "archived": true } }, "discussions": { "stats": { "discussionsCount": 1, "commentsCount": 3 }, "data": [ { "id": 57006, "title": "Счётчик постов бы убрать ещё", "createdAt": "2017-07-02T07:47:11.000Z", "author": { "id": 454642, "name": "fastlove7" } } ] }, "style": { "css": "@-moz-document url-prefix(\"https://eu.battle.net/forums/\"), url-prefix(\"https://us.battle.net/forums/\"), url-prefix(\"https://kr.battle.net/forums/\"), url-prefix(\"https://www.battlenet.com.cn/forums/\"), url-prefix(\"https://cn.battle.net/forums/\"), url-prefix(\"https://tw.battle.net/forums/\") {\r\n\r\n.TopicPost-rank, .TopicPost-button--like, .TopicPost-button--dislike, .highly-rated, .TopicPost.is-buried .TopicPost-button--viewPost {\r\n display: none !important;\r\n}\r\n\r\n.TopicPost-button--quote {\r\n margin-left: 0 !important;\r\n}\r\n\r\n.TopicPost.is-low-rated .LoginPlaceholder.is-previewing .TopicPost-body--preview,.TopicPost.is-low-rated .Poll,.TopicPost.is-low-rated .Timestamp-details,.TopicPost.is-low-rated .TopicForm.is-previewing .TopicPost-body--preview,.TopicPost.is-low-rated .TopicPost-actions,.TopicPost.is-low-rated .TopicPost-author,.TopicPost.is-low-rated .TopicPost-bodyContent,.TopicPost.is-low-rated .TopicPost-bodyStatus,.TopicPost.is-low-rated .TopicPost-button--viewPost {\r\n opacity: 1 !important;\r\n}\r\n\r\n.LoginPlaceholder.is-previewing .TopicPost-body--preview.is-buried, .Poll.is-buried, .TopicForm.is-previewing .TopicPost-body--preview.is-buried, .TopicPost-bodyContent.is-buried {\r\n display: block !important; \r\n}\r\n\r\n.TopicPost-actions.is-buried {\r\n display: flex !important; \r\n}\r\n\r\n.Author-posts {\r\n display: /*[[hide]]*/ !important;\r\n}\r\n\r\n}", "settings": [ { "key": "hide", "label": "Post counter", "type": "dropdown", "options": [ { "key": "0", "label": "Show", "value": "inline", "default": true }, { "key": "1", "label": "Hide", "value": "none", "default": false } ] } ] } }{"date":20200314,"state":"NC","positive":23,"negative":137,"pending":null,"hospitalizedCurrently":null,"hospitalizedCumulative":null,"inIcuCurrently":null,"inIcuCumulative":null,"onVentilatorCurrently":null,"onVentilatorCumulative":null,"recovered":null,"dataQualityGrade":"","lastUpdateEt":"3/14/2020 12:00","dateModified":"2020-03-14T12:00:00Z","checkTimeEt":"03/14 
08:00","death":null,"hospitalized":null,"dateChecked":"2020-03-14T12:00:00Z","totalTestsViral":null,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":null,"deathConfirmed":null,"deathProbable":null,"totalTestEncountersViral":null,"totalTestsPeopleViral":null,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"37","positiveIncrease":8,"negativeIncrease":51,"total":160,"totalTestResultsSource":"posNeg","totalTestResults":160,"totalTestResultsIncrease":59,"posNeg":160,"deathIncrease":0,"hospitalizedIncrease":0,"hash":"63a606d4e7f6899ae5d04579fd51201e4c03dcea","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} {"output":"Para cada caso, exiba uma única linha, a quantidade máxima de posições possíveis de serem visitadas se a posição de início for escolhida de forma ótima.","input":"Haverá diversos casos de testes. Cada caso inicia com três inteiros, L, C e K (1 ≤ L, C ≤ 1000, 0 ≤ K ≤ 5), representando, respectivamente, a quantidade de linhas, colunas, e a quantidade de posições especiais que podem ser utilizadas. A segunda linha contém um caractere P (\u2018N\u2019 ou \u2018R\u2019) representando o modo do jogo, normal ou reverso.\n\n A seguir haverá L linhas, cada linha contendo C inteiros X (0 <= X <= 10^7).\n\n A entrada termina com L = C = K = 0, a qual não deve ser processada.","level":9,"name":"Brincando com Números","has_images":false,"description":"Alguns amigos, entediados em uma tarde de domingo, resolveram inventar uma brincadeira. Eles desenharam uma matriz de L linhas e C colunas em uma folha de papel, e em seguida escreveram um número em cada uma de suas LxC posições.\n\n A brincadeira funciona da seguinte maneira:\n\n \n \tUma posição (i, j) qualquer da matriz pode ser escolhida para começar o jogo, i representando uma linha, e j uma coluna.\n \tA partir dessa posição, é possível mover para as posições (i-1, j) \u2013 (i, j+1) se o modo do jogo for normal ou para as posições (i+1, j) \u2013 (i, j-1) se o modo do jogo for reverso. Porém, só é possível mover para alguma dessas posições, se o número contido nela for menor que o número da posição atual.\n \tNesse jogo, há algumas posições especiais. São as posições que contêm um número primo. Quando um jogador cai nessa posição, ele pode se mover para qualquer uma das 2 adjacentes(de acordo com o modo do jogo), mesmo que o número contido nela não seja menor que o número atual.\n \tEm uma partida, é possível utilizar apenas K posições especiais. 
Após a utilização das K posições, uma posição com número primo será tratada como uma posição normal.\n \tO objetivo do jogo é \u201cvisitar\u201d o maior número de posições possíveis.","id":"1788","category":"Paradigmas","statistics":{"level":"9 / 10","submissions":332,"solved":120,"ratio":"36.14%"}}{ "token" : "", "prefix" : ">", "using" : "0.9.1" }0 { "name": "cardboard-iguana.com", "version": "1.0.0", "private": true, "author": " <>", "dependencies": { "all-relative": "*" }, "repository": { "url": "https://github.com/necopinus/cardboard-iguana.com" } } { "name": "ENS", "version": "1.0.0", "description": "", "main": "subdomain.en.js", "scripts": { "build": "browserify records.en.js > recordsBundle.en.js && browserify view.en.js > viewBundle.en.js" }, "keywords": [], "author": "", "license": "MIT", "dependencies": { "eth-ens-namehash": "^2.0.8" }, "devDependencies": { "browserify": "^16.5.1" } } src/main/resources/static/mas_json/2018_ccs_-8775003860475752162.json {"title": "Towards Usable Checksums: Automating the Integrity Verification of Web Downloads for the Masses.", "fields": ["checksum", "usable", "internet privacy", "computer security", "usability"], "abstract": null, "citation": "Not cited", "departments": ["HEC Lausanne", "HEC Lausanne", "HEC Lausanne", "ETH Zurich", "Google"], "authors": [".....http://dblp.org/pers/hd/c/Cherubini:Mauro", ".....http://dblp.org/pers/hd/m/Meylan:Alexandre", ".....http://dblp.org/pers/hd/c/Chapuis:Bertil", ".....http://dblp.org/pers/hd/h/Humbert:Mathias", ".....http://dblp.org/pers/hd/b/Bilogrevic:Igor", "K\u00e9vin Huguenin.....http://dblp.org/pers/hd/h/Huguenin:K=eacute=vin"], "conf": "ccs", "year": "2018", "pages": 16}{ "name": "Chocolate Cake Shake", "description": "OOO Chocolate Milk is a REEEALLY good base for this chocolate milk shake profile. It has a nice milky dairy note and a subtle hershey's chocolate syrup note.\r\n\r\nLB Vanilla Ice Cream is going to be our ice cream note. It's a little eggy, which is perfect for an ice cream. You won't get any notes of pepper from this flavor.\r\n\r\nOur cake note is JF Yellow Cake. 
Yellow Cake i a nice sweet bakery note, it's a bit dry but it blends up nicely with the dairy and chocolate notes.\r\n\r\nSweeten to taste.\r\n\r\nWatch the video here:\r\n\r\nhttps:\/\/youtu.be\/rhJPYjTvRO0", "id": 207019, "image_url": null, "updated_at": "2020-12-12T11:53:48.000-05:00", "deleted_at": null, "recipe_type": 1, "recipe_flavors": [ { "name": "Chocolate Milk", "flavor_id": "6446", "millipercent": 6000, "vendor": "OOO" }, { "name": "Yellow Cake", "flavor_id": "4560", "millipercent": 2000, "vendor": "JF" }, { "name": "Vanilla Ice Cream", "flavor_id": "1511", "millipercent": 3000, "vendor": "LB" } ], "author": "RUG_ly", "views": "607", "created_at": "2020-10-12", "slug": "chocolate_cake_shake", "total_flavoring": "11.0%", "steep_days": "7", "best_vg": "80%", "temperature": "0" }{"nom":"Moret-Loing-et-Orvanne","circ":"3ème circonscription","dpt":"Seine-et-Marne","inscrits":9538,"abs":5688,"votants":3850,"blancs":381,"nuls":105,"exp":3364,"res":[{"nuance":"UDI","nom":"","voix":2528},{"nuance":"FN","nom":"","voix":836}]}v2/states/hi/2020-08-23/simple.json {"meta":{"build_time":"2021-06-01T07:04:25.950Z","license":"CC-BY-4.0","version":"2.0-beta"},"data":{"date":"2020-08-23","state":"HI","meta":{"data_quality_grade":"C","updated":"2020-08-22T22:00:00Z","tests":{"total_source":"totalTestEncountersViral"}},"cases":{"total":6356,"confirmed":null,"probable":null},"tests":{"pcr":{"total":223447,"pending":null,"encounters":{"total":223447},"specimens":{"total":228381,"positive":6369,"negative":null},"people":{"total":176184,"positive":6356,"negative":169828}},"antibody":{"encounters":{"total":null,"positive":null,"negative":null},"people":{"total":null,"positive":null,"negative":null}},"antigen":{"encounters":{"total":null,"positive":null,"negative":null},"people":{"total":null,"positive":null,"negative":null}}},"outcomes":{"recovered":2107,"hospitalized":{"total":377,"currently":192,"in_icu":{"total":null,"currently":35},"on_ventilator":{"total":null,"currently":19}},"death":{"total":47,"confirmed":47,"probable":null}}}} {"template":{"small":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/1.0","medium":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/2.0","large":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/3.0"},"channels":{"kimmyminx":{"title":"KimmyMinx","channel_id":37685768,"link":"http://twitch.tv/kimmyminx","desc":null,"plans":{"$4.99":"138338","$9.99":"138342","$24.99":"138343"},"id":"kimmyminx","first_seen":"2017-06-28 19:20:04","badge":null,"badge_starting":null,"badge_3m":null,"badge_6m":null,"badge_12m":null,"badge_24m":null,"badges":null,"bits_badges":null,"cheermote1":null,"cheermote100":null,"cheermote1000":null,"cheermote5000":null,"cheermote10000":null,"set":138338,"emotes":[{"code":"kimmymPOO","image_id":225066,"set":138338}]}}} 0 {"artist_id":"ARHRE771187B9B5AED","artist_latitude":null,"artist_location":"","artist_longitude":null,"artist_name":"Shaped By Fate","duration":398.36689,"num_songs":1,"song_id":"SOKKTGI12AC4689F59","title":"From Perfection To Poison","year":0}data/styles/138692.json { "id": 138692, "info": { "name": "Simplenote 3 column dark", "description": "Changes simplenote web ui to have a three column (tags/note list/note body) layout", "additionalInfo": null, "format": "uso", "category": "simplenote", "createdAt": "2017-02-09T10:05:33.000Z", "updatedAt": "2017-02-09T10:05:33.000Z", "license": "CC-BY-4.0", "author": { "id": 395518, "name": "" } }, "stats": { "installs": { "total": 167, "weekly": 0 } }, "screenshots": { "main": { 
"name": "138692_after.png", "archived": false } }, "discussions": { "stats": { "discussionsCount": 1, "commentsCount": 0 }, "data": [ { "id": 57262, "title": "Excellent", "createdAt": "2017-07-14T04:13:05.000Z", "author": { "id": 290574, "name": "McX" } } ] }, "style": { "css": "@-moz-document url(\"https://app.simplenote.com/\") {\r\n.note #txtarea {\r\n font-size: 1em !important;\r\n color: #ddd;\r\n font-family: monospace !important;\r\n}\r\n.app {\r\n background: #293134;\r\n}\r\n.searchfield {\r\n border: 1px solid #000000;\r\n box-shadow: none !important;\r\n}\r\n.toolbar {\r\n border-bottom: 1px solid #000000;\r\n}\r\n.sideview {\r\n border-bottom: 1px solid #000000;\r\n}\r\n.tagbar {\r\n border-bottom: 1px solid #000000;\r\n left: 400px !important;\r\n}\r\n.sidebar {\r\n border-right: 1px solid #000000;\r\n width: 465px;\r\n}\r\n.wrapper {\r\n bottom: 0px;\r\n}\r\n.footer {\r\n display: none;\r\n}\r\n.searchfield {\r\n color: #ddd;\r\n}\r\n.notes li {\r\n padding: 5px;\r\n text-shadow: none;\r\n border-bottom: 1px solid #000000;\r\n}\r\n.notes li .note-preview-title {\r\n color: #ddd;\r\n font-size: 15px;\r\n}\r\n.note #txtarea {\r\n padding: 20px;\r\n}\r\n.note #static_content {\r\n color: #ddd;\r\n}\r\n.notes li:hover {\r\n background-color: #555;\r\n}\r\n.notes li.selected {\r\n background: #555;\r\n}\r\n.popover .menu.tags {\r\n z-index: 0 !important;\r\n position: static !important;\r\n visibility: visible !important;\r\n padding-left: 5px;\r\n margin-top: 5px;\r\n}\r\n.popover .menu li a, .popover .menu h4 {\r\n color: #ddd;\r\n background: #293134;\r\n text-shadow: none;\r\n border-bottom: 1px solid #000000;\r\n}\r\n.popover .menu .taglist {\r\n overflow: visible;\r\n}\r\n.notes {\r\n left: 132px;\r\n border-left: 1px solid #000000;\r\n}\r\n.note {\r\n left: 461px;\r\n}\r\n.sideview span.tail {\r\n visibility: hidden;\r\n}\r\n.tagmenu.button .menu li.trash a.empty {\r\n background: #555;\r\n text-shadow: none;\r\n}\r\n.toolbar .tools {\r\n left: 480px;\r\n}\r\n}" } }inputfile/json/2018-00467.json { "id": "2018-00467", "title": "Untitled (Singapore River Bank)", "source": "Bumblebee", "path": "inputfile/json/", "content": "\n accession_no_csv: 2018-00467\n\n Image: \n\n object_work_type: drawings (visual works)\n\n title_text: Untitled (Singapore River Bank)\n\n preference: main\n\n title_language: \n\n creator_2: Tan\n\n creator_1: Tan\n e\n 1930- Tan\n Choon Ghee\n\n creator_role: artists (visual artists)\n\n creation_date: 1964\n\n creation_place_original_location: Pulau Pinang (state) Singapore (nation)\n\n styles_periods_indexing_terms: Not indicated\n\n inscriptions: \n\n inscription_language: \n\n scale_type: Not indicated\n\n shape: rectangular\n\n materials_name: rice paper (paper) ink\n\n techniques_name: drawing (image-making) creating (artistic activity)\n\n object_colour: black (colour)\n\n physical_appearance: \\The image shows a drawing. It is rectangular in shape. The lines drawn are all black in colour. There are numerous human figures depicted in the painting. The background shows a several layers of roofed buildings. A bridge can be seen at the right hand side of the drawing. The foreground shows a sheltered area. A table can be seen and several items are placed on it. The title\n signature\n and year\n \\\\S\\u0027pore River Bank\n\n subject_terms_1: figures (representation) Tan. 
1963\\\\ is stated at the bottom right hand side.\\ rivers\n\n subject_terms_2: Singapore River (Singapore) Buildings buildings (structures)\n\n subject_terms_3: Drawings Buildings Singapore River\n\n subject_terms_4: Drawing Singapore (nation)\n\n context_1: Singapore River (river)\n", "createdDate": "20201127205924", "version": 0, "latest": false, "roles": [], "metadata": { "accession_no_csv": "2018-00467", "Image": "", "object_work_type": "drawings (visual works)", "title_text": "Untitled (Singapore River Bank)", "preference": "main", "title_language": "", "creator_2": "", "creator_1": ", 1930- ", "creator_role": "artists (visual artists)", "creation_date": "1964", "creation_place_original_location": "Pulau Pinang (state) Singapore (nation)", "styles_periods_indexing_terms": "Not indicated", "inscriptions": "", "inscription_language": "", "scale_type": "Not indicated", "shape": "rectangular", "materials_name": "rice paper (paper) ink", "techniques_name": "drawing (image-making) creating (artistic activity)", "object_colour": "black (colour)", "physical_appearance": "\"The image shows a drawing. It is rectangular in shape. The lines drawn are all black in colour. There are numerous human figures depicted in the painting. The background shows a several layers of roofed buildings. A bridge can be seen at the right hand side of the drawing. The foreground shows a sheltered area. A table can be seen and several items are placed on it. The title, signature, and year, \"\"S\u0027pore River Bank", "subject_terms_1": "figures (representation) Tan. 1963\"\" is stated at the bottom right hand side.\" rivers", "subject_terms_2": "Singapore River (Singapore) Buildings buildings (structures)", "subject_terms_3": "Drawings Buildings Singapore River", "subject_terms_4": "Drawing Singapore (nation)", "context_1": "Singapore River (river)" }, "nlpDate": "20201127205924", "connectorId": 0, "tags": {} }{"base":["Device"],"code":"din","description":"The donation identification number 
(DIN)","experimental":true,"expression":"Device.extension('http://hl7.org/fhir/SearchParameter/device-extensions-Device-din')","id":"device-extensions-Device-din","name":"din","resourceType":"SearchParameter","status":"draft","type":"token","url":"http://hl7.org/fhir/SearchParameter/device-extensions-Device-din","version":"5.0.0-snapshot1","xpathUsage":"normal"}{"name":"curve_downLeft","subject":8,"date":"26122009-100603","paths":{"Pen":{"strokes":[{"x":53,"y":-559,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":62,"y":-570,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":59,"y":-592,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":50,"y":-607,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":39,"y":-621,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":19,"y":-630,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-2,"y":-638,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-31,"y":-643,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-63,"y":-641,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-105,"y":-636,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-153,"y":-621,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-210,"y":-602,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-273,"y":-571,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-347,"y":-537,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-424,"y":-490,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-507,"y":-442,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-585,"y":-383,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-662,"y":-322,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":-727,"y":-253,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":-785,"y":-184,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":-826,"y":-109,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":-852,"y":-35,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":-860,"y":38,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":-853,"y":107,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":-831,"y":173,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":-796,"y":234,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":-747,"y":290,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":-690,"y":336,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":-622,"y":376,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":-547,"y":408,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":-467,"y":435,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":-388,"y":454,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":-310,"y":472,"w":null,"z":null,
"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0},{"x":-238,"y":483,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":-173,"y":492,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0},{"x":-118,"y":500,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":35,"stroke_id":0},{"x":-75,"y":504,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":36,"stroke_id":0},{"x":-43,"y":511,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":37,"stroke_id":0},{"x":-28,"y":517,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":38,"stroke_id":0},{"x":-26,"y":525,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":39,"stroke_id":0},{"x":-37,"y":535,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":40,"stroke_id":0},{"x":-66,"y":539,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":41,"stroke_id":0},{"x":-108,"y":545,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":42,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Stylistic ST5022 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}Skuratovich/src/ITnews/Htp.ITnews/Htp.ITnews.Web/package-lock.json { "requires": true, "lockfileVersion": 1, "dependencies": { "bootstrap-star-rating": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/bootstrap-star-rating/-/bootstrap-star-rating-4.0.6.tgz", "integrity": " } } } loukwn/klouglecrawlers/crawled/1/3/4.json {"link": "http://rss.cnn.com/~r/rss/edition_technology/~3/wBGZxsDnWMU/index.html", "description": "\"Attention! Attention!\" blares the Russian voice from a loudspeaker. \"The nuclear bombs will be launched in one hour.\"\"\"/", "content": " Inside a room styled as a Soviet-era nuclear bunker, a couple of Russians race to prevent a catastrophic strike on the United States. Their quest -- the latest craze in Moscow -- is to find the nuclear launch codes and deactivate a hidden red button, which has already been pressed by a mad Russian general. It's complete fantasy; just an interactive game hosted in a building in a former industrial area of the city, harking back to the fears of the Cold War. But amid the current tensions with Russia, in which potential nuclear confrontation with the West has again been raised, it feels a little unsettling. \"I'm worried because there is very stupid information from both sides,\" said , a Russian who has just completed the Red Button Quest game. \"I know that normal people all over the world don't want any war,\" he added. A nation preparing for conflict But Russian officials have been preparing the nation for the possibility of conflict, stoking deep-seated concerns about a standoff with the West, Russia's old Cold War rival. Russian television has been broadcasting a mass training exercise, involving up to 40 million people across the country. It is designed to prepare responses, the government says, for a chemical or nuclear attack. The video shows emergency workers with protective suits and gas masks leading the civil defense rehearsal, the biggest of its kind since the collapse of the Soviet Union. It suggests the Kremlin wants Russians to take the threat of war very seriously. Of course, all-out conflict between Russia and the West remains highly unlikely. 
Analysts say the principle of Mutually Assured Destruction -- or MAD -- still holds as a deterrent, just as it did during the Cold War. But with tensions growing over Syria, Ukraine, and the Baltic states, analysts say a small risk of contact, misunderstanding and escalation between the nuclear superpowers has become very real. \"I don't think nuclear war is likely,\" says , editor of Russia in Global Affairs, a prominent foreign policy journal. \"But when two nuclear superpowers are operating with their military machines in the same area, very close to each other and they don't have proper coordination, any unintended thing can happen,\" he told CNN. Kremlin playing up fears It is a risk the Kremlin seems keen to play up, with state television upping its hardline rhetoric in recent weeks. In its flagship current affairs show, Russia's top state news anchor, -- dubbed the Kremlin's propagandist-in-chief by critics -- recently issued a stark warning of global war if Russian and US forces clash in Syria. \"Brutish behavior towards Russia could have nuclear dimensions,\" he declared. The Russian defense ministry has also released details of the latest intercontinental ballistic missile being added to its nuclear arsenal. The Satan 2, as it's known, will be the world's most destructive weapon, guaranteeing Russia's place as a top nuclear power. It is an apocalyptic vision that adds a further sense of realism to the fantasy quest being acted out by gamers in Moscow. \"I know that now in schools in Russia they tell the children that our main enemy is the US,\" said , another Moscow gamer. \"But it sounds ridiculous to me and I'm totally sure that war is impossible,\" she adds. Back in the fake Cold War bunker, the Russian gamers have cracked the launch codes and deactivated the missile launch. The United States, it seems, has again been saved from this virtual Russian nuclear attack. Hopefully, the real world will be spared such a confrontation too. ", "title": "Russian gamers race to prevent nuclear 'war'"}{ "id": 31244, "name": "Makes ClockingIT a bit nicer", "description": "ClockingIT wastes vast amount of screen in the filesmanagement inteface. 
This style removes that waste.", "user": { "id": 51724, "name": "Odalrick", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2010-08-16T02:08:44.000Z", "weekly_install_count": 0, "total_install_count": 137, "rating": null, "after_screenshot_name": "https://userstyles.org/auto_style_screenshots/31244-after.png?r=1586332980", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": null, "created": "2010-05-31T01:35:28.000Z", "category": "site", "raw_subcategory": "clockingit", "subcategory": "clockingit", "additional_info": null, "style_tags": [], "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n@-moz-document domain(\"distanssupport.clockingit.com\") {\r\n\r\n .file_cell center {\r\n float: left;\r\n }\r\n\r\n .file_cell div {\r\n float: right;\r\n }\r\n\r\n\r\n .file_cell_img,\r\n .file_cell_icon,\r\n .file_cell {\r\n width: auto !important;\r\n height: auto !important;\r\n }\r\n\r\n #flash_body {\r\n display: none;\r\n }\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/31244/makes-clockingit-a-bit-nicer.user.js", "style_settings": [] }{ "name": "VideoCompressor", "version": "1.0.0", "main": "index.js", "repository": "https://github.com/luisc13/VideoCompressor.git", "author": "GustavoBonfimS <>", "license": "MIT", "dependencies": {}, "devDependencies": { "eslint": "^7.19.0", "eslint-config-airbnb-base": "^14.2.1", "eslint-plugin-import": "^2.22.1" } } FrederickOberg/frederickoberg.github.io1-10 { "@metadata": { "authors": [ "Kghbln" ] }, "multimediaviewer-optout-help": "Der Medienbetrachter wird nicht mehr zur Anzeige von Bildern verwendet. Um ihn wieder zu nutzen, klicken Sie zunächst auf die Schaltfläche „{{int:multimediaviewer-view-expanded}}“ neben einem beliebigen Bild. 
Klicken Sie dann auf „{{int:multimediaviewer-optin-mmv}}“.", "multimediaviewer-download-attribution-cta-header": "Sie müssen den Urheber angeben", "multimediaviewer-download-attribution-cta": "Zeigen Sie mir wie" } [["897", "air philippines", "cebu", "philippines", "davao", "philippines", "0"], ["897", "air philippines", "davao", "philippines", "cebu", "philippines", "0"], ["897", "air philippines", "davao", "philippines", "manila", "philippines", "0"], ["897", "air philippines", "manila", "philippines", "davao", "philippines", "0"], ["1683", "cebu pacific", "cebu", "philippines", "davao", "philippines", "0"], ["1683", "cebu pacific", "ladag", "philippines", "davao", "philippines", "0"], ["1683", "cebu pacific", "davao", "philippines", "cebu", "philippines", "0"], ["1683", "cebu pacific", "davao", "philippines", "ladag", "philippines", "0"], ["1683", "cebu pacific", "davao", "philippines", "iloilo", "philippines", "0"], ["1683", "cebu pacific", "davao", "philippines", "manila", "philippines", "0"], ["1683", "cebu pacific", "davao", "philippines", "zamboanga", "philippines", "0"], ["1683", "cebu pacific", "iloilo", "philippines", "davao", "philippines", "0"], ["1683", "cebu pacific", "manila", "philippines", "davao", "philippines", "0"], ["1683", "cebu pacific", "zamboanga", "philippines", "davao", "philippines", "0"], ["4750", "silkair", "cebu", "philippines", "davao", "philippines", "0"], ["4750", "silkair", "davao", "philippines", "cebu", "philippines", "0"], ["4750", "silkair", "davao", "philippines", "singapore", "singapore", "0"], ["4750", "silkair", "singapore", "singapore", "davao", "philippines", "0"], ["3952", "philippine airlines", "ladag", "philippines", "davao", "philippines", "0"], ["3952", "philippine airlines", "davao", "philippines", "ladag", "philippines", "0"], ["3952", "philippine airlines", "davao", "philippines", "manila", "philippines", "0"], ["3952", "philippine airlines", "davao", "philippines", "zamboanga", "philippines", "0"], ["3952", "philippine airlines", "manila", "philippines", "davao", "philippines", "0"], ["3952", "philippine airlines", "zamboanga", "philippines", "davao", "philippines", "0"], ["9764", "zest air", "davao", "philippines", "manila", "philippines", "0"], ["9764", "zest air", "manila", "philippines", "davao", "philippines", "0"]]stockmouton/lenia.world {"name": "Lenia #4", "description": "A beautiful mathematical life-form", "external_link": "https://lenia.world", "image": "https://lenia.world/metadata/4.gif", "animation_url": "https://lenia.world/metadata/4.mp4", "attributes": [{"value": "laurel", "trait_type": "Colormap"}, {"value": "pulsium", "trait_type": "Family"}, {"value": "Kiai", "trait_type": "Ki", "numerical_value": 0.45067}, {"value": "Mental", "trait_type": "Aura", "numerical_value": 0.72961}, {"value": "Feather", "trait_type": "Weight", "numerical_value": 0.73272}, {"value": "Vibranium", "trait_type": "Robustness", "numerical_value": 0.81814}, {"value": "Kawarimi", "trait_type": "Avoidance", "numerical_value": 0.00868}, {"value": "Immovable", "trait_type": "Velocity", "numerical_value": 0.00346}, {"value": "Demie", "trait_type": "Spread", "numerical_value": 0.90163}], "config": {"kernels_params": [{"b": [1.0, 0.25], "c_in": 0, "c_out": 0, "gf_id": 0, "h": 1, "k_id": 0, "m": 0.37785670457936915, "q": 4, "r": 1, "s": 0.0653968055948577}], "world_params": {"R": 13, "T": 10, "nb_channels": 1, "nb_dims": 2, "scale": 1.0, "update_fn_version": "v1", "weighted_average": true}, "cells": 
"AAAAAAAAAAP\u00f3\u00c3\u00f9\u00e0\u00e2\u00c3\u00f7P\u00f3AAAAAAAAAAAAAAAAY\u00e7\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdY\u00e5AAAAAAAAAAr\u00cb\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdr\u00c8AAAAAAH\u00f7\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdH\u00f6AAAA\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00cdw\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdAAAA\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdAAAAAA\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdAAA\u00c8\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00c4\u00d7AAAAAA\u00c4\u00da\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdA\u00c7A\u00e1\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00ec\u00ecAAAAAA\u00ec\u00f0\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdA\u00e0AA\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdfFAAfN\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdAAAAkX\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdkVAAAAAA\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fdAAAAAAAAA\u00fa\u00f8r\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00f8mA\u00f9AAAAAAAAAAAAd\u00db\u00e9\u00f2\u00fd\u00fd\u00fd\u00fd\u00fd\u00fd\u00e9\u00efd\u00dbAAAAAAAAAAAAAAAAAAAAA\u00e7B\u00caA\u00e7AAAAAAAAAAAA::1;14;15"}, "tokenID": "4"}{ "title": "Messiers and Mars", "explanation": "A telescopic tour of the constellation Sagittarius offers the many bright clusters and nebulae of dimensioned space in a starscape surrounding the galactic center. This gorgeous color deep-sky photograph visits two such lovely sights, cataloged by the 18th century cosmic tourist as M8 and M20. M20 (upper left), the Trifid Nebula, presents a striking contrast in red/blue colors and dark dust lanes. Just below and to the right is the expansive, alluring red glow of M8, the Lagoon Nebula. Both nebulae are a few thousand light-years distant but at the far right, the dominant celestial beacon is a \"local\" source, the planet Mars. Just passing through Sagittarius and strongly overexposed in this picture, the Red Planet was a short 4 light-minutes away. Now headed for its closest approach to planet Earth in recorded history, Mars rises in the east southeast by midnight shining brightly at about -1.4 magnitude. 
Urban imager recorded this photograph at 3:00 AM on May 20th, 2001 in clear skies over Camp Hancock, Oregon, USA.", "date": "2003-06-28", "hdurl": "https://apod.nasa.gov/apod/image/0306/m8m20mars_cole_big.jpg", "service_version": "v1", "copyright": "", "media_type": "image", "url": "https://apod.nasa.gov/apod/image/0306/m8m20mars_cole.jpg" }{ "name": "octotree", "version": "1.0.0", "description": "Display GitHub code in tree format", "main": "inject.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "https://github.com/buunguyen/octotree" }, "author": " <> (https://github.com/buunguyen)", "license": "MIT", "bugs": { "url": "https://github.com/buunguyen/octotree/issues" }, "homepage": "https://github.com/buunguyen/octotree", "devDependencies": { "gulp": "^3.6.2", "gulp-clean": "^0.2.4", "event-stream": "^3.1.5", "gulp-run-sequence": "^0.3.2", "gulp-concat": "^2.2.0", "gulp-autoprefixer": "0.0.7", "gulp-less": "^1.2.3", "map-stream": "^0.1.0", "gulp-util": "^2.2.16", "stream-series": "^0.1.0" } } { "name": "react-hook-text-overflow", "version": "1.0.1", "description": "A React hook to determine if text is overflowing in an element showing an ellipsis", "keywords": [ "react", "hook", "overflow", "ellipsis" ], "repository": ":bence-toth/react-hook-text-overflow.git", "license": "MIT", "author": " <>", "main": "dist/index.js", "scripts": { "build": "rm -rf dist && babel src --out-dir dist", "build:watch": "babel src --out-dir dist --watch" }, "devDependencies": { "@babel/cli": "^7.12.8", "@babel/core": "^7.12.9", "@babel/preset-env": "^7.12.7", "@babel/preset-react": "^7.12.7", "babel-core": "^7.0.0-bridge", "react": "^17.0.1" }, "peerDependencies": { "react": "^16.8.0 || ^17 || ^18" }, "files": [ "/dist" ] } resources/json_data/record_3352.json {"cam/image_array":"3352_cam-image_array_.jpg","user/throttle":0.0,"user/angle":0.09370100498199463,"user/mode":"user"}{ "_from": "@yarnpkg/shell@^2.4.1", "_id": "@yarnpkg/shell@2.4.1", "_inBundle": false, "_integrity": "sha512-oNNJkH8ZI5uwu0dMkJf737yMSY1WXn9gp55DqSA5wAOhKvV5DJTXFETxkVgBQhO6Bow9tMGSpvowTMD/oAW/9g==", "_location": "/@yarnpkg/shell", "_phantomChildren": { "isexe": "2.0.0" }, "_requested": { "type": "range", "registry": true, "raw": "@yarnpkg/shell@^2.4.1", "name": "@yarnpkg/shell", "escapedName": "@yarnpkg%2fshell", "scope": "@yarnpkg", "rawSpec": "^2.4.1", "saveSpec": null, "fetchSpec": "^2.4.1" }, "_requiredBy": [ "/@yarnpkg/core" ], "_resolved": "https://registry.npmjs.org/@yarnpkg/shell/-/shell-2.4.1.tgz", "_shasum": "abc557f8924987c9c382703e897433a82780265d", "_spec": "@yarnpkg/shell@^2.4.1", "_where": "C:\\Users\\abo_a\\Documents\\GitHub\\MoonsDust\\node_modules\\@yarnpkg\\core", "bin": { "shell": "lib/cli.js" }, "bugs": { "url": "https://github.com/yarnpkg/berry/issues" }, "bundleDependencies": false, "dependencies": { "@yarnpkg/fslib": "^2.4.0", "@yarnpkg/parsers": "^2.3.0", "clipanion": "^2.6.2", "cross-spawn": "7.0.3", "fast-glob": "^3.2.2", "micromatch": "^4.0.2", "stream-buffers": "^3.0.2", "tslib": "^1.13.0" }, "deprecated": false, "description": "A JavaScript implementation of a bash-like shell (we use it in Yarn 2 to provide cross-platform scripting). 
This package exposes an API that abstracts both the parser and the interpreter; should you only need the parser you can check out `@yarnpkg/parsers`, but you probably won't need it.", "devDependencies": { "@types/cross-spawn": "6.0.0", "@types/micromatch": "^4.0.1", "@yarnpkg/monorepo": "0.0.0" }, "engines": { "node": ">=10.19.0" }, "files": [ "/lib/**/*" ], "homepage": "https://github.com/yarnpkg/berry#readme", "license": "BSD-2-Clause", "main": "./lib/index.js", "name": "@yarnpkg/shell", "publishConfig": { "main": "./lib/index.js", "bin": "./lib/cli.js", "typings": "./lib/index.d.ts" }, "repository": { "type": "git", "url": "git+ssh://git@github.com/yarnpkg/berry.git" }, "scripts": { "postpack": "rm -rf lib", "prepack": "run build:compile \"$(pwd)\"", "release": "yarn npm publish", "test:shell": "run test:unit packages/yarnpkg-shell" }, "typings": "./lib/index.d.ts", "version": "2.4.1" } tslint.json10-100 { "extends": ["dcl-tslint-config-standard", "tslint-plugin-prettier"], "rules": { "no-commented-out-code": false, "prettier": [true, { "printWidth": 80, "singleQuote": true, "semi": false }] }, "linterOptions": { "exclude": ["./**/*.js"] } } imbhargav5/new-website {"leaflet-routing-machine.css":"sha256-+bjQZgpGVWDuLihQuI7gf9eqEQwxB1tonRBglZY01og=","leaflet-routing-machine.js":"sha256-es9Eare3bi1hemMMx7+pAetQeNsssU7yiEEi4tXJxGY=","leaflet-routing-machine.min.css":"sha256-Xzj2YBR9gA/KpkZjOkKtILmYxuIdPlbJKXfiiv4w9d4=","leaflet-routing-machine.min.js":"sha256-h2WSDXluZDRqfw9GkCBYZP/0HrGQUACqF3Lg8eiCsd8="}applist/applist/compile_commands.json1-10 [ {"command":"/Applications/Xcode10.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang++ -x objective-c -c -I/Users/kevinbradley/Projects/theos/include -I/Users/kevinbradley/Projects/theos/vendor/include -I/Users/kevinbradley/Projects/theos/include/_fallback -include /Users/kevinbradley/Projects/theos/Prefix.pch -MT /Users/kevinbradley/Projects/AppListTV/applist/.theos/obj/appletv/arm64/ALRootListController.m.da9f307a.o -MMD -MP -MF /Users/kevinbradley/Projects/AppListTV/applist/.theos/obj/appletv/arm64/ALRootListController.m.da9f307a.Td -fcolor-diagnostics -DTARGET_APPLETV=1 -Os -Wall -ggdb -Werror -isysroot /Users/kevinbradley/Projects/theos/sdks/AppleTVOS14.2.sdk -mtvos-version-min=9.0 -fmodules -fcxx-modules -fmodule-name=AppList -fbuild-session-file=/Users/kevinbradley/Projects/AppListTV/applist/.theos/build_session -fmodules-prune-after=345600 -fmodules-prune-interval=86400 -fmodules-validate-once-per-build-session -I. -ITVSettings -I../public -F. 
-fobjc-arc -I../include -DTHEOS_INSTANCE_NAME=AppList -fmodules -fcxx-modules -fmodule-name=AppList -fbuild-session-file=/Users/kevinbradley/Projects/AppListTV/applist/.theos/build_session -fmodules-prune-after=345600 -fmodules-prune-interval=86400 -fmodules-validate-once-per-build-session -arch arm64 -std=c99 ALRootListController.m -o /Users/kevinbradley/Projects/AppListTV/applist/.theos/obj/appletv/arm64/ALRootListController.m.da9f307a.o", "file":"/Users/kevinbradley/Projects/AppListTV/applist/applist/ALRootListController.m","directory":"/"} ] AtilaDev/google-fonts { "kind": "webfonts#webfont", "family": "BenchNine", "category": "sans-serif", "variants": ["300", "regular", "700"], "subsets": ["latin", "latin-ext"], "version": "v8", "lastModified": "2019-07-16", "files": { "300": "http://fonts.gstatic.com/s/benchnine/v8/ahcev8612zF4jxrwMosT--tRhWa8q0v8ag.ttf", "700": "http://fonts.gstatic.com/s/benchnine/v8/ahcev8612zF4jxrwMosT6-xRhWa8q0v8ag.ttf", "regular": "http://fonts.gstatic.com/s/benchnine/v8/ahcbv8612zF4jxrwMosrV8N1jU2gog.ttf" } } bundestag/dip21-daten17/Mündliche Frage/17-43272.json { "vorgangId": "43272", "VORGANG": { "WAHLPERIODE": "17", "VORGANGSTYP": "Mündliche Frage", "TITEL": "Abzugsverlangen der afghanischen Regierung für alle NATO-Truppen bereits im Jahr 2013", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "PLENUM": { "PLPR_KLARTEXT": "Mündliche Frage/Schriftliche Antwort", "PLPR_HERAUSGEBER": "BT", "PLPR_NUMMER": "17/167", "PLPR_SEITEN": "19853B - 19853C", "PLPR_LINK": "http://dipbt.bundestag.de:80/dip21/btp/17/17167.pdf#P.19853" }, "EU_DOK_NR": "", "SCHLAGWORT": [ { "_fundstelle": "true", "__cdata": "Afghanistan" }, "Auslandseinsatz der Bundeswehr", "NATO-Truppen", { "_fundstelle": "true", "__cdata": "Truppenreduzierung" } ], "ABSTRAKT": "Originaltext der Frage(n): \r\n \r\nWelche Konsequenzen zieht die Bundesregierung aus der Forderung des afghanischen Präsidenten vom 15. März 2012, alle NATO-Truppen sollten sich aus den Dörfern und Regionen in ihre Stützpunkte zurückziehen und bereits 2013 die ganze Sicherheitsverantwortung an afghanische Einheiten übergeben, sowie aus dem Aussetzen der Friedensgespräche durch die Taliban am gleichen Tag, weil die USA unannehmbare Vorbedingungen stellen, und warum besteht die Bundesregierung entgegen diesem erklärten Abzugsverlangen der afghanischen Regierung trotzdem auf einer Fortsetzung der Kampfeinsätze der Bundeswehr mindestens bis Ende 2014 und jedenfalls einer Kampfpräsenz über 2014?" }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BT", "URHEBER": "Mündliche Frage ", "FUNDSTELLE": "16.03.2012 - BT-Drucksache 17/9001, Nr. 61", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/17/090/1709001.pdf" }, { "ZUORDNUNG": "BT", "URHEBER": "Mündliche Frage/Schriftliche Antwort", "FUNDSTELLE": "21.03.2012 - BT-Plenarprotokoll 17/167, S. 
19853B - 19853C", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btp/17/17167.pdf#P.19853", "PERSOENLICHER_URHEBER": [ { "VORNAME": "Hans-Christian", "NACHNAME": "Ströbele", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Frage", "SEITE": "19853B" }, { "VORNAME": "Cornelia", "NACHNAME": "Pieper", "FUNKTION": "Staatsmin.", "RESSORT": "Auswärtiges Amt", "AKTIVITAETSART": "Antwort", "SEITE": "19853B" } ] } ] } } {"artist_id":"ART6NHY1187FB5B96F","artist_latitude":null,"artist_location":"","artist_longitude":null,"artist_name":"U.S.U.R.A.","duration":315.32363,"num_songs":1,"song_id":"SODEPUV12AB0187446","title":"Open Your Mind","year":1992}paulo3011/sparkfy0 {"artist_id":"AROUUTS1187FB36EF9","artist_latitude":33.6671,"artist_location":"Orange County, CA","artist_longitude":-117.76505,"artist_name":"As Hope Dies","duration":297.74322,"num_songs":1,"song_id":"SOLQHFS12AB017B426","title":"Letters Of Our Existence","year":0}{ "id": "excel-sheet-importer", "version": "1.0.1", "meta": { "label": "Excel sheet importer", "description": "Create one dataset per sheet from an Excel file stored in a folder.", "author": "Dataiku ()", "icon": "icon-table", "licenseInfo": "Apache Software License", "url": "https://www.dataiku.com/product/plugins/excel-sheet-importer/", "tags": ["Format", "Productivity"] } } 10-100 { "version": "{{VERSION}}", "plunker": "https://plnkr.co/edit/?p=preview", "assetsUrl": "https://localhost:8090/assets" } Ryebread4/Rustionaryen/orpheus.json {"word":"orpheus","definition":"The famous mythic Thracian poet, son of the Muse Calliope, and husband of Eurydice. He is reputed to have had power to entrance beasts and inanimate objects by the music of his lyre."}bundie1990/new-websitesri/ie8/0.4.0.json {"ie8.js":"sha256-4I/2p0rW5T//ZoYV4tH3bXEk88SGgPNJFOo8XHe9cLc=","ie8.max.js":"sha256-qpYyXjDECe03giGoQWc0OCwZz7YwamcOz10Sc6bthyw="} SandraPicarra/screencasts { "title": "Introduction to Backbone.js", "slides": [ { "title": "Welcome", "name": "firstSlide" }, { "title": "Background", "name": "background" }, { "title": "Example Code", "name": "code" }, { "title": "The End", "name": "lastSlide" } ] } {"vega-core.js":","vega-core.min.js":"sha512-Zz4E/Kx/,"vega.js":","vega.min.js":"sha5}{ "author": { "id": "t2_3aham3g1", "name": "marcjammy" }, "date": { "day": 1617753600, "full": 1617811283, "month": 1617235200, "week": 1617494400 }, "id": "t3_mm5cz2", "misc": { "postHint": "hosted:video" }, "picture": { "filesize": 60847, "fullUrl": "https://external-preview.redd.it/0rPieclzt4b0Y6c2sniLPoQz5uYJBK4bcQs0Krxd79k.png?format=pjpg&auto=webp&s=fed07c3c024527cb2e6ab38f89417859352b48a1", "hash": "ca331ccc5a", "height": 360, "lqip": "data:image/png;base64,/9j/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAAJABADASIAAhEBAxEB/8QAFwAAAwEAAAAAAAAAAAAAAAAAAQQFBv/EACUQAAEDAQYHAAAAAAAAAAAAAAECAwUABBESIUFzMjRRcYGxwf/EABUBAQEAAAAAAAAAAAAAAAAAAAQF/8QAGREAAwADAAAAAAAAAAAAAAAAAAECAzFh/9oADAMBAAIRAxEAPwDLxVjjHYtlt1DS39XMWYSU6Dvlf18VIm7Ew0lBbCQkjCR9pGH4FbZoSPMs7Y9VMmGsmxFcP//Z", "thumbnailUrl": "https://b.thumbs.redditmedia.com/uUrZkgitGqFMmxloyUo2QMul-l-N0V1Q4Z9BQ53Oe9E.jpg", "url": "https://external-preview.redd.it/0rPieclzt4b0Y6c2sniLPoQz5uYJBK4bcQs0Krxd79k.png?width=640&crop=smart&format=pjpg&auto=webp&s=85ae9ae7706f5785f9de23c7f6ec6bf895730352", "width": 640 }, "score": { "comments": 1, "downs": 0, "isCurated": false, "ratio": 1, "ups": 7, 
"value": 7 }, "subreddit": { "id": "t5_3isai", "name": "dndmaps" }, "tags": ["Encounter"], "title": "Smuggler's Cabin (animated)", "url": "https://www.reddit.com/r/dndmaps/comments/mm5cz2/smugglers_cabin_animated/" } {"pnpjs.es5.js":","pnpjs.es5.umd.bundle.js":","pnpjs.es5.umd.bundle.min.js":","pnpjs.es5.umd.js":","pnpjs.es5.umd.min.js":","pnpjs.js":"}Duvud/RestriCovid {"ast":null,"code":"var _jsxFileName = \"/home/duvud/Dev/RestriCovid/RestriCovidPublicPortal/src/mapa/Mapa.js\";\nimport { MapContainer, TileLayer, Marker, Popup } from 'react-leaflet';\nimport { jsxDEV as _jsxDEV } from \"react/jsx-dev-runtime\";\nexport const Mapa = () => {\n return /*#__PURE__*/_jsxDEV(MapContainer, {\n center: [43.0000000, -2.7500000],\n zoom: 2,\n scrollWheelZoom: false,\n children: /*#__PURE__*/_jsxDEV(TileLayer, {\n attribution: \"\\xA9
OpenStreetMap contributors\",\n url: \"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 6,\n columnNumber: 7\n }, this)\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 5,\n columnNumber: 5\n }, this);\n};\n_c = Mapa;\nexport default Mapa;\n\nvar _c;\n\n$RefreshReg$(_c, \"Mapa\");","map":{"version":3,"sources":["/home/duvud/Dev/RestriCovid/RestriCovidPublicPortal/src/mapa/Mapa.js"],"names":["MapContainer","TileLayer","Marker","Popup","Mapa"],"mappings":";AAAA,SAAQA,YAAR,EAAsBC,SAAtB,EAAiCC,MAAjC,EAAyCC,KAAzC,QAAqD,eAArD;;AAEA,OAAO,MAAMC,IAAI,GAAG,MAAM;AACxB,sBACE,QAAC,YAAD;AAAc,IAAA,MAAM,EAAE,CAAC,UAAD,EAAa,CAAC,SAAd,CAAtB;AAAgD,IAAA,IAAI,EAAE,CAAtD;AAAyD,IAAA,eAAe,EAAE,KAA1E;AAAA,2BACE,QAAC,SAAD;AACE,MAAA,WAAW,EAAC,0EADd;AAEE,MAAA,GAAG,EAAC;AAFN;AAAA;AAAA;AAAA;AAAA;AADF;AAAA;AAAA;AAAA;AAAA,UADF;AAQD,CATM;KAAMA,I;AAWb,eAAeA,IAAf","sourcesContent":["import {MapContainer, TileLayer, Marker, Popup} from 'react-leaflet';\n\nexport const Mapa = () => {\n return (\n \n \n \n );\n}\n\nexport default Mapa;\n\n"]},"metadata":{},"sourceType":"module"}xuandung38/flarum-translationstranslations/tr/dem13n-discussion-cards.json { "dem13n": { "forum": { "replies": "Yanıtlar: {count}" }, "admin": { "settings": { "settings_error": "Önbelleği temizleyin, bu uzantıyı etkisizleştirin, Kaldır düğmesine (sağda) tıklayın, verileri silmeyi onaylayın ve uzantıyı etkinleştirin", "default_img": "Varsayılan resim", "choose_tags": "Etiketleri seç", "badges": "Rozetleri göster", "actor_info": "Yazarı göster", "small_cards": "Birincil kartların sayısı", "preview_text": "Kısa metni göster", "show_replies": "Yanıtları göster", "output_on_index_page": "Anasayfada çıktısını göster", "desktop_card_width": "Masaüstü cihazlarda kartların genişliği: %{percent}", "tablet_card_width": "Tabletlerde kartların genişliği: %{percent}" } } } } 1-10 {"word":"windflower","definition":"The anemone; -- so called because formerly supposed to open only when the wind was blowing. See Anemone."}vamsikarri/DataVirtualization {"remainingRequest":"/Users/vamsikarri/Desktop/Gathi/Angular/FederatedQuery/node_modules/@angular-devkit/build-optimizer/src/build-optimizer/webpack-loader.js??ref--3-1!/Users/vamsikarri/Desktop/Gathi/Angular/FederatedQuery/node_modules/aws-sdk/lib/api_loader.js","dependencies":[{"path":"/Users/vamsikarri/Desktop/Gathi/Angular/FederatedQuery/node_modules/aws-sdk/lib/api_loader.js","mtime":1529418306535},{"path":"/Users/vamsikarri/Desktop/Gathi/Angular/FederatedQuery/node_modules/cache-loader/dist/cjs.js","mtime":1529418306962},{"path":"/Users/vamsikarri/Desktop/Gathi/Angular/FederatedQuery/node_modules/@angular-devkit/build-optimizer/src/build-optimizer/webpack-loader.js","mtime":1529418304772}],"contextDependencies":[],"result":["function apiLoader(svc, version) {\n if (!apiLoader.services.hasOwnProperty(svc)) {\n throw new Error('InvalidService: Failed to load api for ' + svc);\n }\n return apiLoader.services[svc][version];\n}\n\n/**\n * @api private\n *\n * This member of AWS.apiLoader is private, but changing it will necessitate a\n * change to ../scripts/services-table-generator.ts\n */\napiLoader.services = {};\n\n/**\n * @api private\n */\nmodule.exports = apiLoader;\n",null]}salty/data/MELTING_POINT/491.json {"title": "Phase transition properties: Normal melting temperature", "ref": {"full": ".; .; .; . (2007) Chem. Lett. 
36, 1484-1485.", "title": "Effects of Thermal History on Thermal Anomaly in Solid of Ionic Liquid Compound [C4mim][Tf2N]"}, "footer": "", "expmeth": "Adiabatic calorimetry", "solvent": null, "constr": ["Pressure of 1 atm"], "components": [{"name": "1-butyl-3-methylimidazolium bis(trifluoromethylsulfonyl)imide", "idout": "ABchaf", "sample": [["Source:", "commercial source"], ["Initial purification:", "fraction melting in an adiabatic calorimeter"], ["Initial purity:", "99.7 mol %"], ["Final purification:", "Karl Fischer titration"], ["Final purity:", "0.011 water mass %"]], "formula": "C10H15F6N3O4S2", "mw": "419.36"}], "data": [[["270.35", "0.05"]]], "phases": ["Crystal", "Liquid"], "dhead": [["Normal melting temperature, K"]]}bobbucks/manifest.json { "default_applications": ["app.json"], "supported_origins": ["https://maxlgu.github.io", "https://bobbucks.dev"] } roguh/express-swagger-generator-typespackage.json { "bugs": { "url": "https://github.com/roguh/express-swagger-generator-types/issues" }, "contributors": [ { "name": "", "url": "https://roguh.com" } ], "dependencies": { }, "deprecated": false, "description": "TypeScript definitions for express-swagger-generator", "homepage": "https://github.com/roguh/express-swagger-generator-types", "license": "MIT", "main": "", "name": "express-swagger-generator-types", "repository": { "type": "git", "url": "git+https://github.com/roguh/express-swagger-generator-types.git", "directory": "./" }, "scripts": {}, "types": "index.d.ts", "version": "1.1.17" } { "name": "@finos/perspective-jupyterlab", "version": "1.3.6", "description": "A Jupyterlab extension for the Perspective library, designed to be used with perspective-python.", "files": [ "dist/**/*", "src/**/*" ], "main": "dist/umd/perspective-jupyterlab.js", "style": "dist/umd/perspective-jupyterlab.css", "directories": { "dist": "dist/" }, "license": "Apache-2.0", "publishConfig": { "access": "public" }, "scripts": { "bench": "npm-run-all bench:build bench:run", "bench:build": "echo \"No Benchmarks\"", "bench:run": "echo \"No Benchmarks\"", "clean:screenshots": "rimraf \"screenshots/**/*.@(failed|diff).png\"", "test:build": "cpy \"test/html/*\" dist/umd && cpy \"test/csv/*\" dist/umd && cpy \"test/css/*\" dist/umd && node build.js --test", "test:run": "jest --rootDir=. --config=test/config/jest.config.js --color --verbose", "test:jupyter:build": "cpy \"test/html/*\" dist/umd && cpy \"test/arrow/*\" dist/umd && cpy \"test/css/*\" dist/umd", "test:jupyter:run": "__JUPYTERLAB_PORT__=6538 jest --rootDir=. 
--config=test/config/jupyter/jest.config.js --color --verbose", "test": "npm-run-all test:build test:run", "test:jupyter": "npm-run-all test:jupyter:build test:jupyter:run", "build": "node build.js", "clean": "rimraf dist", "version": "yarn build" }, "dependencies": { "@finos/perspective": "^1.3.6", "@finos/perspective-viewer": "^1.3.6", "@finos/perspective-viewer-d3fc": "^1.3.6", "@finos/perspective-viewer-datagrid": "^1.3.6", "@jupyter-widgets/base": "^3.0.0 || ^4.0.0", "@jupyterlab/application": "^3.0.0", "@lumino/application": "^1.7.3", "@lumino/widgets": "^1.9.3" }, "devDependencies": { "@finos/perspective-build": "^1.3.6", "@finos/perspective-test": "^1.3.6", "@jupyter-widgets/base-manager": "^1.0.0-alpha.0" }, "jupyterlab": { "extension": true } } { "table": "./packages/table/index.js", "table-column": "./packages/table-column/index.js", "tooltip": "./packages/tooltip/index.js", "message-box": "./packages/message-box/index.js", "notification": "./packages/notification/index.js", "loading": "./packages/loading/index.js", "scrollbar": "./packages/scrollbar/index.js", "message": "./packages/message/index.js", "color-picker": "./packages/color-picker/index.js", "input": "./packages/input/index.js", "autocomplete": "./packages/autocomplete/index.js" } {"date":20200422,"state":"MA","positive":42944,"probableCases":null,"negative":137518,"pending":null,"totalTestResultsSource":"totalTestsViral","totalTestResults":247471,"hospitalizedCurrently":3890,"hospitalizedCumulative":4256,"inIcuCurrently":1050,"inIcuCumulative":null,"onVentilatorCurrently":null,"onVentilatorCumulative":null,"recovered":null,"lastUpdateEt":"4/22/2020 16:00","dateModified":"2020-04-22T16:00:00Z","checkTimeEt":"04/22 12:00","death":2597,"hospitalized":4256,"hospitalizedDischarged":null,"dateChecked":"2020-04-22T16:00:00Z","totalTestsViral":247471,"positiveTestsViral":57491,"negativeTestsViral":null,"positiveCasesViral":42944,"deathConfirmed":null,"deathProbable":null,"totalTestEncountersViral":null,"totalTestsPeopleViral":null,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"25","positiveIncrease":1745,"negativeIncrease":3345,"total":180462,"totalTestResultsIncrease":15397,"posNeg":180462,"dataQualityGrade":null,"deathIncrease":148,"hospitalizedIncrease":247,"hash":"b34166acb44dfdd824f53168fd285d407cac3a4c","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} 
{"name":"left","subject":1008,"date":"11122009-112517","paths":{"Pen":{"strokes":[{"x":960,"y":92,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":949,"y":87,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":949,"y":87,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":934,"y":85,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":915,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":902,"y":81,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":876,"y":75,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":856,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":828,"y":74,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":800,"y":76,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":765,"y":75,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":726,"y":76,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":673,"y":75,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":616,"y":77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":543,"y":78,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":463,"y":82,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":370,"y":83,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":274,"y":89,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":168,"y":91,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":61,"y":97,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":-50,"y":100,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":-157,"y":107,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":-262,"y":110,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":-356,"y":115,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":-447,"y":117,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":-524,"y":121,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":-592,"y":125,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":-646,"y":125,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":-685,"y":129,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":-715,"y":126,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":-715,"y":126,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":-715,"y":126,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":-715,"y":126,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0},{"x":-685,"y":109,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":-658,"y":93,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet 
PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}JefferyLukas/SRIsconstraintjs/0.9.4.json {"cjs.js":","cjs.min.js":"}{ "id": 8734, "title": [ "[West Bank, Riverside]" ], "description": [ "A slight curve in the riverbank forms a narrow sand beach, bordered by a neatly mowed grassy area. Several houses are situated among the trees that circle the parklike area, and a wooden picnic table and benches stand near the strip of sand." ], "paths": [ "Obvious paths: up" ], "location": "Wehnimer's Landing", "wayto": { "8733": "up" }, "timeto": { "8733": 0.2 }, "image": "wl-homes-1-1264234799.png", "image_coords": [ 981, 730, 1001, 750 ] }{ "address": "1BLogC9LN4oPDcruNz3qo1ysa133E9AGg8", "cert_auth_type": "web", "cert_sign": " "cert_user_id": "", "files": { "data.json": { "sha512": "f7d712b279eefa5f37f1ce4b9c6e93e2aead8e3a229092a94375ccf0352905b3", "size": 212 } }, "inner_path": "data/users/1DrBSBBxG8eqYsxXSvGt9kc5HE/content.json", "modified": 1494188054, "signs": { "": " } }invitame-un-cafe/invitame-un-cafe.github.io { "meta": { "username": "invitame-un-cafe", "repositoryName": "", "branch": "master" }, "theme": { "name": "Casper" } } { "name": "spacing_app", "version": "0.0.1", "description": "This app computes available seating in a cinema. Depending on the occupancy", "main": "main.js", "directories": { "doc": "docs" }, "dependencies": { "axios": "^0.20.0", "electron-db": "^0.15.7", "jwt-decode": "^2.2.0", "keytar": "^6.0.1" }, "devDependencies": { "electron": "^10.1.0" }, "scripts": { "test": "echo \"Error: no test specified\" && exit 1", "start": "electron ." }, "repository": { "type": "git", "url": "git+https://github.com/lemontyc/mtyhack2020_aladix.git" }, "keywords": [ "cinema", "spacing", "calculator" ], "author": ". 
- - ", "license": "SEE LICENSE IN LICENSE", "bugs": { "url": "https://github.com/lemontyc/mtyhack2020_aladix/issues" }, "homepage": "https://github.com/lemontyc/mtyhack2020_aladix#readme" } {"body": "Hi Randolph - I updated the ambient.c file in /usr/local/ray/src/rt/\nand re-ran rmake in that same directory.\n\n\nThe previous ambient.c got renamed to ambient.c- and affected rpict\nrtrace and rview binaries got replaced with the previous ones in\n/usr/local/ray/bin/previous\n\n\nperhaps you could test...\n\n\n-Dan\n\n\nOn Thu, Oct 14, 2010 at 11:46 AM, wrote:\n___\nAutomatically generated content from [radiance mailing-list](https://radiance-online.org/pipermail/radiance-general/2010-October/007377.html).", "attachments": [], "created_by_name": "", "created_at": "October 14, 2010 at 01:11PM", "created_by": "Daniel_Fuller", "parent_id": "radiance-general_007376", "id": "radiance-general_007377"}version https://git-lfs.github.com/spec/v1 oid sha256:7b2738133111ddf11000a27b832683fb27ecd4ba75e45f09b53f3ecbefce623f size 449 package.json { "name": "hubot-laclasse", "version": "0.0.4", "description": "Paste quotes from La Classe Américaine", "main": "./lib/index.coffee", "author": "", "license": "BSD", "repository": { "type": "git", "url": "https://github.com/francois2metz/hubot-laclasse.git" }, "dependencies": { "jsdom": "~3.1.x" } } data/parl/oop.com_SDC_3201_9_3.json0 { "type": "committees", "node": [["fra", "s", "committees", "32-1", 25]], "pubmax": "1983-11-30", "chamber": ["s"], "pubmin": "1980-04-14", "label": "Comités du Sénat, 32e Législature, 1re Session : Sous-comité de la défense nationale, Premier rapport", "callNumber": "1980/83 N28 A12", "language": ["fra"], "reportTitle": ["Les effectifs des Forces armées canadiennes"], "session": ["32-1"] } { "name": "react-querystring-router", "version": "1.0.0-beta.0", "description": "Bare router for React components, using props as query string", "repository": "https://github.com/skidding/react-cosmos/tree/master/packages/react-querystring-router", "license": "MIT", "main": "lib/index.js", "dependencies": { "lodash": "^3.10.1", "react-dom-polyfill": "^1.0.0-beta.0" }, "peerDependencies": { "react": ">=0.12 <16" }, "optionalDependencies": { "react-dom": "<16" } } { "name": "localmodel", "version": "0.6.0", "homepage": "https://github.com/RickCraig/localmodel", "authors": [ " <>" ], "description": "A library to allow the utilization of localStorage and sessionStorage using models (inspired by mongoose/mongodb)", "main": "dist/localmodel.min.js", "moduleType": [ "globals" ], "keywords": [ "localstorage", "storage", "localmodel", "sessionstorage", "sessions", "models", "model", "mongoose", "electron" ], "license": "MIT", "ignore": [ "**/.*", "node_modules", "bower_components", "test", "tests" ] } { "id": "17011", "title": "port", "description": "The listening port of the transaction that generated the event. This information will not always be available, so this attribute may not always be reported.", "eventTypes": [ "TransactionError" ], "relatedInfo": "

Related Info

For more information about APM data and events, see APM default events in Insights.

\r\n" }data/reviews/682/6827076.json { "description": "exp/types: avoid init race in check_test.go.\n\nThere was an init race between\n\tcheck_test.go:init\n universe.go:def\n use of Universe\nand\n\tuniverse.go:init\n creation of Universe\n\nThe order in which init funcs are executed in a package is unspecified.\nThe test is not currently broken in the golang.org environment\nbecause the go tool compiles the test with non-test sources before test sources,\nbut other environments may, say, sort the source files before compiling,\nand thus trigger this race, causing a nil pointer panic.", "cc": [ "", "" ], "reviewers": [], "messages": [ { "sender": "", "recipients": [ "", "", "", "" ], "text": "Hello gri (cc: ),\n\nI'd like you to review this change to\nhttps://code.google.com/p/go", "disapproval": false, "date": "2012-11-12 01:26:04.085490", "approval": false }, { "sender": "", "recipients": [ "", "", "", "" ], "text": "LGTM.\n\nThanks for catching this.\n- gri", "disapproval": false, "date": "2012-11-12 18:02:56.195960", "approval": true }, { "sender": "", "recipients": [ "", "", "", "" ], "text": "*** Submitted as http://code.google.com/p/go/source/detail?r=919e2645d301 ***\n\nexp/types: avoid init race in check_test.go.\n\nThere was an init race between\n\tcheck_test.go:init\n universe.go:def\n use of Universe\nand\n\tuniverse.go:init\n creation of Universe\n\nThe order in which init funcs are executed in a package is unspecified.\nThe test is not currently broken in the golang.org environment\nbecause the go tool compiles the test with non-test sources before test sources,\nbut other environments may, say, sort the source files before compiling,\nand thus trigger this race, causing a nil pointer panic.\n\nR=gri\nCC=golang-dev\nhttp://codereview.appspot.com/6827076", "disapproval": false, "date": "2012-11-12 22:08:42.375000", "approval": false } ], "owner_email": "", "private": false, "base_url": "", "owner": "dsymonds", "subject": "code review 6827076: exp/types: avoid init race in check_test.go.", "created": "2012-11-12 01:23:32.403020", "patchsets": [ 1, 2001, 3002, 7001 ], "modified": "2012-11-12 22:08:44.553640", "closed": true, "issue": 6827076 }src/main/resources/static/mas_json/2018_sp_-9212983101457711817.json {"title": "On the Economics of Offline Password Cracking.", "fields": ["authentication server", "pbkdf2", "cryptographic hash function", "password cracking", "scrypt"], "abstract": "We develop an economic model of an offline password cracker which allows us to make quantitative predictions about the fraction of accounts that a rational password attacker would crack in the event of an authentication server breach. We apply our economic model to analyze recent massive password breaches at Yahoo!, Dropbox, LastPass and AshleyMadison. All four organizations were using key-stretching to protect user passwords. In fact, LastPass' use of PBKDF2-SHA256 with 10^5 hash iterations exceeds 2017 NIST minimum recommendation by an order of magnitude. Nevertheless, our analysis paints a bleak picture: the adopted key-stretching levels provide insufficient protection for user passwords. In particular, we present strong evidence that most user passwords follow a Zipf's law distribution, and characterize the behavior of a rational attacker when user passwords are selected from a Zipf's law distribution. 
We show that there is a finite threshold which depends on the Zipf's law parameters that characterizes the behavior of a rational attacker \u2014 if the value of a cracked password (normalized by the cost of computing the password hash function) exceeds this threshold then the adversary's optimal strategy is always to continue attacking until each user password has been cracked. In all cases (Yahoo!, Dropbox, LastPass and AshleyMadison) we find that the value of a cracked password almost certainly exceeds this threshold meaning that a rational attacker would crack all passwords that are selected from the Zipf's law distribution (i.e., most user passwords). This prediction holds even if we incorporate an aggressive model of diminishing returns for the attacker (e.g., the total value of 500 million cracked passwords is less than 100 times the total value of 5 million passwords). On a positive note our analysis demonstrates that memory hard functions (MHFs) such as SCRYPT or Argon2i can significantly reduce the damage of an offline attack. In particular, we find that because MHFs substantially increase guessing costs a rational attacker will give up well before he cracks most user passwords and this prediction holds even if the attacker does not encounter diminishing returns for additional cracked passwords. Based on our analysis we advocate that password hashing standards should be updated to require the use of memory hard functions for password hashing and disallow the use of non-memory hard functions such as BCRYPT or PBKDF2.", "citation": "Citations (1)", "departments": ["Purdue University", "Purdue University", "Purdue University"], "authors": [".....http://dblp.org/pers/hd/b/Blocki:Jeremiah", ".....http://dblp.org/pers/hd/h/Harsha:Benjamin", ".....http://dblp.org/pers/hd/z/Zhou:Samson"], "conf": "sp", "year": "2018", "pages": 19}{ "name": "mock-pl", "version": "1.0.0", "description": "API that serves the latest fixture scores of a Mock Premier League", "main": "server/index.js", "scripts": { "start:dev": "set DEBUG=dev && cross-env NODE_ENV=development babel-watch ./server/start.js", "start": "set DEBUG=dev && node build/start.js", "build": "babel server --out-dir build", "seed": "babel-node ./server/src/schema/seed", "unseed": "babel-node ./server/src/schema/unseed", "heroku:seed": "node ./build/src/schema/seed", "heroku:unseed": "node ./build/src/schema/unseed", "test": "cross-env NODE_ENV=test && jest --timeOut=10000 --forceExit --detectOpenHandles --maxWorkers=1" }, "repository": { "type": "git", "url": "git+https://github.com/Baystef/mock-premier-league.git" }, "keywords": [ "express", "node", "MongoDB", "Redis" ], "author": "", "license": "ISC", "bugs": { "url": "https://github.com/Baystef/mock-premier-league/issues" }, "homepage": "https://github.com/Baystef/mock-premier-league#readme", "dependencies": { "@babel/runtime": "^7.6.0", "bcrypt": "^3.0.6", "connect-redis": "^4.0.2", "cross-env": "^5.2.0", "debug": "^4.1.1", "dotenv": "^8.0.0", "express": "^4.17.1", "express-session": "^1.16.2", "express-validator": "^6.1.1", "jest": "^24.9.0", "jsonwebtoken": "^8.5.1", "moment": "^2.24.0", "mongodb": "^3.3.2", "mongodb-memory-server": "^5.2.5", "mongoose": "^5.6.9", "morgan": "^1.9.1", "rate-limiter-flexible": "^1.1.2", "redis": "^2.8.0" }, "devDependencies": { "@babel/cli": "^7.5.5", "@babel/core": "^7.5.5", "@babel/node": "^7.5.5", "@babel/plugin-transform-runtime": "^7.6.0", "@babel/preset-env": "^7.5.5", "babel-watch": "^7.0.0", "eslint": "^6.1.0", "eslint-config-airbnb-base": 
"^14.0.0", "eslint-plugin-import": "^2.18.2", "supertest": "^4.0.2" } } { "name": "vue-bulma-progress", "version": "0.0.0", "description": "Native HTML progress bars", "keywords": [ "vue", "bulma", "progress" ], "author": " <>", "homepage": "https://github.com/vue-bulma/vue-bulma/packages/vue-bulma-progress#readme", "license": "MIT", "main": "src/main.js", "files": [ "src" ], "dependencies": { "bulma": "^0.7.2" } } 0 { "contract": "0x80406630da2eca06e293467f2b757049823cd361", "tool": "mythril", "start": 1563641955.6038513, "end": 1563641966.8317187, "duration": 11.227867364883423, "analysis": { "error": null, "issues": [ { "address": 253, "code": " %(/************/", "debug": "storage_2: 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\norigin: 0x0\ncaller: 0x0\ncalldata_Roulette_0: 0xb00000000000000000000000000000000000000000000000000000000\ncalldatasize_Roulette: 0x4\ncallvalue: 0xde0b6b3a7640000\n", "description": "A possible integer overflow exists in the function `fallback`.\nThe addition or multiplication may result in a value higher than the maximum representable integer.", "filename": "/unique_chucks/27/0x80406630da2eca06e293467f2b757049823cd361.sol", "function": "fallback", "lineno": 2, "title": "Integer Overflow ", "type": "Warning" }, { "address": 769, "debug": "", "description": "Function fallback retrieves the transaction origin (tx.origin) using the ORIGIN opcode. Use msg.sender instead.\nSee also: https://solidity.readthedocs.io/en/develop/security-considerations.html#tx-origin", "filename": "/unique_chucks/27/0x80406630da2eca06e293467f2b757049823cd361.sol", "function": "fallback", "lineno": 134, "title": "Use of tx.origin", "type": "Warning" }, { "address": 1590, "code": "\n feeAddr = msg.s", "debug": "The exception is triggered under the following conditions:\n\ncalldata_Roulette_4: 0x0\nstorage_2: 0x0\ncalldata_Roulette_0: 0xf71d96cb00000000000000000000000000000000000000000000000000000000\ncalldatasize_Roulette: 0x4\ncallvalue: 0x0\n", "description": "A reachable exception (opcode 0xfe) has been detected. This can be caused by type errors, division by zero, out-of-bounds array access, or assert violations. This is acceptable in most situations. Note however that `assert()` should only be used to check invariants. Use `require()` for regular input checking. 
", "filename": "/unique_chucks/27/0x80406630da2eca06e293467f2b757049823cd361.sol", "function": "_function_0xf71d96cb", "lineno": 61, "title": "Exception state", "type": "Informational" } ], "success": true } }{"BizarreSeasons": ["SeasonalBaggage", "ItsAlwaysSpring", "HeatWave", "RainRainGoAway", "SnowedIn", "BizarroEpisode", "HostileWeather", "AStormIsComing", "HilarityEnsues", "TheGreatFlood", "ThirstyDesert", "EverythingTryingToKillYou", "OvernightAgeUp", "BodyHorror", "MasterRace", "SugarBowl", "TimePassesMontage", "RunningGag", "AncientEgypt", "OnlyInFlorida", "MyFriendsAndZoidberg", "HollywoodCalifornia", "UpToEleven", "DrivesLikeCrazy", "MemeticMutation", "DepartmentOfRedundancyDepartment", "OnlyInFlorida", "MyFriendsAndZoidberg", "HollywoodCalifornia", "UpToEleven", "DrivesLikeCrazy", "UpToEleven", "DrivesLikeCrazy"]}zeke/ch-ch-ch-changes1-10 { "name": "electron-channel", "description": "A dead simple ipc wrapper for electron", "version": "0.0.1", "keywords": [ "electron", "ipc", "channel", "render", "events" ], "author": { "name": "", "email": "" }, "main": "./index.js", "repository": { "user": "jmjuanes", "repo": "electron-channel", "host": "github.com", "branch": "master", "apiHost": "api.github.com", "tarball_url": "https://api.github.com/repos/jmjuanes/electron-channel/tarball/master", "clone_url": "https://github.com/jmjuanes/electron-channel", "https_url": "https://github.com/jmjuanes/electron-channel", "travis_url": "https://travis-ci.org/jmjuanes/electron-channel", "zip_url": "https://github.com/jmjuanes/electron-channel/archive/master.zip", "api_url": "https://api.github.com/repos/jmjuanes/electron-channel" }, "dependencies": {}, "license": "MIT", "gitHead": "157b10b69b067e126469d9591b17cf85d4b1d2f2", "homepage": "https://github.com/jmjuanes/electron-channel#readme", "_npmVersion": "5.4.2", "_nodeVersion": "6.9.5", "dist": { "integrity": " "shasum": "030ea8e2e394b1ab380fce2c7a95724af9b27033", "tarball": "https://registry.npmjs.org/electron-channel/-/electron-channel-0.0.1.tgz" }, "versions": [ { "number": "0.0.1", "date": "2017-09-27T14:36:03.845Z" } ], "created": "2017-09-27T14:36:03.845Z", "modified": "2017-09-27T14:36:03.845Z", "lastPublisher": { "name": "jmjuanes", "email": "" }, "owners": [ { "name": "jmjuanes", "email": "" } ] }{ "artemisApp": { "post": { "created": "Post erfolgreich erstellt", "updated": "Post erfolgreich aktualisiert", "deleted": "Post erfolgreich gelöscht", "text": "Post", "creationDate": "Gestellt am", "votes": "Votes", "answers": "Antworten", "approvedAnswerPosts": "Bestätigte Antworten", "exerciseOrLecture": "Aufgabe / Vorlesung", "overview": { "title": "Posts", "noPosts": "Es gibt keine Posts in diesem Kurs", "showApproved": "Zeige Posts mit bestätigten Antworten an", "hideApproved": "Verstecke Posts mit bestätigten Antworten" } }, "answerPost": { "created": "Antwort erfolgreich erstellt", "updated": "Antwort erfolgreich aktualisiert", "deleted": "Antwort erfolgreich gelöscht" } } } { "schema_version": "1.2.0", "id": "GHSA-7vfp-rfvf-q9mj", "modified": "2022-04-23T00:40:49Z", "published": "2022-04-23T00:40:49Z", "aliases": [ "CVE-2012-3336" ], "details": "IBM InfoSphere Guardium 8.0, 8.01, and 8.2 is vulnerable to SQL injection. A remote authenticated attacker could send specially-crafted SQL statements to multiple scripts, which could allow the attacker to view, add, modify or delete information in the back-end database. 
IBM X-Force ID: 78282.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2012-3336" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/78282" }, { "type": "WEB", "url": "http://www-01.ibm.com/support/docview.wss?uid=swg21611130" } ], "database_specific": { "cwe_ids": [ ], "severity": "MODERATE", "github_reviewed": false } }{ "itemDisplayName": "Web App with MySQL", "description": "This template creates an Azure Web App with MySQL", "githubUsername": "Microsoft", "summary": "Deploys an Azure Web App with a MySQL instance as an environment in a DevTest Lab." }{"react-bootstrap.js":","react-bootstrap.min.js":"}{"relation": [["Title", "Dark Dreams", "Dark Fire (Fire Series, Book 5)", "The Dark Horse", "The Dark Horse", "Dark Horse (Starlight Animal Rescue #4)", "Dark Lady's Chosen (Book Four of the Chronicles of the Necromancer)", "Dark Lover", "Dark of Night", "Dark of the Moon: A Dark Guardian Novel", "Dark Places", "The Dark Planet (Atherton #3)", "Dark Resurrection (Deathlands #85, Empire of Xibalba #2)", "Dark Road Rising", "Dark Slayer (Carpathian Novel #20)", "Dark Spirits: 200 Classy Concoctions Starring Bourbon, Brandy, Scotch, Whiskey, Rum and More", "Dark Stranger (Primes #10)", "Dark Time (Mortal Path #1)", "Dark Victory", "The Dark Vineyard (Bruno, Chief of Police #2)", "Dark Visions", "Darker Angels", "A Darker Domain", "A Darker Place (Sean Dillon #16)", "Darkest Hour (Age of Misrule #2)", "The Darkest Room"], ["Author", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""], ["Copyright", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009", "2009"], ["Date Added", "04/05/14", "11/22/10", "02/26/15", "04/23/14", "10/25/09", "04/29/11", "09/18/12", "02/02/12", "10/25/09", "07/16/15", "06/09/09", "06/01/11", "04/23/14", "04/23/14", "07/20/14", "04/29/12", "07/03/11", "09/18/12", "07/17/13", "03/05/13", "11/14/10", "07/05/11", "11/10/14", "04/10/13", "04/18/11"], ["Action", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""]], "pageTitle": "Popular Books - Bookshare - Accessible Books for Individuals with Print Disabilities", "title": "", "url": "https://www.bookshare.org/browse/popular?resultsView=TABLE&direction=ASC&sortOrder=COPYRIGHT_DATE&offset=77375", "hasHeader": true, "headerPosition": "FIRST_ROW", "tableType": "RELATION", "tableNum": 0, "s3Link": "common-crawl/crawl-data/CC-MAIN-2015-32/segments/1438042988061.16/warc/CC-MAIN-20150728002308-00265-ip-10-236-191-2.ec2.internal.warc.gz", "recordEndOffset": 894188748, "recordOffset": 894172354, "tableOrientation": "HORIZONTAL", "textBeforeTable": "2550100 Results Per Page: All ArabicEnglishFrenchGermanHindiPolishSpanishTurkish Language: By TitleBy AuthorBy Copyright DateBy Date Added Sort Order: Go to last page\u2026 \u00a0 3,097 3,096 3,095 Go to first page\u2026 \u00a0 Page: Showing 77,376 through 77,400 of 123,828 results Popular Books Browse List View Table View Browse Results You are using an outdated browser.", "textAfterTable": "Showing 77,376 through 77,400 of 123,828 results \u00a0 Page: Go to first page\u2026 3,095 3,096 3,097 Go to last page\u2026 \u00a0 Help Select your format based upon: 1) how you want to read your book, and 2) compatibility with your reading tool. 
To learn more about using Bookshare with your device, visit the \"Using Bookshare\" page in the Help Center. Here is an overview of the specialized formats that Bookshare offers its members with links that go to the Help Center for more information. Bookshare Web Reader - a customized reading tool for Bookshare members offering all the features of DAISY with a single click of the \"Read Now\" link. DAISY (Digital Accessible Information System) - a digital book file format. DAISY books from Bookshare are DAISY 3.0 text files that work with just about every type of access technology that reads text. Books that contain images will have the download option of \u2018DAISY Text with Images\u2019. BRF (Braille Refreshable Format) - digital Braille for use with refreshable Braille devices and Braille embossers. MP3 (Mpeg audio layer 3) - Provides audio only with no text. These books are created with a text-to-speech engine and", "hasKeyColumn": true, "keyColumnIndex": 0, "headerRowIndex": 0}{"web":[{"value":["预报员","预测者","预报"],"key":"forecaster"},{"value":["公共事业股票展望"],"key":"Utility Forecaster"},{"value":["经济预测员"],"key":"Economic Forecaster"}],"query":"forecaster","translation":["预报员"],"errorCode":"0","dict":{"url":"yddict://m.youdao.com/dict?le=eng&q=forecaster"},"webdict":{"url":"http://m.youdao.com/dict?le=eng&q=forecaster"},"basic":{"us-phonetic":"'fɔrkæstɚ","phonetic":"'fɔːkɑːstə","uk-phonetic":"'fɔːkɑːstə","explains":["n. 预报员","n. (Forecaster)人名;(英)福卡斯特"]},"l":"EN2zh-CHS"} data/www/149b8se3bji3zvy33.json {"title":"ワカメちゃん化江風でKARA - MISTER","author":"帰ってきたtakesiman","description":"転載不可
Unreprintable
不可转载","thumb":"//i.iwara.tv/sites/default/files/styles/thumbnail/public/videos/thumbnails/1333026/thumbnail-1333026_0001.jpg?itok=lRCmUGgh","download":"https://www.iwara.tv/api/video/149b8se3bji3zvy33","origin":"https://www.iwara.tv/videos/149b8se3bji3zvy33"}{"URL": "https://www.wired.com/1999/11/amphibious-assault", "heading": "amphibious assault", "subheading": "hardware my first thought was that my pal mario had bought some kind of cute tank, but then he introduced me to his argo, the amphibious all-terrain vehicle he\u2019d picked up to plow the snow, patrol the pond, and haul lumber. riding mario\u2019s argo is gorge-rattling magic. because it uses skid steering, the argo corners [\u2026]", "author": "", "category": "not found", "type": "article", "timestamp": "11.01.1999 12:00 PM", "text": "hardwaremy first thought was that my pal mario had bought some kind of cute tank, but then he introduced me to his argo, the amphibious all-terrain vehicle he'd picked up to plow the snow, patrol the pond, and haul lumber.riding mario's argo is gorge-rattling magic. because it uses skid steering, the argo corners with superb precision, squeezing between trees like a fleet-footed predator. we barreled down a hillside and splashed into the river - it was running dry, and we had to grind over wide stretches of slippery, uneven rock for half a mile before we found water deep enough to swim in. as the channels in the argo's soft goodyear runamucks churned forward, scaring the trout, i felt like i was in a speedboat, but we left no greasy rainbows in our wake. unlike a two-stroke outboard boat engine, this thing is clean.mario loves his argo, but he's not thrilled about the aftermarket support. all the local argo dealers do most of their business selling atv trikes and snowmobiles and aren't much for tuning up his beast, which has a tendency to stall out and backfire.leaping back onto the road, we got some nice hang time, heightened since the argo, because of its vehicle class, doesn't require seat belts. mario yanked open the throttle, letting the thing crank at 22 mph. that's crawling when you're in a car, but on an atv, it feels like you're hauling ass.argo 6 x 6 vanguard: $6,000. ontario drive & gear: (800) 561 9508, street credhedgehog blissgone postalget back to workthe law of the codespeakers, medium raremonster rumbleemail made easyreadmemusicdrag and dropmad as hellvan vs. 
peaamphibious assaultjust outta betatunes from the cryptrediscovered rantscontributors"}{ "url": "http://forum.nette.org/cs/8844-nette-database-multi-insert", "archived_snapshots": {}, "timestamp": "20220316" }{"id": 5244, "name": "Snake", "qualified_name": "Snake (Ape Atoll)", "examine": "It's a camouflaged jungle snake.", "members": true, "release_date": "2004-12-06", "attributes": [], "size": 1, "aggressive": true, "poisonous": true, "venomous": false, "immune_poison": false, "immune_venom": false, "max_hit": 9, "attack_types": ["crush"], "attack_speed": 4, "wiki_url": "https://oldschool.runescape.wiki/w/Snake_(Ape_Atoll)", "levels": {"combat": 24, "hitpoints": 36, "attack": 15, "strength": 25, "defence": 10, "magic": 10, "ranged": 1}, "bonuses": {"attack": {"melee": 100, "ranged": 0, "magic": 0}, "defence": {"stab": 0, "slash": 0, "crush": 0, "magic": 0, "ranged": 0}, "strength": {"melee": 100, "ranged": 0, "magic": 0}}, "drops": [{"id": 526, "members": false, "quantity": {"__typename": "ItemDropQuantityScalar", "quantity": 1}, "noted": false, "rarity": 1.0, "rolls": 1}, {"id": 7860, "members": true, "quantity": {"__typename": "ItemDropQuantityScalar", "quantity": 1}, "noted": false, "rarity": 0.25, "rolls": 1}]}{"id": 4998, "url": "http://proxy-pubminefi.diffusion.finances.gouv.fr/pub/document/18/15685.pdf", "author": "", "title": ", et - Les douaniers du Tunnel sous la Manche saisissent 65 kg d?héroïne", "text": " \n \n \nMINISTRE DE L’ECONOMIE ET \nDES FINANCES \nN \nMINISTRE DU COMMERCE \nEXTERIEUR \nBERNARD CAZENEUVE \nMINISTRE DELEGUE CHARGE DU \nBUDGET \nC o m m u n i q u é d e p r e s s e \nC o m m u n i q u é d e p r e s s e \nwww.economie.gouv.fr \n \n \n \n www.commerce-extérieur.gouv.fr \nParis, le 3 septembre 2013 \nN° 778/352 \n \n \nLes douaniers du Tunnel sous la Manche saisissent 65 kg d’héroïne \n \nLes douaniers du Tunnel sous la Manche ont saisi le 27 août près de 65 kg d’héroïne dans \nun poids lourd en provenance des Pays-Bas, qui avait pour destination le Royaume-Uni. La \nvaleur de la marchandise est estimée à plus de 2,6 millions d’euros sur le marché illicite de \nla revente de stupéfiants. \nCette saisie a été rendue possible par une analyse radioscopique du camion, qui a fait \napparaître des masses sombres suspectes au milieu du chargement. Il s’agissait de 120 \npaquets d’héroïne dissimulés dans un chauffe-eau, et recouverts par des couches isolantes \nen laine de verre. \, ministre de l’Economie et des Finances, , ministre du \nCommerce extérieur et , ministre délégué chargé du Budget félicitent \nles agents des douanes pour leur vigilance dans la lutte contre les trafics internationaux, et \npour les résultats qu’elle produit : en 2012, la douane française a saisi près de 260 kg \nd’héroïne. 
\n \n \nContact presse : \nService presse de la DGGDI : , directeur de la communication : 01 57 53 42 \n11 \n \n", "published_date": "2013-09-03", "section": "Communiques"}[ { "signature": "async UploadFilesFromS3({\n \"libraryId\": \"ilib3762veoskHQtAwoUZZRX26V2qKc2\",\n \"objectId\": \"iq__3MXtFtRdbFtPwKHmw46mgmd8GQjs\",\n \"writeToken\": \"n \"fileInfo\": [\n {\n \"path\": \"s3-copy\",\n \"source\": \"ENTIRE_CREED_2min_.mp4\"\n }\n ],\n \"region\": \"us-west-1\",\n \"bucket\": \"eluvio-mez-test\",\n \"accessKey\": \"\",\n \"secret\": \"\",\n \"copy\": true,\n \"callback\": \"\"\n});" }, { "signature": "async UploadFilesFromS3({\n \"libraryId\": \"ilib3762veoskHQtAwoUZZRX26V2qKc2\",\n \"objectId\": \"iq__3MXtFtRdbFtPwKHmw46mgmd8GQjs\",\n \"writeToken\": \"n \"encryption\": \"cgck\",\n \"fileInfo\": [\n {\n \"path\": \"s3-copy-encrypted\",\n \"source\": \"ENTIRE_CREED_2min_.mp4\"\n }\n ],\n \"region\": \"us-west-1\",\n \"bucket\": \"eluvio-mez-test\",\n \"accessKey\": \"\",\n \"secret\": \"\",\n \"copy\": true,\n \"callback\": \"\"\n});" }, { "signature": "async UploadFilesFromS3({\n \"libraryId\": \"ilib3762veoskHQtAwoUZZRX26V2qKc2\",\n \"objectId\": \"iq__3MXtFtRdbFtPwKHmw46mgmd8GQjs\",\n \"writeToken\": \"n \"fileInfo\": [\n {\n \"path\": \"s3-reference\",\n \"source\": \"ENTIRE_CREED_2min_.mp4\"\n }\n ],\n \"region\": \"us-west-1\",\n \"bucket\": \"eluvio-mez-test\",\n \"accessKey\": \"\",\n \"secret\": \"\",\n \"copy\": false,\n \"callback\": \"\"\n});" } ]CSBP-CPSE/lode-viewer-dev Index,Facility_Name,Facility_Type,Street_No,Street_Name,City,Prov_Terr,Postal_Code 9bb34fe71b4b394de009,Monseigneur-Belzile,Public,197,avenue ulric-tessier saint-ulric,Saint-Ulric,QC,G0J3H0 {"section": "communique-de-presse", "date": "29.01.2002", "content": "- -- -8ANQ1lflitANCE = 29 janvier 2002 COMMUNIQUE \u00b7 PASSAGE A L'EURO .FIDUCIAIRE : CIRCULATION DES BILLETS AU 28 JANVIER 2002 Au 28 janvier 2002, l'encours des billets en euros atteignait 22,6 milliards d'euros . Outre les 9,4 milliards d'euros mis a la disposition des agents economiques par la Banque de France au titre de la pre-alimentation, 13,.2 milliards d'euros ont ete preleves a la Banque de France depuis le 1er janvier par les banques et La Poste. Par ailleurs, le montant des pieces en euro en circulation depassait 2 milliards d'euros. Pour sa part, l'encours des billets en circulation en francs etait revenu de 31,5 milliards d'euros au 31 decembre 2001 a 15,4 milliards d'euros au 28 janvier 2002. Le retour .des billets en francs s'effectue progressivement, de mariiere conforme au scenario d'introduction des pieces et des billets en euros. Direction de la Communication Service de Presse Tel. 
01 42 92 39 00 ", "author": null}{ "contacts": [], "guideStarURL": "http://www.guidestarindia.org/Summary.aspx?CCReg=7192", "name": " And Development Society", "primaryEmail": "", "organisationType": [ "Direct Service" ], "telephone": [ "919331536304" ], "mainAddrress": { "state": "West Bengal", "address": [ "Kurumbagazipura", "Narendrapur", "South 24 Parganas", "West Bengal", "700103" ] } }Universal-Omega/Preloader { "@metadata": { "authors": [ "Joetaras" ] }, "preloader-desc": "Avène ausete pe de le pre-namespace ca sonde personalizzabbele pe le pàggene nueve" } {"ast":null,"code":"var _jsxFileName = \"/home/marcio/\\xC1rea de Trabalho/Projeto-CRUD/crud/frontend/src/components/template/Footer.jsx\";\nimport './Footer.css';\nimport React from 'react';\nimport { jsxDEV as _jsxDEV } from \"react/jsx-dev-runtime\";\nexport default (props => /*#__PURE__*/_jsxDEV(\"footer\", {\n className: \"footer\",\n children: /*#__PURE__*/_jsxDEV(\"span\", {\n children: [\"Desenvolvido com \", /*#__PURE__*/_jsxDEV(\"i\", {\n className: \"fa fa-heart text-danger\"\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 7,\n columnNumber: 26\n }, this), \" por\", /*#__PURE__*/_jsxDEV(\"strong\", {\n children: [\" Gian no curso de web dev da Cod\", /*#__PURE__*/_jsxDEV(\"span\", {\n className: \"text-danger\",\n children: \"3\"\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 8,\n columnNumber: 49\n }, this), \"r\"]\n }, void 0, true, {\n fileName: _jsxFileName,\n lineNumber: 8,\n columnNumber: 9\n }, this)]\n }, void 0, true, {\n fileName: _jsxFileName,\n lineNumber: 6,\n columnNumber: 5\n }, this)\n}, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 5,\n columnNumber: 1\n}, this));","map":{"version":3,"sources":["/home/marcio/Área de Trabalho/Projeto-CRUD/crud/frontend/src/components/template/Footer.jsx"],"names":["React","props"],"mappings":";AAAA,OAAO,cAAP;AACA,OAAOA,KAAP,MAAkB,OAAlB;;AAEA,gBAAeC,KAAK,iBACpB;AAAQ,EAAA,SAAS,EAAC,QAAlB;AAAA,yBACI;AAAA,iDACqB;AAAG,MAAA,SAAS,EAAC;AAAb;AAAA;AAAA;AAAA;AAAA,YADrB,uBAEI;AAAA,kEAAwC;AAAM,QAAA,SAAS,EAAC,aAAhB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,cAAxC;AAAA;AAAA;AAAA;AAAA;AAAA,YAFJ;AAAA;AAAA;AAAA;AAAA;AAAA;AADJ;AAAA;AAAA;AAAA;AAAA,QADA","sourcesContent":["import './Footer.css'\nimport React from 'react'\n\nexport default props =>\n
\n \n Desenvolvido com por\n Gian no curso de web dev da Cod3r\n \n
"]},"metadata":{},"sourceType":"module"}{ "id": "norse-finance", "symbol": "nfi", "name": "", "platforms": { "binance-smart-chain": "0x43f001914c7d347d152f296e8539086fe49f8bd6" }, "hashing_algorithm": null, "categories": [], "description": { "en": "Norse Finance is a DeFi project built on the BSC Blockchain system that provides a secured platform which allows users to farm tokens by staking their $NFI" }, "country_origin": "NL", "genesis_date": null, "contract_address": "0x43f001914c7d347d152f296e8539086fe49f8bd6", "url": "https://norse.finance/", "explorers": [ "https://bscscan.com/token/0x43f001914c7d347d152f296e8539086fe49f8bd6" ], "twitter": "NorseFinance", "telegram": "NorseFinance" }{ "name": "comercioimovel", "version": "1.0.0", "private": true, "homepage": "https://comerciodeimovel.com.br", "main": "dist/index.js", "dependencies": { "@material-ui/core": "3.9.2", "@material-ui/icons": "3.0.2", "classnames": "2.2.6", "moment": "2.24.0", "node-sass-chokidar": "1.3.4", "nouislider": "12.1.0", "npm-run-all": "4.1.5", "prop-types": "15.6.2", "react": "16.7.0", "react-datetime": "2.16.3", "react-device-detect": "^1.6.2", "react-dom": "16.7.0", "react-router-dom": "4.3.1", "react-scripts": "2.1.3", "react-slick": "0.23.2", "react-swipeable-views": "0.13.1", "svg-react-loader": "^0.4.6" }, "devDependencies": { "babel-cli": "6.26.0", "babel-plugin-import-rename": "1.0.1", "babel-plugin-module-resolver": "^3.2.0", "babel-plugin-transform-object-rest-spread": "6.26.0", "babel-plugin-transform-react-jsx": "6.24.1", "babel-preset-es2015": "6.24.1" }, "scripts": { "build-css": "node-sass-chokidar --include-path ./src --include-path ./node_modules src/ -o src/", "watch-css": "npm run build-css && node-sass-chokidar --include-path ./src --include-path ./node_modules src/ -o src/ --watch --recursive", "start-js": "react-scripts start", "start": "npm-run-all -p watch-css start-js", "build": "npm run build-css && react-scripts build", "test": "react-scripts test --env=jsdom", "eject": "react-scripts eject", "install:clean": "rm -rf node_modules/ && rm -rf package-lock.json && npm install && npm start", "build-package-css": "node-sass-chokidar src/assets/scss/material-kit-react.scss dist/material-kit-react.css", "build-package": "npm run build-package-css && babel src --out-dir dist" }, "browserslist": [ ">0.2%", "not dead", "not ie <= 11", "not op_mini all" ] } { "url": "https://static.wikia.nocookie.net/griftlands_gamepedia_en/images/4/4a/Boosted_Entire_Supply.png/revision/latest?cb=20210808154715", "sha1": "ea68ce4d06b7bdbc5c72b97e6078601872b02435" }schools/brandenburg/BB-103512.json {"website":"my-hvp.de","school_type":"Grundschule","fax":"033201 20415","name":"Grundschule \"\" ","address":"Hechtsprung 14 14476 Potsdam/OT Groß Glienicke","id":"BB-103512","director":"","phone":"033201 20414","provider":"Kreisfreie Stadt","email":"info|at|ggg-pestalozza.de\n","state":"BB","full_time_school":false,"programs":{"programs":[]},"lon":13.104282,"lat":52.466012} 1-10 {"947623632680":{"type":"book open","bookName":"Epic%20Quest","subjectName":"school","fName":"users/"},"947623632967":{"type":"st"},"947623632968":{"type":"ss"},"947623674946":{"type":"sf"}}0 {"body": "Thanks for your answer, Greg.\n\n\nThere are no mirror or prism surfaces in this scene. Just glass and\nplastic. 
So this is weird, then.\n\n\nI did notice this behaviour on previous projects where we had\nthousands of light sources, but didn't look into it back then.\n\n\nCheers\n\n\nAxel\n\n\n\n\nOn 20 February 2017 at 17:18, < at > wrote:\n___\nAutomatically generated content from [radiance mailing-list](https://radiance-online.org/pipermail/radiance-general/2017-February/012040.html).", "attachments": [], "created_by_name": "", "created_at": "February 20, 2017 at 09:28AM", "created_by": "Axel_Jacobs", "parent_id": "radiance-general_012038", "id": "radiance-general_012040"}{ "contract": "0xae754fc7761d6b4b9da6511adb9d45af8c800457", "tool": "oyente", "start": 1563757659.541187, "end": 1563757705.656003, "duration": 46.11481595039368, "analysis": [ { "errors": [], "file": "/unique_chucks/42/0xae754fc7761d6b4b9da6511adb9d45af8c800457.sol", "name": "FoundersVesting", "evm_code_coverage": "71.5%", "integer_underflow": false, "integer_overflow": false, "parity_multisig_bug_2": false, "callstack_depth_attack_vulnerability": false, "transaction-ordering_dependence_tod": false, "timestamp_dependency": false, "re-entrancy_vulnerability": false }, { "errors": [], "file": "/unique_chucks/42/0xae754fc7761d6b4b9da6511adb9d45af8c800457.sol", "name": "Goldmint", "evm_code_coverage": "81.8%", "integer_underflow": false, "integer_overflow": false, "parity_multisig_bug_2": false, "callstack_depth_attack_vulnerability": false, "transaction-ordering_dependence_tod": false, "timestamp_dependency": false, "re-entrancy_vulnerability": false }, { "errors": [], "file": "/unique_chucks/42/0xae754fc7761d6b4b9da6511adb9d45af8c800457.sol", "name": "GoldmintUnsold", "evm_code_coverage": "95.0%", "integer_underflow": false, "integer_overflow": false, "parity_multisig_bug_2": false, "callstack_depth_attack_vulnerability": false, "transaction-ordering_dependence_tod": false, "timestamp_dependency": false, "re-entrancy_vulnerability": false }, { "errors": [ { "line": 16, "column": 20, "level": "Warning", "message": "Integer Overflow." 
} ], "file": "/unique_chucks/42/0xae754fc7761d6b4b9da6511adb9d45af8c800457.sol", "name": "MNTP", "evm_code_coverage": "86.1%", "integer_underflow": false, "integer_overflow": true, "parity_multisig_bug_2": false, "callstack_depth_attack_vulnerability": false, "transaction-ordering_dependence_tod": false, "timestamp_dependency": false, "re-entrancy_vulnerability": false }, { "errors": [], "file": "/unique_chucks/42/0xae754fc7761d6b4b9da6511adb9d45af8c800457.sol", "name": "SafeMath", "evm_code_coverage": "100.0%", "integer_underflow": false, "integer_overflow": false, "parity_multisig_bug_2": false, "callstack_depth_attack_vulnerability": false, "transaction-ordering_dependence_tod": false, "timestamp_dependency": false, "re-entrancy_vulnerability": false }, { "errors": [], "file": "/unique_chucks/42/0xae754fc7761d6b4b9da6511adb9d45af8c800457.sol", "name": "StdToken", "evm_code_coverage": "74.2%", "integer_underflow": false, "integer_overflow": false, "parity_multisig_bug_2": false, "callstack_depth_attack_vulnerability": false, "transaction-ordering_dependence_tod": false, "timestamp_dependency": false, "re-entrancy_vulnerability": false } ] }data/tracks/Gene prediction_2/tig00032588_overlapping_hits_sub_region/trackData.json0 {"featureCount":1,"formatVersion":1,"histograms":{"meta":[{"arrayParams":{"chunkSize":10000,"length":1,"urlTemplate":"hist-5000-{Chunk}.json"},"basesPerBin":"5000"}],"stats":[{"basesPerBin":"5000","max":1,"mean":1}]},"intervals":{"classes":[{"attributes":["Start","End","Strand","Id","Name","Seq_id","Source","Subfeatures","Type"],"isArrayAttr":{"Subfeatures":1}},{"attributes":["Start","End","Strand","Coverage","Id","Identity","Indels","Matches","Mismatches","Name","Seq_id","Source","Subfeatures","Type","Unknowns"],"isArrayAttr":{"Subfeatures":1}},{"attributes":["Start","End","Strand","Id","Name","Score","Seq_id","Source","Target","Type"],"isArrayAttr":{}},{"attributes":["Start","End","Strand","Id","Name","Phase","Score","Seq_id","Source","Target","Type"],"isArrayAttr":{}},{"attributes":["Start","End","Chunk"],"isArrayAttr":{"Sublist":1}}],"count":1,"lazyClass":4,"maxEnd":1554,"minStart":1129,"nclist":[[0,1129,1554,1,"tig00032588.g91.t1.path1","tig00032588.g91.t1","tig00032588_overlapping_hits_sub_region","cannabis_loc_scaffold",[[1,1129,1554,1,"100.0","tig00032588.g91.t1.mrna1","100.0","0","276","0","tig00032588.g91.t1","tig00032588_overlapping_hits_sub_region","cannabis_loc_scaffold",[[2,1129,1209,1,"tig00032588.g91.t1.mrna1.exon1","tig00032588.g91.t1",100,"tig00032588_overlapping_hits_sub_region","cannabis_loc_scaffold","tig00032588.g91.t1 1 80 +","exon"],[2,1358,1554,1,"tig00032588.g91.t1.mrna1.exon2","tig00032588.g91.t1",100,"tig00032588_overlapping_hits_sub_region","cannabis_loc_scaffold","tig00032588.g91.t1 81 276 +","exon"],[3,1129,1209,1,"tig00032588.g91.t1.mrna1.cds1","tig00032588.g91.t1",0,100,"tig00032588_overlapping_hits_sub_region","cannabis_loc_scaffold","tig00032588.g91.t1 1 80 +","CDS"],[3,1358,1554,1,"tig00032588.g91.t1.mrna1.cds2","tig00032588.g91.t1",2,100,"tig00032588_overlapping_hits_sub_region","cannabis_loc_scaffold","tig00032588.g91.t1 81 276 +","CDS"]],"mRNA","0"]],"gene"]],"urlTemplate":"lf-{Chunk}.json"}}{ "$id": "https://external-adapters.chainlinklabs.com/schemas/dns-record-check-adapter.json", "title": "Chainlink External Adapter to Check DNS Records", "description": "DNS Record Check lets query DNS over HTTPS (DoH) and check whether some record provided exists", "required": [], "type": "object", "properties": {}, "allOf": [ { "$ref": 
"https://external-adapters.chainlinklabs.com/schemas/dns-query-adapter.json" } ] } { "Title": "American Beauty", "Year": "1999", "Rated": "R", "Released": "01 Oct 1999", "Runtime": "122 min", "Genre": "Drama, Romance", "Director": "", "Writer": "", "Actors": ", , , ", "Plot": "A sexually frustrated suburban father has a mid-life crisis after becoming infatuated with his daughter's best friend.", "Language": "English", "Country": "USA", "Awards": "Won 5 Oscars. Another 103 wins & 96 nominations.", "Poster": "http://ia.media-imdb.com/images/M/MV5BMjM4NTI5NzYyNV5BMl5BanBnXkFtZTgwNTkxNTYxMTE@._V1_SX300.jpg", "Metascore": "86", "imdbRating": "8.4", "imdbVotes": "773,411", "imdbID": "tt0169547", "Type": "movie", "Response": "True" }10/Bericht, Gutachten, Programm/10-189594.json { "vorgangId": "189594", "VORGANG": { "WAHLPERIODE": "10", "VORGANGSTYP": "Bericht, Gutachten, Programm", "TITEL": "Bericht der Bundesregierung über die künftige Gestaltung der Gemeinschaftsaufgabe \"Verbesserung der Agrarstruktur und des Küstenschutzes\" hier: Rahmenplan 1985 bis 1988 (G-SIG: 10001609)", "INITIATIVE": "Bundesregierung", "AKTUELLER_STAND": "Abgeschlossen - Ergebnis siehe Vorgangsablauf", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "10/1832", "DRS_TYP": "Unterrichtung", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/10/018/1001832.pdf" }, "PLENUM": { "PLPR_KLARTEXT": "Mitteilung", "PLPR_HERAUSGEBER": "BT", "PLPR_NUMMER": "10/99", "PLPR_SEITEN": "7219A", "PLPR_LINK": "http://dipbt.bundestag.de:80/dip21/btp/10/10099.pdf#P.7219" }, "EU_DOK_NR": "", "SACHGEBIET": "Landwirtschaft und Ernährung", "SCHLAGWORT": [ "Agrarkredit", "Agrarstruktur", "Ausgleichszulage", "Bericht der Bundesregierung", "Dorfsanierung", "Einzelbetriebliches Förderungsprogramm für Landwirtschaftsbetriebe", "Forstschaden", { "_fundstelle": "true", "__cdata": "Gemeinschaftsaufgabe Verbesserung der Agrarstruktur und des Küstenschutzes" }, "Holzwirtschaft", "Landwirtschaft", "Milch", "Wald" ], "ABSTRAKT": "Änderungen des Rahmenplanes im Jahre 1984, Einführung eines Agrarkreditprogramms, Abschaffung der Förderschwelle, Einschränkung der Milchviehhaltung, Förderung der Dorferneuerung, Maßnahmen aufgrund neuartiger Waldschäden, Weiterentwicklung der Agrarstrukturpolitik ab 1985, Verbesserung der Ausgleichszulage für benachteiligte Gebiete, Investitionsförderung in der Holzwirtschaft, Änderung des einzelbetrieblichen Förderungsprogramms, Finanzierung des Rahmenplans 1985, Auswirkung der EG-Agrarstrukturpolitik auf die Gemeinschaftsaufgabe " }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BT", "URHEBER": "Unterrichtung, Urheber : Bundesregierung ", "FUNDSTELLE": "03.08.1984 - BT-Drucksache 10/1832", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/10/018/1001832.pdf" }, { "ZUORDNUNG": "BT", "URHEBER": "Überweisung", "FUNDSTELLE": "12.09.1984 - BT-Plenarprotokoll 10/81, S. 5949D", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btp/10/10081.pdf#P.5949", "BESCHLUSS": { "BESCHLUSSSEITE": "5949D", "BESCHLUSSTENOR": "Überweisung" }, "ZUWEISUNG": [ { "AUSSCHUSS_KLARTEXT": "Ausschuss für Ernährung, Landwirtschaft und Forsten", "FEDERFUEHRUNG": "federführend" }, { "AUSSCHUSS_KLARTEXT": "Haushaltsausschuss" }, { "AUSSCHUSS_KLARTEXT": "Ausschuss für Raumordnung, Bauwesen und Städtebau" } ] }, { "ZUORDNUNG": "BT", "URHEBER": "Mitteilung", "FUNDSTELLE": "09.11.1984 - BT-Plenarprotokoll 10/99, S. 
7219A", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btp/10/10099.pdf#P.7219", "VP_ABSTRAKT": "Mittlg: S.7219A - Mitteilung über Absehen von Berichterstattung" } ] } } { "name": "brickhack-4", "version": "0.0.0", "description": "the grand vision", "main": "src/entry.js", "scripts": { "start": "parcel index.html", "build": "parcel build index.html", "deploy": "npm run build && node bin/deploy.js" }, "repository": { "type": "git", "url": "git+https://github.com/LucidoM/BrickHack4.git" }, "author": ", ", "license": "MIT", "bugs": { "url": "https://github.com/LucidoM/BrickHack4/issues" }, "homepage": "https://github.com/LucidoM/BrickHack4#readme", "devDependencies": { "babel-plugin-transform-react-jsx": "^6.24.1", "babel-preset-env": "^1.6.1", "parcel-bundler": "^1.5.1", "shelljs": "^0.8.1" }, "dependencies": { "@hyperapp/router": "^0.4.1", "hyperapp": "^1.0.2" } } 1-10 { "name": "iansltx/flightaware-php-client", "description": "A curl-based API client for FlightAware's FlightXML v2 API", "homepage": "https://github.com/iansltx/flightaware-php-client", "license": "MIT", "require": { "php": ">=5.5.0", "ext-curl": "*" }, "autoload": { "classmap": ["FlightAwareClient.php"] } }["apigee-analytics-collector","apigee-sdk-mgmt-api","dreamface","grunt-apigee-import-api-bundle","grunt-apigee-kvm"]adobe/parliament-transformer-navigation { "name": "@adobe/parliament-transformer-navigation", "version": "1.4.0", "description": "", "main": "index.js", "scripts": { "build": "babel src --out-dir . --ignore **/__tests__", "test": "jest", "watch": "babel -w src --out-dir . --ignore **/__tests__" }, "repository": { "type": "git", "url": "git+https://github.com/adobe/parliament-transformer-navigation.git" }, "keywords": [ "gatsby", "parliament" ], "author": "Adobe", "license": "Apache-2.0", "bugs": { "url": "https://github.com/adobe/parliament-transformer-navigation/issues" }, "homepage": "https://github.com/adobe/parliament-transformer-navigation#readme", "devDependencies": { "jest": "^26.1.0", "gatsby": "^2.15.19" }, "dependencies": { "yaml": "^1.10.0" } } elanthia-online/cartograph1-10 { "id": 25409, "title": [ "[Amalfya's Weaponry]" ], "description": [ "Dark oak walls rise from a polished oak floor. Just above a suit of silver-chased armor hangs a ruby-pommeled longsword, and a varnished oak ceiling completes the picture. A dovetailed oak table and a narrow glass-topped table stand side by side in the middle of the room." 
], "paths": [ "Obvious exits: east" ], "location": "Icemule Trace", "wayto": { "25404": "east" }, "timeto": { "25404": 0.2 } }{"nom":"","circ":"2ème circonscription","dpt":"Loir-et-Cher","inscrits":429,"abs":212,"votants":217,"blancs":14,"nuls":1,"exp":202,"res":[{"nuance":"LR","nom":"","voix":135},{"nuance":"REM","nom":"","voix":67}]}{ "id": 71036534, "name": "electron-with-express-master", "fullName": "zarabon/electron-with-express-master", "owner": { "login": "zarabon", "id": 18052706, "avatarUrl": "https://avatars.githubusercontent.com/u/18052706?v=3", "gravatarId": "", "url": "https://api.github.com/users/zarabon", "htmlUrl": "https://github.com/zarabon", "followersUrl": "https://api.github.com/users/zarabon/followers", "subscriptionsUrl": "https://api.github.com/users/zarabon/subscriptions", "organizationsUrl": "https://api.github.com/users/zarabon/orgs", "reposUrl": "https://api.github.com/users/zarabon/repos", "receivedEventsUrl": "https://api.github.com/users/zarabon/received_events", "type": "User" }, "private": false, "htmlUrl": "https://github.com/zarabon/electron-with-express-master", "description": null, "fork": false, "url": "https://api.github.com/repos/zarabon/electron-with-express-master", "forksUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/forks", "teamsUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/teams", "hooksUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/hooks", "eventsUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/events", "tagsUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/tags", "languagesUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/languages", "stargazersUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/stargazers", "contributorsUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/contributors", "subscribersUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/subscribers", "subscriptionUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/subscription", "mergesUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/merges", "downloadsUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/downloads", "deploymentsUrl": "https://api.github.com/repos/zarabon/electron-with-express-master/deployments", "createdAt": "2016-10-16T07:34:50.000Z", "updatedAt": "2016-10-16T07:39:39.000Z", "pushedAt": "2016-10-16T07:41:32.000Z", "gitUrl": "git://github.com/zarabon/electron-with-express-master.git", "sshUrl": "git@github.com:zarabon/electron-with-express-master.git", "cloneUrl": "https://github.com/zarabon/electron-with-express-master.git", "svnUrl": "https://github.com/zarabon/electron-with-express-master", "homepage": null, "size": 29870, "stargazersCount": 0, "watchersCount": 0, "language": "JavaScript", "hasIssues": true, "hasDownloads": true, "hasWiki": true, "hasPages": false, "forksCount": 0, "mirrorUrl": null, "openIssuesCount": 0, "openIssues": 0, "watchers": 0, "defaultBranch": "master", "permissions": { "admin": false, "push": false, "pull": true }, "license": { "key": "gpl-3.0", "name": "GNU General Public License v3.0", "spdxId": "GPL-3.0", "url": "https://api.github.com/licenses/gpl-3.0", "featured": true }, "networkCount": 0, "subscribersCount": 1, "status": 404, "packageJSON": { "name": "electron-with-express", "description": "Demonstrates spawning Express from Electron and using it to 
serve content", "version": "0.5.0", "main": "main.js", "authors": [ " <>" ], "license": "GPL-3.0", "private": true, "scripts": { "start": ".\\node.exe start-electron.js" }, "dependencies": { "electron": "^1.4.1", "electron-debug": "^1.0.1", "jquery": "^3.1.1", "keymaster": "^1.6.2", "lodash": "^4.16.2", "nodejs": "0.0.0", "request": "^2.75.0", "strip": "^3.0.0" }, "devDependencies": { "devtron": "^1.4.0" } }, "packageStatus": 200, "contributors": [ "zarabon" ], "firstCommit": { "sha": "0634cb63a58b6cad3f6d42f1bdfbe74d25ff1dcd", "commit": { "author": { "name": "Павел", "email": "", "date": "2016-10-16T07:38:13Z" }, "committer": { "name": "Павел", "email": "", "date": "2016-10-16T07:38:13Z" }, "message": "My first desktop app on node(using Electron+Express+Angular2)", "tree": { "sha": "d50e77339c14353d619caa815682dcbdefd81071", "url": "https://api.github.com/repos/zarabon/electron-with-express-master/git/trees/d50e77339c14353d619caa815682dcbdefd81071" }, "url": "https://api.github.com/repos/zarabon/electron-with-express-master/git/commits/0634cb63a58b6cad3f6d42f1bdfbe74d25ff1dcd", "commentCount": 0 } }, "filename": "zarabon___electron-with-express-master.json", "releases": [], "lastFetchedAt": "2017-05-04T15:13:57.603Z" }{ "name": "", "achievementgroup": "Wonders of the World", "ishidden": true, "sortorder": 1097, "stages": 1, "stage1": { "title": "Yamada Go's Wooden Mallet", "ps5title": "Inazuma: Yamada Go's Wooden Mallet", "description": "See through the illusions of the Tanuki several times.", "progress": 15, "reward": { "name": "Primogem", "count": 5 } } }{ "deno.enable": true, "deno.lint": true, "deno.unstable": true, "files.associations": { ".env.*": "dotenv" }, "[javascript]": { "editor.defaultFormatter": "denoland.vscode-deno" }, "[typescript]": { "editor.defaultFormatter": "denoland.vscode-deno" }, "files.eol": "\n" } {"configurations": [{ "name": "AVR", "includePath": ["${workspaceFolder}/**", "/opt/avr8-gnu-toolchain-linux_x86_64/avr/include/**", "C:/APPZ/Atmel/Studio/7.0/toolchain/avr8/avr8-gnu-toolchain/avr/include/**"], "defines": ["__AVR_ATmega328P__", "F_CPU"], "compilerPath": "C:/APPZ/Atmel/Studio/7.0/toolchain/avr8/avr8-gnu-toolchain/bin/avr-gcc.exe", "cStandard": "c99", "cppStandard": "c++11", "intelliSenseMode": "gcc-x64"}], "version": 4} jwzimmer/tv-tropes {"ClosetShuffle": ["HilarityEnsues", "CurtainCamouflage", "TheStateroomSketch", "ExplodingCloset", "ClassTrip", "OriginalCharacter", "HilarityEnsues", "UnwantedHarem", "AnIcePerson", "ChickMagnet", "UnwantedHarem", "OnlySaneMan", "PlayedForLaughs", "HarmlessFreezing", "GoodShepherd", "HilarityEnsues", "UnusuallyUninterestingSight", "PlayedWith", "DreadfulMusician", "RussianHumour", "UpToEleven", "PottyFailure", "BernieRhodenbarr", "GentlemanThief", "ExactlyWhatItSaysOnTheTin", "SadistTeacher", "Gaslighting", "SarcasticConfession", "SecretRelationship", "PlayedForDrama", "HowWeGotHere", "SplitScreenPhoneCall", "PlayedForDrama", "AxesAtSchool", "VictimOfTheWeek", "WallOfWeapons", "CrazyPrepared", "Gaslighting", "TheMatchmaker", "HilarityEnsues", "OverprotectiveDad", "ClockTower", "StealthHiBye", "LampshadeHanging", "UrbanLegendLoveLife"]}{"url": "https://www.michaeljacksonmoon.com/michael-jackson-favourite-movie/", "text": "Michael\u2019s Favorite movie ever was the movie E.T. , he wrote this himself in his book Moonwalk. He liked the movie E.T. so much he decided to work together with on the E.T. children story book.\n\nMichael related to odd looking creature E.T. , who came from space. E.T. 
was weird looking but the strange creature from space would melt your heart when he looked to the camera with his big eyes ad spoke the word\u2019s \u201cET phone home\u201d . said \u201d If ET would exist he would come to Michael \u201c. Michael was very happy when he \u201cmet \u201d the little alien and there\u2019s where the famous shot with his favourite star came from\u2026", "images": ["https://secure.gravatar.com/avatar/3f8ae9729ab3c686779abb2da779fa65?s=48&d=%3Cpath_to_url%3E&r=G", "http://www.michaeljacksonmoon.com/images/et.jpg", "https://secure.gravatar.com/avatar/330ceafec68a7d3f02a8c11029c45088?s=48&d=%3Cpath_to_url%3E&r=G", "https://secure.gravatar.com/avatar/c5cbb821cb426d3565d49d11bea73594?s=48&d=%3Cpath_to_url%3E&r=G", "https://secure.gravatar.com/avatar/e5d0e851ce956b99197ecd7ce78f4c2d?s=48&d=%3Cpath_to_url%3E&r=G", "http://www.michaeljacksonmoon.com/images/Michael-Jackson-ET-The-Extra-Terr-401575.jpg", "https://secure.gravatar.com/avatar/af6ffd945d43da4fcc76b48be515028a?s=48&d=%3Cpath_to_url%3E&r=G", "https://secure.gravatar.com/avatar/ba3dc912f3168e132fc9ec0cf5d43cfa?s=48&d=%3Cpath_to_url%3E&r=G", "http://www.michaeljacksonmoon.com/images/michael_jackson_et.jpg", "https://secure.gravatar.com/avatar/e92d32c9a12518a6d83a175bb49bc27b?s=48&d=%3Cpath_to_url%3E&r=G", "https://secure.gravatar.com/avatar/6d420fa9ff3fda3dbc2eb3053dd187e5?s=48&d=%3Cpath_to_url%3E&r=G", "https://www.michaeljacksonmoon.com/wp-includes/images/smilies/icon_smile.gif", "https://secure.gravatar.com/avatar/f3300f907a7339ce6b9e0c8639eb336d?s=48&d=%3Cpath_to_url%3E&r=G", "https://secure.gravatar.com/avatar/6ecbcd4aa65b74e6d993da008884db2b?s=48&d=%3Cpath_to_url%3E&r=G"], "top_img": "", "keywords": [], "authors": [], "canonical_link": "https://www.michaeljacksonmoon.com/michael-jackson-favourite-movie/", "title": " favourite movie? 
\u00ab ", "meta_data": {"generator": "WordPress 3.5.1"}, "movies": [], "publish_date": null, "source": "https://www.michaeljacksonmoon.com", "summary": ""}{"ApplicationID": "EDU00004", "Role": "SchoolAdmin", "PossFields": ["Status", "Class", "Section", "Term", "Day", "TimeSlot", "Period", "PeriodType", "Subject"]}10-100 {"vendor":"liberica","filename":"bellsoft-jdk15.0.2+10-macos-aarch64-lite.pkg","release_type":"ga","version":"15.0.2+10","java_version":"15.0.2+10","jvm_impl":"hotspot","os":"macosx","architecture":"aarch64","file_type":"pkg","image_type":"jdk","features":["lite"],"url":"https://github.com/bell-sw/Liberica/releases/download/15.0.2+10/bellsoft-jdk15.0.2+10-macos-aarch64-lite.pkg","md5":"eddb17f27df2e4ba4620975df853a285","md5_file":"bellsoft-jdk15.0.2+10-macos-aarch64-lite.pkg.md5","sha1":"196e5c4fc757c97a285e94d241113882961484d8","sha1_file":"bellsoft-jdk15.0.2+10-macos-aarch64-lite.pkg.sha1","sha256":"ba78f45ed90b9178c3692b88626af416a492bb9af377b8275117f846d9a1bf14","sha256_file":"bellsoft-jdk15.0.2+10-macos-aarch64-lite.pkg.sha256","sha512":"f25d56ec4359876d9d21bfcb4eea16de29d8e3c0d882fd99aa8e3f5e47b5e09c8981f5dfb5a3560c2f7ae9e7fffd27bad9a48fccbab781ebddc80c2fcda5cb2f","sha512_file":"bellsoft-jdk15.0.2+10-macos-aarch64-lite.pkg.sha512","size":71804012} { "name": "es-ddd-components", "version": "0.1.3", "description": "A set of components to build event sourced business domain layers", "main": "dist/main/lib/index.js", "typings": "dist/main/lib/index.d.ts", "module": "dist/module/lib/index.js", "repository": "https://github.com/gtriggiano/node-es-ddd-components", "license": "MIT", "keywords": [ "DDD", "Event Sourcing", "Domain Model Tactical Design" ], "scripts": { "info": "npm-scripts-info", "build": "run-s clean && run-p build:*", "build:main": "tsc -p tsconfig.json", "build:module": "tsc -p tsconfig.module.json", "clean": "trash coverage dist pages", "fix": "run-s fix:*", "fix:prettier": "prettier \"src/**/*.ts\" --write", "fix:tslint": "tslint --fix --project .", "test": "run-s build test:*", "test:lint": "tslint --project . && prettier \"src/**/*.ts\" --list-different", "test:unit": "jest", "dev": "run-s clean build:main test:unit && run-p \"build:main -- -w\" \"test:unit -- --watch\"", "watch": "run-s clean \"build:main -- -w\"", "cov": "npm test && opn pages/coverage/lcov-report/index.html", "check:coverage": "istanbul check-coverage --statements 100 --functions 100 --branches -2 --lines 100 pages/coverage/coverage-final.json", "send:coverage": "codecov -f pages/coverage/coverage-final.json -t $CODECOV_TOKEN", "typedoc": "run-s typedoc:html && opn pages/typedoc/index.html", "typedoc:html": "typedoc src/lib --target ES6 --mode file --out pages/typedoc", "typedoc:json": "typedoc src/lib --target ES6 --mode file --json pages/typedoc/typedoc.json", "gh-pages:publish": "gh-pages -m \"[ci skip] Updates\" -d pages", "version": "standard-version", "reset": "git clean -dfx && git reset --hard && rm -rf node_modules && npm i", "all": "run-s reset test typedoc:*", "commit": "git-cz", "prepare-release": "run-s all version" }, "scripts-info": { "info": "Display information about the package scripts", "build": "Clean and rebuild the project", "fix": "Try to automatically fix any linting problems", "test": "Clean and rebuild the project. 
Then lint and unit test the project", "dev": "Watch and rebuild the project on save, then rerun tests", "watch": "Watch and rebuild the project on save", "cov": "Rebuild, run tests, then create and open the coverage report", "typedoc": "Generate TypeDoc documentation and open it in a browser", "typedoc:html": "Generate TypeDoc documentation", "typedoc:json": "Generate documentation in typedoc JSON format", "version": "Bump package.json version, update CHANGELOG.md, tag release", "reset": "Delete all untracked files and reset the repo to the last commit", "commit": "Make a new commit through commitizen", "prepare-release": "One-step: clean, build, test, generate TypeDoc, and prepare a release" }, "dependencies": { "lodash": "^4.17.11" }, "devDependencies": { "@babel/core": "^7.2.0", "@babel/preset-env": "^7.2.0", "@types/jest": "^23.3.10", "babel-core": "7.0.0-bridge.0", "babel-jest": "^23.6.0", "codecov": "^3.1.0", "commitizen": "^3.0.4", "cz-conventional-changelog": "^2.1.0", "gh-pages": "^2.0.0", "istanbul": "^0.4.5", "jest": "^23.6.0", "jest-sourcemaps": "^1.0.1", "jest-stare": "^1.8.0", "npm-run-all": "^4.1.3", "npm-scripts-info": "^0.3.9", "opn-cli": "^3.1.0", "prettier": "^1.14.3", "standard-version": "^4.4.0", "trash-cli": "^1.4.0", "tslint": "^5.11.0", "tslint-config-prettier": "^1.15.0", "tslint-config-standard": "^8.0.1", "tslint-immutable": "^4.7.0", "typedoc": "^0.13.0", "typescript": "^3.2.2", "watch": "^1.0.2" }, "config": { "commitizen": { "path": "cz-conventional-changelog" } }, "prettier": { "printWidth": 80, "semi": false, "singleQuote": true, "trailingComma": "es5", "tabWidth": 2, "useTabs": false }, "jest-stare": { "resultDir": "pages", "coverageLink": "./coverage/lcov-report/index.html" } } library.json1-10 { "name": "OptoDebounce", "keywords": ", , input, converter, signal", "description": "debounce 50/100hz opto couplers.", "include": "OptoDebounce", "repository": { "type": "git", "url": "https://github.com/dirkx/OptoDebounce.git" }, "dependencies": { "frameworks": "arduino" }, "frameworks": "arduino", "platforms": "*" } {"player_id": 760, "name": "", "position": "RB", "height": "6-0", "weight": "216", "current_team": null, "birth_date": "1956-06-22", "birth_place": "Spotsylvania, VA", "death_date": null, "college": "Maryland", "high_school": "Spotsylvania, VA", "draft_team": "Green Bay Packers", "draft_round": "2", "draft_position": "44", "draft_year": "1979", "current_salary": null, "hof_induction_year": null}
{ "type": "minecraft:crafting_shapeless", "ingredients": [ { "item": "integratedtunnels:part_interface_fluid" }, { "item": "integrateddynamics:variable_transformer_output" } ], "result": { "item": "integratedtunnels:part_exporter_fluid" } }datas_github/datas2/repository6847.json {"name": "neo4j-flask", "description": "Flaskr Extended with Neo4j and Py2neo.", "license": null, "starNum": 174, "folkNum": 62, "watchNum": 174, "topic": []}{"name":"","alt_name":"Universitas Trilogi","country":"Indonesia","state":null,"address":{"street":"Jl.Taman Makam Pahlawan No.1","city":"Kalibata","province":"Jakarta Selatan","postal_code":"12760"},"contact":{"telephone":"+62(21) 798-0011","website":"http:\/\/www.stekpi.ac.id","email":"","fax":"+62(21) 798-1352"},"funding":"Private","languages":null,"academic_year":null,"accrediting_agency":"BAN (National Accreditation Agency)"} {"derivation": "from G264 (\u1f01\u03bc\u03b1\u03c1\u03c4\u03af\u03b1);", "kjv_def": "offence, sin(-ful)", "lemma": "\u1f01\u03bc\u03b1\u03c1\u03c4\u03af\u03b1", "frequency": 174, "strongs_def": " a sin (properly abstract)", "outline": "
  1. equivalent to 264
    1. to be without a share in
    2. to miss the mark
    3. to err, be mistaken
    4. to miss or wander from the path of uprightness and honour, to do or go wrong
    5. to wander from the law of God, violate God's law, sin
  2. that which is done wrong, sin, an offence, a violation of the divine law in thought or in act
  3. collectively, the complex or aggregate of sins committed either by a single person or by many
"}{ "name": "rollup-plugin-userscript-metablock", "version": "0.1.2", "description": "Transform json file to userscript metablock and append on", "main": "dist/rollup-plugin-userscript-metablock.common.js", "scripts": { "test": "npm run build && cd test && babel --presets env -o compiled.js test.js && mocha compiled.js", "build": "rollup -c" }, "repository": { "type": "git", "url": "git+https://github.com/FlandreDaisuki/rollup-plugin-userscript-metablock.git" }, "bugs": { "url": "https://github.com/FlandreDaisuki/rollup-plugin-userscript-metablock/issues" }, "homepage": "https://github.com/FlandreDaisuki/rollup-plugin-userscript-metablock#readme", "author": "FlandreDaisuki <>", "license": "MIT", "devDependencies": { "babel-cli": "^6.26.0", "babel-core": "^6.26.3", "babel-preset-env": "^1.7.0", "mocha": "^4.1.0", "rollup": "^0.51.8", "rollup-plugin-babel": "^3.0.7" }, "babel": { "presets": [ [ "env", { "modules": false } ] ] } } sample-data/data-1891.json {"_id":"55ee5e452c199adbd73ef131","index":1891,"guid":"b5e7cf16-f4f6-4ab1-ad0f-62ac69446991","isActive":false,"balance":"$18,152.57","picture":"http://placehold.it/32x32","age":54,"eyeColor":"blue","name":"","gender":"female","company":"IRACK","email":"","phone":"+1 (957) 411-2832","address":"394 Portland Avenue, Washington, Rhode Island, 6308","about":"Do labore ut pariatur qui reprehenderit. Aliquip dolore aute nulla proident et ex anim reprehenderit minim aliquip. Cillum aute cupidatat aute laborum consequat non minim est incididunt laborum ad excepteur. Commodo irure elit nisi aute. Voluptate qui occaecat deserunt eu qui excepteur commodo id voluptate aute et enim aute. Ullamco id eu et laborum labore adipisicing. Anim deserunt quis mollit pariatur reprehenderit reprehenderit laborum exercitation.\r\n","registered":"2015-07-13T02:13:53 -07:00","tags":["id","deserunt","labore","nostrud","eu","officia","irure"],"friends":[{"id":0,"name":""},{"id":1,"name":""},{"id":2,"name":""}],"greeting":"Hello, ! 
You have 2 unread messages.","favoriteFruit":"strawberry","location":{"latitude":-10.614075,"longitude":153.455085},"docFormat":"json","triples":[{"triple":{"subject":"/sample-data/data-1891.json","predicate":"http://www.w3.org/2000/01/rdf-schema#label","object":"Deirdre Yang"}},{"triple":{"subject":"/sample-data/data-1891.json","predicate":"http://www.w3.org/1999/02/22-rdf-syntax-ns#type","object":"http://xmlns.com/foaf/0.1/Person"}},{"triple":{"subject":"/sample-data/data-1891.json","predicate":"http://xmlns.com/foaf/0.1/knows","object":"/sample-data/data-234.json"}},{"triple":{"subject":"/sample-data/data-1891.json","predicate":"http://xmlns.com/foaf/0.1/knows","object":"/sample-data/data-2937.json"}},{"triple":{"subject":"/sample-data/data-1891.json","predicate":"http://xmlns.com/foaf/0.1/knows","object":"/sample-data/data-1621.json"}}]}0 {"questions": [{"player_1": {"name": "", "player_stat": 37.0}, "player_2": {"name": "", "player_stat": 108.0}, "stat": "wickets", "skill": "BOWL", "question_text": "Who has taken more wickets?", "greater": true}, {"player_1": {"name": "", "player_stat": 3366.0}, "player_2": {"name": "", "player_stat": 430.0}, "stat": "runs_given", "skill": "BOWL", "question_text": "Who has given away less runs?", "greater": false}, {"player_1": {"name": "", "player_stat": 7.86}, "player_2": {"name": "", "player_stat": 6.87}, "stat": "economy", "skill": "BOWL", "question_text": "Who has the better economy rate?", "greater": false}, {"player_1": {"name": "", "player_stat": 67.0}, "player_2": {"name": "", "player_stat": 46.0}, "stat": "highest", "skill": "BAT", "question_text": "Who has the greater highest score?", "greater": true}, {"player_1": {"name": "", "player_stat": 16.0}, "player_2": {"name": "", "player_stat": 158.0}, "stat": "sixes", "skill": "BAT", "question_text": "Who has hit more sixes?", "greater": true}, {"player_1": {"name": "", "player_stat": 3.0}, "player_2": {"name": "", "player_stat": 1.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 26.46}, "player_2": {"name": "", "player_stat": 36.52}, "stat": "average", "skill": "BAT", "question_text": "Who has the better batting average?", "greater": true}, {"player_1": {"name": "", "player_stat": 43.0}, "player_2": {"name": "", "player_stat": 54.0}, "stat": "wickets", "skill": "BOWL", "question_text": "Who has taken more wickets?", "greater": true}, {"player_1": {"name": "", "player_stat": 3.0}, "player_2": {"name": "", "player_stat": 2.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 19.0}, "player_2": {"name": "", "player_stat": 193.0}, "stat": "sixes", "skill": "BAT", "question_text": "Who has hit more sixes?", "greater": true}, {"player_1": {"name": "", "player_stat": 37.5}, "player_2": {"name": "", "player_stat": 36.26}, "stat": "average", "skill": "BOWL", "question_text": "Who has the better bowling average?", "greater": false}, {"player_1": {"name": "", "player_stat": 41.13}, "player_2": {"name": "", "player_stat": 36.9}, "stat": "average", "skill": "BAT", "question_text": "Who has the better batting average?", "greater": true}, {"player_1": {"name": "", "player_stat": 93.0}, "player_2": {"name": "", "player_stat": 82.0}, "stat": "highest", "skill": "BAT", "question_text": "Who has the greater highest score?", "greater": true}, {"player_1": {"name": "", "player_stat": 1570.0}, "player_2": {"name": "", "player_stat": 1118.0}, 
"stat": "runs_given", "skill": "BOWL", "question_text": "Who has given away less runs?", "greater": false}, {"player_1": {"name": "", "player_stat": 15.5}, "player_2": {"name": "", "player_stat": 15.5}, "stat": "strike_rate", "skill": "BOWL", "question_text": "Who has the better bowling strike rate?", "greater": false}, {"player_1": {"name": "", "player_stat": 27.22}, "player_2": {"name": "", "player_stat": 34.95}, "stat": "average", "skill": "BAT", "question_text": "Who has the better batting average?", "greater": true}, {"player_1": {"name": "", "player_stat": 213.0}, "player_2": {"name": "", "player_stat": 11.0}, "stat": "sixes", "skill": "BAT", "question_text": "Who has hit more sixes?", "greater": true}, {"player_1": {"name": "", "player_stat": 3.0}, "player_2": {"name": "", "player_stat": 5.0}, "stat": "hundreds", "skill": "BAT", "question_text": "Who has scored more hundreds?", "greater": true}, {"player_1": {"name": "", "player_stat": 3.0}, "player_2": {"name": "", "player_stat": 7.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 12.0}, "player_2": {"name": "", "player_stat": 1.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}]}{"images":[{"startdate":"20220515","fullstartdate":"202205150700","enddate":"20220516","url":"/th?id=OHR.BerninaBloodMoon_EN-US5538561384_1920x1080.jpg&rf=LaDigue_1920x1080.jpg&pid=hp","urlbase":"/th?id=OHR.BerninaBloodMoon_EN-US5538561384","copyright":"Panoramic view of the Bernina Range with blood moon, Eastern Alps, Engadin, Switzerland (© /Shutterstock)","copyrightlink":"https://www.bing.com/search?q=Total+lunar+eclipse&form=hpcapt&filters=HpDate%3a%2220220515_0700%22","title":"Get ready for the blood moon","quiz":"/search?q=Bing+homepage+quiz&filters=WQOskey:%22HPQuiz_20220515_BerninaBloodMoon%22&FORM=HPQUIZ","wp":true,"hsh":"3b435346a48ae25eb5958bad84a7e843","drk":1,"top":1,"bot":1,"hs":[]}],"tooltips":{"loading":"正在加载...","previous":"上一个图像","next":"下一个图像","walle":"此图片不能下载用作壁纸。","walls":"下载今日美图。仅限用作桌面壁纸。"}}1-10 { "id": "nodebb-plugin-global-chat", "name": "Global Chat", "description": "Adds a global chat room to NodeBB", "url": "https://github.com/NodeBB/nodebb-plugin-global-chat", "library": "index.js", "hooks": [ { "hook": "static:app.load", "method": "init" }, { "hook": "action:user.create", "method": "addUser" }, { "hook": "filter:messaging.loadRoom", "method": "roomLoad" }, { "hook": "filter:messaging.isRoomOwner", "method": "isRoomOwner" }, { "hook": "filter:messaging.addUsersToRoom", "method": "roomAddUsers" }, { "hook": "filter:messaging.notify", "method": "shouldNotify" }, { "hook": "filter:admin.header.build", "method": "adminMenu" } ], "templates": "templates", "languages": "languages", "less": [ "public/fixes.less" ], "scripts": [ "public/client.js" ], "acpScripts": [ "public/admin.js" ] }http://data.doremus.org/artist/9e9a892b-b065-35fb-a23b-adf3f2de16d2 http://data.doremus.org/artist/05b85db3-a916-3acc-b782-b4aa86f891fc http://data.doremus.org/artist/765726ee-efdb-30de-95a4-760f198d3063 http://data.doremus.org/artist/a35fc8b9-765e-343e-b632-7ba2c4ff07caJamesFrost/twitch-emoji10-100 
{"template":{"small":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/1.0","medium":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/2.0","large":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/3.0"},"channels":{"totalarmy":{"title":"TotalArmy","channel_id":60746966,"link":"http://twitch.tv/totalarmy","desc":null,"plans":{"$4.99":null,"$9.99":null,"$24.99":null},"id":"totalarmy","first_seen":null,"badge":"https://static-cdn.jtvnw.net/badges/v1/aa4d540d-9ce8-47d4-908f-a3eade6f03d3/1","badge_starting":"https://static-cdn.jtvnw.net/badges/v1/aa4d540d-9ce8-47d4-908f-a3eade6f03d3/3","badge_3m":null,"badge_6m":null,"badge_12m":null,"badge_24m":null,"badges":[{"image_url_1x":"https://static-cdn.jtvnw.net/badges/v1/aa4d540d-9ce8-47d4-908f-a3eade6f03d3/1","image_url_2x":"https://static-cdn.jtvnw.net/badges/v1/aa4d540d-9ce8-47d4-908f-a3eade6f03d3/2","image_url_4x":"https://static-cdn.jtvnw.net/badges/v1/aa4d540d-9ce8-47d4-908f-a3eade6f03d3/3","description":"Subscriber","title":"Subscriber","click_action":"subscribe_to_channel","click_url":""}],"bits_badges":null,"cheermote1":null,"cheermote100":null,"cheermote1000":null,"cheermote5000":null,"cheermote10000":null,"set":13175,"emotes":[{"code":"totalarmyCabra","image_id":50447,"set":13175},{"code":"totalarmyTrem","image_id":50472,"set":13175}]}}} 1-10 { "address": "1RedkCkVaXuVXrqCMpoXQS29bwaqsuFdL", "cert_auth_type": "web", "cert_sign": " "cert_user_id": "", "files": { "data.json": { "sha512": "fd5d3db5155165242835774d799c568528517f4efb6d56c1d4987ded589eef75", "size": 343 } }, "inner_path": "data/users/1GGYdxuTXGez39nj7SgHSNgZo2CSncnwL1/content.json", "modified": 1471101585.111488, "signs": { "1GGYdxuTXGez39nj7SgHSNgZo2CSncnwL1": " } }0 { "name": "nlw-projeto", "description": "Projeto produzido durante a Next level week, se trata de uma plataforma de estudos online chamada Proofy, desenvolvida com conceitos de HTML5, CSS3 e JavaScript", "scripts": { "dev": "nodemon src/server.js" }, "repository": { "type": "git", "url": "git+https://github.com/Vitor-Alvesz/NLW-Projeto.git" }, "bugs": { "url": "https://github.com/Vitor-Alvesz/NLW-Projeto/issues" }, "homepage": "https://github.com/Vitor-Alvesz/NLW-Projeto#readme", "dependencies": { "express": "^4.17.1", "nunjucks": "^3.2.2", "sqlite-async": "^1.1.0" }, "devDependencies": { "nodemon": "^2.0.4" } } {"brief":"slaughter","long":"Meaning: to \"slaughter\" (animals or men).
Usage: kill, (make) slaughter, slay.
Source: a primitive root;"} {"pe_name":"SettingsHandlers_Devices.dll.mui","pe_type":267,"pe_size":2560,"pe_subsystem":2,"pe_subsystem_caption":"The Windows graphical user interface (GUI) subsystem","pe_path":"c:\\Windows\\WinSxS\\amd64_microsoft-windows-s..s-devices.resources_31bf3856ad364e35_10.0.18362.1_en-us_f228999baa5b0536\\SettingsHandlers_Devices.dll.mui","pe_timedate_stamp":0,"pe_timedate_human":"1970-01-01T00:00:00.000Z","ImageDLLImports":[{"name":"","imports":0,"functions":[]}],"ImageDLLExports":{"exports":0,"functions":[]},"ImageHashSignatures":{"md5":"536c7fda9a5598249403c231b766c0e2","sha2":"93138164338f11f514ac25eeb908bb8383bda92da9fb9b810e2adcd6b476843c"}}gbetegon88/dataiku-contrib10-100 { "libraries" : [], "label" : "Tensorboard: monitor the re-training of deep learning models", "backendEnabled": true } {"yield": "Makes 4 to 6 servings", "nutritionEstimates": [], "prepTimeInSeconds": 1800, "totalTime": "1 hr 15 min", "images": [{"hostedSmallUrl": "http://lh5.ggpht.com/f5hhh41xyKtTW1CcvbgB_l43rXRIG-wXhfpEbzQc6_0GWttj9vKt3y0kKi_ALKTIPbhKmL13ws9ZMAa83XsXeg=s90", "hostedMediumUrl": "http://lh5.ggpht.com/f5hhh41xyKtTW1CcvbgB_l43rXRIG-wXhfpEbzQc6_0GWttj9vKt3y0kKi_ALKTIPbhKmL13ws9ZMAa83XsXeg=s180", "hostedLargeUrl": "http://lh5.ggpht.com/f5hhh41xyKtTW1CcvbgB_l43rXRIG-wXhfpEbzQc6_0GWttj9vKt3y0kKi_ALKTIPbhKmL13ws9ZMAa83XsXeg=s360", "imageUrlsBySize": {"90": "http://lh6.ggpht.com/Nolyg8_QZtjhwM_lpTBzCkkgLHGaitKKo-Iejj9HNaER3R4bJMCz_w1L9KU7uofnqah5JaNUnDM3VZ_-Cwo4Zg=s90-c", "360": "http://lh6.ggpht.com/Nolyg8_QZtjhwM_lpTBzCkkgLHGaitKKo-Iejj9HNaER3R4bJMCz_w1L9KU7uofnqah5JaNUnDM3VZ_-Cwo4Zg=s360-c"}}], "name": "Vegetarian Cassoulet", "source": {"sourceDisplayName": "Epicurious", "sourceSiteUrl": "http://www.epicurious.com", "sourceRecipeUrl": "http://www.epicurious.com/recipes/food/views/Vegetarian-Cassoulet-241753"}, "prepTime": "30 Min", "id": "Vegetarian-Cassoulet-Epicurious-53377", "ingredientLines": ["3 medium leeks (white and pale green parts only)", "4 medium carrots, halved lengthwise and cut into 1-inch-wide pieces", "3 celery ribs, cut into 1-inch-wide pieces", "4 garlic cloves, chopped", "1/4 cup olive oil", "4 thyme sprigs", "2 parsley sprigs", "1 Turkish or 1/2 California bay leaf", "1/8 teaspoon ground cloves", "3 (19-ounce) cans cannellini or Great Northern beans, rinsed and drained", "1 quart water", "4 cups coarse fresh bread crumbs from a baguette", "1/3 cup olive oil", "1 tablespoon chopped garlic", "1/4 cup chopped parsley"], "attribution": {"html": "Vegetarian Cassoulet recipe information powered by Yummly", "url": "http://www.yummly.co/recipe/Vegetarian-Cassoulet-Epicurious-53377", "text": "Vegetarian Cassoulet recipes: information powered by Yummly", "logo": "https://static.yummly.co/api-logo.png"}, "numberOfServings": 5, "totalTimeInSeconds": 4500, "attributes": {"course": ["Main Dishes"], "cuisine": ["French"]}, "flavors": {}, "rating": 5} { "kind": "Event", "name": "Window.pagehide_event", "href": "https://developer.mozilla.org/en-US/docs/Web/API/Window/pagehide_event", "description": "The pagehide event is sent to a Window when the browser hides the current page in the process of presenting a different page from the session's history.", "refs": [ { "name": "HTML Living Standard", "href": "https://html.spec.whatwg.org/multipage/browsing-the-web.html#event-pagehide", "description": "pagehide - HTML Living Standard" }, { "name": "HTML5", "href": "https://www.w3.org/TR/html52/browsers.html#event-pagehide", "description": "pagehide - HTML5" } ], "eventName": 
"pagehide", "eventType": "PageTransitionEvent", "eventHandlerProperty": "onpagehide", "eventBubbles": true, "eventCancelable": true } {"contributors":null,"truncated":false,"text":"Here's my brief statement about the awful events in Paris. Vive !\n http:\/\/t.co\/kspeTLCQ78","in_reply_to_status_id":null,"id":552842814061424640,"favorite_count":1529,"source":"Twitter for Websites<\/a>","retweeted":false,"coordinates":null,"entities":{"user_mentions":[],"symbols":[],"trends":[],"hashtags":[],"urls":[{"url":"http:\/\/t.co\/kspeTLCQ78","indices":[80,102],"expanded_url":"http:\/\/p.ost.im\/jG6HxY","display_url":"p.ost.im\/jG6HxY"}]},"in_reply_to_screen_name":null,"id_str":"552842814061424640","retweet_count":2912,"in_reply_to_user_id":null,"favorited":false,"user":{"follow_request_sent":null,"profile_use_background_image":true,"default_profile_image":false,"id":373416209,"verified":true,"profile_image_url_https":"https:\/\/pbs.twimg.com\/profile_images\/1655254469\/photo-2_normal.JPG","profile_sidebar_fill_color":"DDEEF6","profile_text_color":"333333","followers_count":872883,"profile_sidebar_border_color":"C0DEED","id_str":"373416209","profile_background_color":"C0DEED","listed_count":6212,"profile_background_image_url_https":"https:\/\/abs.twimg.com\/images\/themes\/theme1\/bg.png","utc_offset":-18000,"statuses_count":2827,"description":"In the immortal words of Popeye the Sailor Man: I yam what I yam and that's all that I yam.","friends_count":500,"location":"","profile_link_color":"0084B4","profile_image_url":"http:\/\/pbs.twimg.com\/profile_images\/1655254469\/photo-2_normal.JPG","following":null,"geo_enabled":false,"profile_background_image_url":"http:\/\/abs.twimg.com\/images\/themes\/theme1\/bg.png","name":"","lang":"en","profile_background_tile":false,"favourites_count":32,"screen_name":"SalmanRushdie","notifications":null,"url":"http:\/\/www.salman-rushdie.com","created_at":"Wed Sep 14 14:58:14 +0000 2011","contributors_enabled":false,"time_zone":"Eastern Time (US & Canada)","protected":false,"default_profile":true,"is_translator":false},"geo":null,"in_reply_to_user_id_str":null,"possibly_sensitive":false,"lang":"en","created_at":"Wed Jan 07 15:02:52 +0000 2015","filter_level":"low","in_reply_to_status_id_str":null,"place":null}{"ast":null,"code":"import{FaTimes}from'react-icons/fa';import{jsx as _jsx}from\"react/jsx-runtime\";import{jsxs as _jsxs}from\"react/jsx-runtime\";var Task=function Task(_ref){var task=_ref.task,onDelete=_ref.onDelete,onToggle=_ref.onToggle;return/*#__PURE__*/_jsxs(\"div\",{className:\"task \".concat(task.reminder?'reminder':'',\" \"),onDoubleClick:function onDoubleClick(){return onToggle(task.id);},children:[/*#__PURE__*/_jsxs(\"h3\",{children:[task.text,/*#__PURE__*/_jsx(FaTimes,{style:{color:'red',cursor:'pointer'},onClick:function onClick(){return onDelete(task.id);}})]}),/*#__PURE__*/_jsx(\"p\",{children:task.day})]});};export default 
Task;","map":{"version":3,"sources":["/Users/zhangyuan/Desktop/react-task-tracker/src/components/Task.js"],"names":["FaTimes","Task","task","onDelete","onToggle","reminder","id","text","color","cursor","day"],"mappings":"AAAA,OAASA,OAAT,KAAwB,gBAAxB,C,wFAEA,GAAMC,CAAAA,IAAI,CAAG,QAAPA,CAAAA,IAAO,MAAkC,IAA/BC,CAAAA,IAA+B,MAA/BA,IAA+B,CAAzBC,QAAyB,MAAzBA,QAAyB,CAAfC,QAAe,MAAfA,QAAe,CAC3C,mBACI,aAAK,SAAS,gBAAUF,IAAI,CAACG,QAAL,CAAgB,UAAhB,CAA6B,EAAvC,KAAd,CAA4D,aAAa,CAAE,+BAAMD,CAAAA,QAAQ,CAACF,IAAI,CAACI,EAAN,CAAd,EAA3E,wBACI,sBACKJ,IAAI,CAACK,IADV,cAEI,KAAC,OAAD,EAAS,KAAK,CAAE,CAAEC,KAAK,CAAE,KAAT,CAAgBC,MAAM,CAAE,SAAxB,CAAhB,CAAqD,OAAO,CAAE,yBAAMN,CAAAA,QAAQ,CAACD,IAAI,CAACI,EAAN,CAAd,EAA9D,EAFJ,GADJ,cAKI,mBAAIJ,IAAI,CAACQ,GAAT,EALJ,GADJ,CASH,CAVD,CAYA,cAAeT,CAAAA,IAAf","sourcesContent":["import { FaTimes } from 'react-icons/fa'\n\nconst Task = ({ task, onDelete, onToggle }) => {\n return (\n
    <div className={`task ${task.reminder ? 'reminder' : ''} `} onDoubleClick={() => onToggle(task.id)}>\n      <h3>\n        {task.text} \n        <FaTimes style={{ color: 'red', cursor: 'pointer' }} onClick={() => onDelete(task.id)}/>\n      </h3>\n      <p>\n        {task.day}\n      </p>\n    </div>
\n )\n}\n\nexport default Task\n"]},"metadata":{},"sourceType":"module"}packets/seeds-utils/package.json10-100 { "name": "@sproutsocial/seeds-utils", "version": "1.2.1", "description": "Seeds utility functions for generating tokens", "seeds_ignore": true, "main": "package.json", "repository": { "type": "git", "url": "git+ssh://git@github.com/sproutsocial/seeds-packets.git" }, "author": ", Inc.", "license": "MIT" } {"count":1,"self":25.6235456,"total":32.178660799999996,"children":{"InitializeActuators":{"count":32,"self":0.0039854,"total":0.0039854,"children":null},"InitializeSensors":{"count":32,"self":0.0029977999999999997,"total":0.0029977999999999997,"children":null},"AgentSendState":{"count":5504,"self":0.3391612,"total":0.6704395,"children":{"CollectObservations":{"count":35232,"self":0.138773,"total":0.138773,"children":null},"WriteActionMask":{"count":35232,"self":0.0803933,"total":0.0803933,"children":null},"RequestDecision":{"count":35232,"self":0.11211199999999999,"total":0.11211199999999999,"children":null}}},"DecideAction":{"count":5504,"self":3.3968111999999997,"total":5.4332714,"children":{"RayPerceptionSensor.Perceive":{"count":70464,"self":2.0364603999999997,"total":2.0364603999999997,"children":null}}},"AgentAct":{"count":5504,"self":0.4424282,"total":0.4424282,"children":null}},"gauges":{"AgenteLaberinto.CumulativeReward":{"count":243,"max":1.973,"min":-2.10301733,"runningAverage":0.5061986,"value":1.87899911,"weightedAverage":0.7466056}},"metadata":{"timer_format_version":"0.1.0","start_time_seconds":"1614297039","unity_version":"2020.2.4f1","command_line_arguments":"E:\\Games\\Unity\\2020.2.4f1\\Editor\\Unity.exe -projectpath E:\/Proyectos Unity\/Escuela\/ML Agents\/ML Agents -useHub -hubIPC -cloudEnvironment production -hubSessionId a4765c50-7788-11eb-a715-b94e8122790b -accessToken ","communication_protocol_version":"1.5.0","com.unity.ml-agents_version":"1.8.0-preview","scene_name":"3x5 - 5x3","end_time_seconds":"1614297071"}}{ "resource_pack_name": "vanilla", "texture_name": "atlas.items", "texture_data": { "ws_paint_brush": { "textures": "textures/items/ws/paint_brush" } } }0 {"by":"anarbadalov","descendants":91,"id":18058649,"kids":[18059190,18059549,18059763,18059453,18060043,18059416,18059201,18059199],"score":70,"time":1537803895,"title":"Paper Trails: Living and Dying with Fragmented Medical Records","type":"story","url":"https://undark.org/article/medical-records-fragmentation-health-care/"}{ "version": "4.5.6", "flags": {}, "shapes": [ { "label": "river", "points": [ [ 0.597864768683273, 42.170818505338076 ], [ 5.580071174377224, 47.330960854092524 ], [ 24.263345195729535, 49.644128113879006 ], [ 23.729537366548044, 60.49822064056939 ], [ 41.52313167259786, 68.14946619217082 ], [ 58.96085409252669, 78.11387900355872 ], [ 84.76156583629893, 91.45907473309609 ], [ 101.66548042704626, 103.02491103202847 ], [ 117.3238434163701, 109.60854092526691 ], [ 127, 115 ], [ 127, 48 ], [ 121.77224199288256, 42.704626334519574 ], [ 107.0035587188612, 34.163701067615655 ], [ 91.70106761565836, 25.97864768683274 ], [ 73.72953736654804, 16.90391459074733 ], [ 58.2491103202847, 8.185053380782918 ], [ 51.48754448398576, 5.516014234875445 ], [ 40.09964412811388, 8.540925266903914 ], [ 32.804270462633454, 10.142348754448399 ], [ 36.89679715302491, 0.35587188612099646 ], [ 1.1316725978647675, 1.0676156583629892 ] ], "group_id": null, "shape_type": "polygon", "flags": {} } ], "imagePath": "../training/imgs_original_river74.jpg_0db12c59-409b-44b4-9052-c6036aa86615.jpg", "imageData": 
"imageHeight": 128, "imageWidth": 128 }inugroho/pemilu-2019-scraperresult/caleg/caleg_provinsi_322_5.json [{"namaKab":"MAMUJU","originalFilename":"KK JAYADI FOTO.jpg","namaPartai":"Partai NasDem","id":78709,"noUrut":1,"nama":", S.Ag., SH., MH","stringJenisKelamin":"Laki-Laki"},{"namaKab":"MAMUJU","originalFilename":"Foto kk Hendra 001.jpg","namaPartai":"Partai NasDem","id":80572,"noUrut":2,"nama":".E.","stringJenisKelamin":"Laki-Laki"},{"namaKab":"MAMUJU","originalFilename":"kk Wahida 001.jpg","namaPartai":"Partai NasDem","id":136384,"noUrut":3,"nama":"","stringJenisKelamin":"Perempuan"},{"namaKab":"MAMUJU","originalFilename":"foto kk kalvin 001.jpg","namaPartai":"Partai NasDem","id":88467,"noUrut":4,"nama":"Pdt.. .Th.","stringJenisKelamin":"Laki-Laki"},{"namaKab":"MAMUJU","originalFilename":"Foto Kk hatta 001.jpg","namaPartai":"Partai NasDem","id":98969,"noUrut":5,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"MAMUJU","originalFilename":"Foto Anugrah.jpg","namaPartai":"Partai NasDem","id":114991,"noUrut":6,"nama":"","stringJenisKelamin":"Perempuan"},{"namaKab":"MAMUJU","originalFilename":"Kk Azis 001.jpg","namaPartai":"Partai NasDem","id":101730,"noUrut":7,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"MAMUJU","originalFilename":"foto kk Haris 003.jpg","namaPartai":"Partai NasDem","id":80670,"noUrut":8,"nama":"., M.Ag","stringJenisKelamin":"Laki-Laki"},{"namaKab":"MAMUJU","originalFilename":"Naomi Foto 001.jpg","namaPartai":"Partai NasDem","id":159613,"noUrut":9,"nama":"KUMBAYA","stringJenisKelamin":"Perempuan"}]etc/data/counttreesspecies_comp.json "㞂‖੠蘂戅쨀\ud812쁶ڰ䆴஠ᨐͲ耧Ɯ焜耜‘씉ⰾ臬ճ䀗ᅰ័ꥦᲓ璨萌ᓥ뮧ꯞ3⡉졄⛔谈싣웪䎆退跓䭀᳂癐됙゚ᮀ伺ࡀ產萡ቈ욞惜\udc34泤嫌\udc00琐ᰑ⡊树氼汈汖긑Ê璌찀ᰀ谠舠訮鹮琰´꣜몪繒僜⍭송î댩팀착婖ᄀ䰀ఛ㔑\u001a²2ꗴ退뙐Ä嚺䌁䇰​ᬀ泵苶ᕎࢭ༳\udbcbꢞꑚ⬗訥蒲㒆䬣鏋瀊䔒륑쵒ꯕ墜Ḁ␉흫㵉㴾胉б肌와졘娛㚛첖⭵雇恶㪜‗殏ȇ甠㷁撗鯛鰤⟎Ⰺ䩂첨豎ₒ䨣튙沮弨嘩链찅耕鼘퐤딕貊潅࿕∍ࡴ톩≩ꢊ동쭒⫓涳\ud91c仧⮍쑟猅ꬼ欀ଧ왗Ⲩ⩽쫐焂寪飂痰ペ褶揍嵁ꓒ♈뎎蠥㗝䶧튛᪂쁱搙ཱི썼꣐뛧᷌➞欵閺忁ﭐ⶙ꕗ蕟ᢇ瞡\udab8庵⁩䜛톦걒Ӡ嫛꛳ꪞ笻⯍쳎柜礑﹠꘺ⵔ⅋\ude75锗祘鉬䅀㵒蒵塗偄䣓䐱㍛჌橚삖栠闎Ἆ\udbd1絠}茮䰵Ś㘔õ퉐㓌࣌䨎揳ⷞอ㲲㴫఩쁘켛亷뒸쥀譴㵡蒌敟爭郼ꢯ졳ꋿ䘼瑹┤욣所圞≰\udcc8भ⊷䊗⭊䪽擋⧓楛䔱ᤘ⿕臆觼䝿샋춞匡灱㣯㉖댌븮ਭ睒샵䎏⫃㘂炆퀗鍼靟츢苴뵌ി䇖跼ᢱ챕ྚ䡊霹㏅䭢Ꙁ랳ҤⲴ㳐鏞偘㸳폍쎒躌꾼钯䪮磔킿뚣뽡\ude8dᵣᚦ첕胎밋鮳栺갚늧⑋᯲褰ꮵ暒빪芞슈\udbb4蚪⿓맠塪ິొ⳧곭毺︳ⱃ닧㑍珥珍憛諦엩泞ꈣ썋ଶ鴱귚颃독궎꒺ᵜ獠梜ⱷ䑺㦂䳏榦䩻뇲䅬⊖䬣횺ꈮ\udaf4樚᜙蕥鲻\ud9e1㎛ኗ䤿騪潼⑘ꨉ껒姻ꖭ띊櫶ꉖ↮듫坡딳屳뗜띛䛧䍻긗廅ꀮ寍邲\udad3Ꝯ誁뺼ꩾ神怃흺鵚⫪㌼ﷇ꬝뱍⬏뺒矪轥\udb6a鹦樏Ⳉ젭烎㗬ᮽ푣㍹杌棙텤㼖뺖䈿஭詶㽖㸣꧙㨙\udeb2狮ᇏ瞻컮㘞펅㲽཈몷즀繝虪㖁\udc1f\ud8c8甹矗\udb2f탶蒝캜永뤞ꕌ窛૦粫굔踲웚匘ꆼ百쮛衷普뛱쪻\udb9b逪膌։瘶쀤祐檥ⴠ島빶훹♍蓽鶚絍կ⿩艿黷ࢻ帣࢖ۏ⯣ᴁ붲㕺ꬷ㊛チ죖떈ﷰ怉왾墆絒ጼ즿팑㟓뮃猢ﰘ匲慙얇ശᲣ伤퐾⻘ﯰ폨⏀暑ᄆ⹙僎帔辦㈱葯昐ꍘ劊辚㢆波얎賎䈣쟨齓뺴앺Ӷ䉕謉㘢⓻ೊ旽鎋゙괽៵剽疁ႌᢘ훦抂幏縃崇蒛ꤒ⚗응밨蒸鈘鏜椳ꥵ옅윍墀䭮噘♴劑꽉⤒䃘¡鮍莥咮폂ﮐ턖錚䬐\ud803┹ₔ겲ひ㴧寷✉놸垕覕≸⒖晃㱽亙㍬֖퍲窵এ⢤沗龽괌䵖焻譬現哈庰셣ː卲窝좺\udd3a᜼\udfec솤륌遼⪦᳢氢剆⼑墷ᖧ夘䮬竉├Ⅰⱷ頭䮕⹅㒦ꬤ증̱杳쇗▔኱骲ꅇ⯮꒼腍џ쫶磰㥠⣥틑弽ᥧ瑶꘯ᓃ仩આ芪셋鶪ᨽ핈൱녟ꮙ钒맙ㅧ₎齳㾱ⱕ岾ᘌ誐⬵宎픢뚺몣ᣝ饫鯍摳䃝舾㰈ꖈꯧ蚧孓ꍞ궍絘쪃磞戡䅏锩닔䴼ᦚ핖褙㼦ꖢ靐婼箫碒ꯕ芥鯪\ud972淎䔦ޣ썃綮턨ꤤ䳼\ud943퉆┚\udb65粌궃껄䴳쭚癸풫囒꣕˛糪槱宷᫴⭹ꪭ䎭짮뇙䨧捪鷣⚹麹\ude72榍䗁ꖽꞖ迤䨭可鑉핖ჷ푺⍉㾰잶뺬뱗䉫嵉덷罭큔螈歽螰圞蘨爘붒覻첄ὃ胛랽㧇෴資㎶毪쵡곘釧宫罌좄嶽ඖ蘺ߙ揭썎ઠ\uda8d᠝惱ᾃꑻ뎐⚌问Ᏼ瓖㆞壓耙疦഑鳛ꉛ弘ꍿꩱǰ娦멸髝𤋮铉\udd92枸닍ꦚ淋൫䦽경琼蟘猃磬揦燹㩧苧驃ꩱ磔余Ꝭጃ릏藲ઢ峁灷㞙꼴靯뾍ꏒ㴭\udecc붆廥虿瘑슰꞊캭⬍⟏㰿፧筈ᾳ奾껂颚윟궭渮ﲼ\ud896뫺던唉ㅚ㐰᝗䃛閡第跳왓髌孳Ƚ꺞锿㞨䦓ꍬ溭밶镢\ude83皰댱훦蘩豯❫\udeb6\uda63督蕔熽ᜦﻏ珇沎鲳망å姉ﵿ쁼剆췻簜櫈競仌㣻븤ᐣ猰\ud9ad䜰꥔簷覎⛱骛룾⻍편侙蚛嬘竬藊秋끘㑻䀶ⴳ馲埞晖荢嶲総ﵸᲓ찆贙Ꞹ캎\ude3e蝞㘟돔쉎筕\ude47㳷䶣ꕛ깕킾箣㑝뷖焮㉕䑎㋿ន鴸ꊩ휫Ꝡ䏵糎역뭵᤽ꝲ橪ꬦ\udf41禢헋\udbf7㛠鷛藛쵃놵盭䟬ᙸ㵗㻡㽛ﱵ껙ﱲ꣒ׄ텏跻뻁\udf76㷉쭴\udb51剻⼩㋄经෯妛걽⾚矉䚱团௯ῗ빷귳ힼ⿣㚎﴿Ϭ촆嗪啳썿⸤펞喣ꍼ寸矩篟薡瓇\udef2翻瞫荒꾾嘾ᵝッ߰攧㞀\ud85aᕿᄷ缔煟哒尚ᙯ曅潏풍⧶”そ篠\ud816遟㟲弘೿㠏鐨뗿䷷V收佽ℴ࿀ꍴ⃪핣簷㜸\fꃋᨌ鐳샘砖㼆೏ǵೂ併ₛ訊ﴞ౿ఌጬ∿徳샑ࣿ➐邧֜偶邎૟⸏邓闠逌㨐睏₲썁ೂ됽ඵᤌスീꨍゥಔ⭂䰡୐鄏줬阍礷芿紉潛萳烅ಣ伇樚㘐괴悳ᆶ됇ඓ␈袔ઈ蔏䃘觫ᰋꍪ냐舊킠貴粱歙ৈꔳ좘탰켁ꅹꑶ肦༨蠵⤑✏⃚衋óハɛ⊊ꬶ蹈뀷㼜뎟䐊䕩랿㐈梂襸Ṓꠟୋ঍⢎蠍ઍ쬗횿̏吘ᢞ댼3ꃜ௰間⣝諨ꤲ됩౰ꎌ鴓襧㎌棧茘쑰㝶襈뭧讘ᝅ躈ﲊ댐˂䇥䭺ੈҠ盁ṕࣗ輈骈狸閏壟货杳霩赈Үゟࡏꣳ횄빤ჿㆸ쑏඀붏㱲丗䩌¥覤\u0010ꙋ뢝街௷爎乀\udf28䯤龴\ud8ff貘蜲Ҏᒸ襎좩䨀蒚萐쬍꽗瓨앷⁄豊좬䧣侏炼䯔靰ࣖ䱸묂〻界뒜亴썄褨킇訴픑磌辘䫔빏낂俗奵㼒乔锓咋䣳⭉裭䶴咔䭲㘍ἲླྀ铈쾶旐䓟且켥썸ῳ㓐쫤ヴ쯤駅휟䫘뽏룑䦘閶䳧࠘듉钅듌쳪쭆䋧訬쵍敐屢貀䡌鳏鐁你챈᲍䧤︰嬙샘ᚏᢅ䩄허鳒遱ᝎ윙쵌轉泵恘殍梓쫬酎额둷⯋b춼鳘挀퇉峱쨟戏╼쯨鿊蜈쿼롶ਔ뽓㳐Ⳃ಍猷䷼맋\udcd8暜⏋챔ꆷ㲃㜂\ude8b碏퇜頮싟Ꮶ㰬⒰逭낃㊨
흉䳨⺤瑤諊䊦⻐﷧냩쪂赲ꋯ㞶၌ᒾ⿭䣱䊧ʒ嘮병ⱏⰨ鋛\ude2c칂쬢귉벎⹬븧츼般抢⾒鹦೴⽢넬ⷱ㼐닼⨤뇉늽ⴛ卉ರ䱬䔒춾‬⊹⧲蝱ኊ⶜\uddc9狦攆梖鋭⿢ዑ띄Hꓭ⭣ᒚⰣ૫⢊勯즊찫諼걮斶⫗쪬\udb2e唃謈\udcd8꼒骫꒹窩튁⯘铆誢뉲팩૟ᘅḮ粫⡪\udd2f䫉ꎬ견ꩬ苉瀗撩糾ꭊ붉꠬쪩㊵ꫢ퐶✎컚풭⪽詛憫歑ꤺꏪꮩ냴긲㮨壜꬚꼙슚쬬ⷚ읉섯ꬌ햭浓꣞⦠擩긆窽궧࡬૗ொൺ\ud92e䚰橌䖨狶ꪪ캪웙隓៰몙꧚ڦ閪૨橺\ud995웠둪鼫㰴⠫䜭勪꼦ꚗ曫ఈ끭砃겂ꎨ窖櫪藒鲭ꦦ랭ᚣ䭸翤㊶긲덙梄⦤丘즯㭉擦ଆ디\udc8d楒짳⛲䦤峲쥊뱮嚋Ṯꢒ渺饬ᛏྖ䚒꿆샎湳桬Ꙩ웟ꦆꚗ蚠桢\ud9a8᪂洢쎊㋋檤裭曚ಹ庨囿殎큪㒫䑎횭ຣ༎잩훃ꉝ慫꼓氌蝤暒듯⻏Δ蝬몧桴샋皶⣶구檎驲䪿涎\uda6cທ沒竬袺詮ꘪ胈麉漢醯榟䓁䭀棲퇪囎巒黁쮼댭坲﨨콜ꍍق肫ໆ맫樅髩皵橎譪雵自ꍋ讟㨭貜欟꟨溙ꀜ돏࿮㛃䢿᩶⿾꭬Ǎ硊틶릫操ꤎ렞黓賴훍克뷨⪀㋟ᳱ궪훮⇅湡谞膿⸦쵨Ǚ캦ᴡ뺺쎫庼ᢑ舙1덁i뉊쨞쫸ᦡ﯏㊢ᡆ餙ᚃெ㻊⿁븉ꘂ粥嘲ߪ嫉䣪\udb1f㢐a썪첼蜜凉湑퓬䆵ẜ媄熷촤踑࣫ᳱ픛놞랾ጙ軫謙ᬏ蛁読ট꼉凊팘짽鶰曠鵞ﮯᇂ黱촫⦫ຮꄞ즘᱾ﴚꋗ䴪桂餭醀駋ᓇ닞淮漢ᦄ趋溁ḙ\udbe8뼅ᯑ㇮᯺藐ᱟê膝늊骞仜ᬉ冰暎\uddef⧁ᩞ馎圈ⶩ蛭歾逇壨鳦辮ງ鸹ﲞ秾驰零ᾦ청ᆔ뎚⦇捉ꮙ槅霚茣鯾뾞暗香ﯦֹᢿ呩䗎᤾뿌軏੩藏ꕒ鋕飱퉪譹\uda75薫릃饪캛姮騥㩒鬥﵈ꗶ婕歃燰夹졘엳駙뻱㻏얪ᰥ딮滧菏奝뢊鲥踓饱齶넙円顥鰁ߠ "[{"id":"2001","provinceId":"18","regencyId":"13","districtId":"05","name":"Kebuayan"},{"id":"2002","provinceId":"18","regencyId":"13","districtId":"05","name":"Way Nukak"},{"id":"2003","provinceId":"18","regencyId":"13","districtId":"05","name":"Way Sindi"},{"id":"2004","provinceId":"18","regencyId":"13","districtId":"05","name":"Penengahan"},{"id":"2005","provinceId":"18","regencyId":"13","districtId":"05","name":"Menyancang"},{"id":"2006","provinceId":"18","regencyId":"13","districtId":"05","name":"Laay"},{"id":"2007","provinceId":"18","regencyId":"13","districtId":"05","name":"Penggawa V Ulu"},{"id":"2008","provinceId":"18","regencyId":"13","districtId":"05","name":"Penggawa V Tengah"},{"id":"2009","provinceId":"18","regencyId":"13","districtId":"05","name":"Way Sindi Utara"},{"id":"2010","provinceId":"18","regencyId":"13","districtId":"05","name":"Tembakak Way Sindi"},{"id":"2011","provinceId":"18","regencyId":"13","districtId":"05","name":"Way Sindi Hanuan"},{"id":"2012","provinceId":"18","regencyId":"13","districtId":"05","name":"Asahan Way Sindi"}]ext/filter_ext/ft_8047.json {"id_8047":{"title":"Alice's Adventures in Wonderland (abridged, version 3)","language":"English","totaltime":"1:08:01","url_librivox":"http://librivox.org/alices-adventures-in-wonderland-abridged-version-3-by-lewis-carroll/","url_iarchive":"http://archive.org/details/alicesadventure_abridged_pc_librivox","readers":["5717"],"authors":"71","genres":"Action & Adventure"}}10-100 {"vendor":"sapmachine","filename":"sapmachine-jdk-16-ea.26_linux-x64_bin.tar.gz","release_type":"ea","version":"16-ea.26","java_version":"16-ea.26","jvm_impl":"hotspot","os":"linux","architecture":"x86_64","file_type":"tar.gz","image_type":"jdk","features":[],"url":"https://github.com/SAP/SapMachine/releases/download/sapmachine-16+26/sapmachine-jdk-16-ea.26_linux-x64_bin.tar.gz","md5":"9d6429057a9454f55ddc59edda005035","md5_file":"sapmachine-jdk-16-ea.26_linux-x64_bin.tar.gz.md5","sha1":"848a6eb426fa862248ea0089854404af7961aa07","sha1_file":"sapmachine-jdk-16-ea.26_linux-x64_bin.tar.gz.sha1","sha256":"e46098c584439219769ba69479190f747e09917d9ef3b394bb276f3e3e0ec987","sha256_file":"sapmachine-jdk-16-ea.26_linux-x64_bin.tar.gz.sha256","sha512":"70e3fabebd41410bd86e902ee054a8b28234b15a99b9f6ab85f2eb677a9fcbd20b83c7650ae14742ed41f4972b3897fce88027f5ca2e9cc70f28867ca0f9c5fd","sha512_file":"sapmachine-jdk-16-ea.26_linux-x64_bin.tar.gz.sha512","size":206421290} 1-10 { "prefix": "!event", "token": "Insert bot token", "_comment": " nizonrox Used to mark each id what a name to identify each admin easily", "admin": ["209847814719668225"], "host_channel": "ID of host channel" } 10-100 {"geometry": {"type": "Point", "coordinates": [-88.59, 35.17]}, "type": "Feature", "id": "38375", "properties": {"other_cities": "", "city": "Selmer", "state": "TN", "county": "McNairy County"}}{ 
"index": 1771, "hash": 3230389420, "blacklisted": false, "damageType": 0, "redacted": false, "isDisplayable": true, "displayProperties": { "icon": "/common/destiny2_content/icons/c81748a8a64ed6c3349596cbc88d31f7.png", "hasIcon": true, "description": "Aiming down sights loads a powerful explosive payload that [Stagger] staggers unshielded combatants. Strong against Unstoppable Champions.", "name": "Unstoppable Shot" } }AndrewSverdrup/monaco-textmate-languagesgrammars/typescript/ts.config.json { "id": "typescript", "scopeName": "source.ts", "extensions": [".ts"], "aliases": ["TypeScript", "typescript"], "mimeType": ["application/typescript"] }{"code": "STEU19", "lang": "de", "description": "\nKostensteuern abz\u00fcglich Subventionen\n\nErl\u00e4uterung f\u00fcr folgende Statistik(en):\n42251 Kostenstrukturerhebung im Verarb. Gewerbe, Bergbau\n\nBegriffsinhalt:\nAls Kostensteuern z\u00e4hlen die Steuern, die als Kosten\nanzusehen sind, wie z.B. die Grundsteuer, die Gewerbesteuer,\ndie Kraftfahrzeugsteuer und die Verbrauchssteuern, sowie\n\u00d6ffentliche Geb\u00fchren und Betr\u00e4ge.\n\nSubventionen sind Transferleistungen an den Staat.\n\nDer Wert ist i.d.R. positiv, da die Subventionen kleiner\nsind als die kostensteuerlichen Aufwendungen der\nUnternehmen.\n\n\u00a9 Statistisches Bundesamt, Wiesbaden 2004", "name": "Kostensteuern abz\u00fcglich Subventionen", "type": "Merkmal"}{ "name": {"message": "ওয়েবপেইজ স্ক্রীণশট - Open Screenshot" }, "dir": {"message": "ltr" }, "description": {"message": "সহজেই দ্রুততার সাথে ওয়েবপেইজের স্ক্রীণশট নিন এবং .jpg হিসেবে সংরক্ষণ করুন। প্রথম ক্রোম এক্সটেনশন যেটি পুরো পেইজের স্ক্রীণশট নিতে.." } }0 [ { "Name": "JLitte cash 100bb ante", "Filepath": "ranges_trainer/range_dicts/JLittle_cash_100bb_ante.pkl" }, { "Name": "Push/Fold, big blind ante", "Filepath": "range_dicts/Push-Fold_BigBlindAnte.pkl" }, { "Name": "Heads Up cash 100bb", "Filepath": "range_dicts/HeadsUp_cash_100bb.pkl" }, { "Name": "GTO implementable 75bb", "Filepath": "range_dicts/GTO_implementable_75bb.pkl" }, { "Name": "GTO implementable 40bb", "Filepath": "range_dicts/GTO_implementable_40bb.pkl" }, { "Name": "GTO implementable 25bb", "Filepath": "range_dicts/GTO_implementable_25bb.pkl" }, { "Name": "GTO implementable 15bb", "Filepath": "range_dicts/GTO_implementable_15bb.pkl" }, { "Name": "6max GTO implementable 100bb cash", "Filepath": "range_dicts/6max_GTO_implementable_100bb_cash.pkl" }, { "Name": "Art", "Filepath": "ranges_trainer/range_dicts/art.pkl" } ]{"categories":["Uncategorized"],"desc":"\n","details":{"authors":", ","format":"pdf","isbn-10":"1449337996","isbn-13":"978-1449337995","pages":"397 pages","publication date":"March 2, 2013","publisher":"Maker Media, Inc","size":"164.00Mb"},"img":"http://192.168.3.11/covers/97/97fd0edaf88a6204bea614bea0709a95.jpg","link":"https://rapidhosting.info/files/91u","title":"Vintage Tomorrows: A Historian And A Futurist Journey Through Steampunk Into The Future of Technology"}[ {"name": "webide:newproject", "bind": {"mac": "Command-N", "win": "Ctrl-Shift-N"}}, {"name": "webide:new", "bind": {"mac": "Command-N", "win": "Alt-N"}}, {"name": "webide:open", "bind": {"mac": "Command-E", "win": "Ctrl-E"}, "event": "webide.windowRemote('/window/open', {'width': 1000, 'height': 550})"}, {"name": "webide:save", "bind": {"mac": "Command-S", "win": "Ctrl-S"}}, {"name": "webide:saveas", "bind": {"mac": "Command-Shift-S", "win": "Ctrl-Shift-S"}}, {"name": "webide:closefile", "bind": {"mac": "Option-W", "win": "Alt-W"}}, {"name": "file:closeallfiles", "bind": 
{"mac": "Option-Shift-W", "win": "Alt-W"}}, {"name": "webide:saveas", "bind": {"mac": "Command-Shift-S", "win": "Ctrl-Shift-S"}}, {"name": "webide:undo", "bind": {"mac": "Command-Z", "win": "Ctrl-Z"}}, {"name": "webide:redo", "bind": {"mac": "Command-Y", "win": "Ctrl-Y"}}, {"name": "webide:cut", "bind": {"mac": "Command-X", "win": "Ctrl-X"}}, {"name": "webide:copy", "bind": {"mac": "Command-C", "win": "Ctrl-C"}}, {"name": "webide:paste", "bind": {"mac": "Command-V", "win": "Ctrl-V"}} ] { "slug": "trade-alert", "title": "Trade Alert", "description": "

A personal project that aims to use computational power to detect possible trades to be made on the stock market.

Using web scraping techniques I was able to get all stock prices from the Brazilian stock market (Bovespa) since 2010 and store them in a MongoDB database. Currently, it analyzes moving averages and channel breakouts to send e-mails alerting about possible trades (a rough sketch of this check is shown below). The goal is to apply Machine Learning to this project.

Back-end developed with Laravel 8 and MySQL to store alerts and users. Python is used to identify trades, MongoDB stores all stock prices, and TensorFlow is used as the Machine Learning framework.
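The detection step is only described in prose here. As a rough illustration, a moving-average crossover and channel-breakout check over a daily close series might look like the sketch below (pandas-based; the `close` column name, the window sizes, and the `detect_signals` helper are hypothetical, not the project's actual code):

```python
import pandas as pd

def detect_signals(prices: pd.DataFrame, fast: int = 9, slow: int = 21, channel: int = 20) -> pd.DataFrame:
    """Flag moving-average crossovers and channel breakouts on a 'close' price column."""
    df = prices.copy()

    # Moving-average crossover: the fast SMA is above the slow SMA on this bar
    # after being at or below it on the previous bar.
    df["sma_fast"] = df["close"].rolling(fast).mean()
    df["sma_slow"] = df["close"].rolling(slow).mean()
    above = df["sma_fast"] > df["sma_slow"]
    df["ma_cross_up"] = above & ~above.shift(1, fill_value=False)

    # Channel breakout: close above the highest close of the previous `channel` bars.
    df["channel_high"] = df["close"].rolling(channel).max().shift(1)
    df["breakout_up"] = df["close"] > df["channel_high"]
    return df

if __name__ == "__main__":
    # Tiny synthetic series; the real project would load Bovespa prices from MongoDB instead.
    closes = [10.0, 10.2, 10.1, 10.4, 10.8, 11.5, 12.0, 11.8, 12.5, 13.0] * 5
    signals = detect_signals(pd.DataFrame({"close": closes}))
    print(signals[["close", "ma_cross_up", "breakout_up"]].tail())
```

In a setup like the one described, boolean flags of this kind would presumably be what triggers the e-mail alerts handled by the Laravel back-end.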

", "technologies": ["laravel", "mysql", "python", "tensorflow", "mongodb"], "date": "Jan 2020", "pictures": [ { "url": "1.jpg", "desc": "Breakout channel example" }, { "url": "2.jpg", "desc": "Crossing moving averages example" } ] } package.json { "name": "eslint-plugin-artistco", "version": "0.0.1", "author": " <>", "description": "Artistco specific linting rules for ESLint", "main": "index.js", "scripts": { "coveralls": "cat ./reports/coverage/lcov.info | coveralls", "lint": "eslint ./", "pretest": "npm run lint", "test": "npm run unit-test", "unit-test": "istanbul cover --dir reports/coverage mocha tests/**/*.js -- --reporter dot" }, "repository": { "type": "git", "url": "https://github.com/svengau/eslint-plugin-artistco" }, "homepage": "https://github.com/svengau/eslint-plugin-artistco", "bugs": "https://github.com/svengau/eslint-plugin-artistco/issues", "dependencies": { "has": "^1.0.1", "lodash": "^4.17.5" }, "devDependencies": { "babel-eslint": "^8.2.1", "coveralls": "^3.0.0", "eslint": "^4.18.0", "eslint-config-airbnb-base": "^12.1.0", "eslint-plugin-import": "^2.9.0", "istanbul": "^0.4.5", "mocha": "^5.0.1" }, "peerDependencies": { "eslint": "^3.0.0 || ^4.0.0" }, "engines": { "node": ">=9" }, "license": "MIT" } jacobzmidzinski/droidDev.news-database { "17-android11BetaPlans": { "tags": ["android"], "title": "Android 11: Beta Plans", "curator": { "name": "", "url": "https://jacobzmidzinski.com" }, "contentType": "article", "publishDate": { "_seconds": 1588723200 }, "description": ", VP of Engineering tells us about Android 11 schedule update, App compatibility and how to get started with Android 11", "url": "https://android-developers.googleblog.com/2020/05/android-11-beta-plans.html" } }{ "name" : "Restore Repository", "storage" : { "cacheName" : "restoreRepository", "cacheConfiguration" : "config/restore-repo-config-infinispan.xml", "binaryStorage" : { "type" : "file", "directory": "target/backupArea/restoreRepo/binaries", "minimumBinarySizeInBytes" : 40 } }, "node-types" : ["cnd/cars.cnd"] }etc/items/actions/lift_crystal_key.json0 { "commands": [ "lift crystal key" ], "response_type": "display", "response": "Cool. You've lifted the Crystal Key. Did you mean to pick up instead?" } {"PRODUCT_NAME":"JUNIOR HOUSE SLIPPERS","MANUFACTURER":"PICK AND PACK ENTERPRISES","BRANDNAME":"CK","WEIGHT":"NET"}HiBrowser/Hi-Browsersections/26-1307.json {"text":"If the charter be granted as aforesaid, it, together with the certificate of the Council of the District of Columbia granting the same indorsed thereon, shall be filed for record in the Office of the Recorder of Deeds for the District of Columbia, and shall be recorded by him. 
On the filing of the said certificate with the said Recorder of Deeds as herein provided, approved as aforesaid by the said Council, the persons named therein and their successors shall thereupon and thereby be and become a body corporate and politic, and as such shall be vested with all the powers and charged with all the liabilities conferred upon and imposed by this chapter upon companies organized under the provisions hereof; provided, however, that no corporation created and organized under the provisions hereof, or availing itself of the provisions hereof as contained in § 26-1313, shall be authorized to transact the business of a trust company, or any business of a fiduciary character, until it shall have filed with the Superintendent of Banking and Financial Institutions a copy of its certificate of organization and charter, and shall have obtained from him and filed the same for record with the said Recorder of Deeds, a certificate that the said capital stock of said company has been paid in and the deposit of securities made with said Superintendent of Banking and Financial Institutions in the manner and to the extent required by this chapter.","historical":"Prior Codifications\n\n1981 Ed., § 26-407.\n\n1973 Ed., § 26-307.\n\nLegislative History of Laws\n\nFor legislative history of D.C. Law 6-107, see Historical and Statutory Notes following § 26-403.\n\nChange in Government\n\nThis section originated at a time when local government powers were delegated to a Board of Commissioners of the District of Columbia (see Acts Relating to the Establishment of the District of Columbia and its Various Forms of Governmental Organization in Volume 1). Section 402(223) of Reorganization Plan No. 3 of 1967 (see Reorganization Plans in Volume 1) transferred all of the functions of the Board of Commissioners under this section to the District of Columbia Council, subject to the right of the Commissioner as provided in § 406 of the Plan. The District of Columbia Self-Government and Governmental Reorganization Act, 87 Stat. 818, § 711 (D.C. Code, § 1-207.11), abolished the District of Columbia Council and the Office of Commissioner of the District of Columbia. These branches of government were replaced by the Council of the District of Columbia and the Office of Mayor of the District of Columbia, respectively. Accordingly, and also pursuant to § 714(a) of such Act (D.C. Code, § 1-207.14(a)), appropriate changes in terminology were made in this section.\n\nDC CODE § 26-1307\n\nCurrent through December 11, 2012","credits":"(Mar. 3, 1901, 31 Stat. 1304, ch. 854, § 719; Nov. 23, 1985, D.C. Law 6-63, § 106(a)(6), as added Apr. 11, 1986, D.C. Law 6-107, § 2(k), 33 DCR 1168.)","sections":[],"division":{"identifier":"V","text":"Local Business Affairs"},"title":{"identifier":"26","text":"Banks and Other Financial Institutions. (Refs & Annos)"},"chapter":{"identifier":"13","text":"Trust, Loan, Mortgage, Safe Deposit and Title Corporations. (Refs & Annos)"},"subchapter":{"identifier":"I","text":"General."},"heading":{"title":"26","chaptersection":"1307","identifier":"26-1307","catch_text":"Charter of incorporation--Recording; certificate to be obtained from Superintendent of Banking and Financial Institutions."}}ccronje/bilara-data10-100 { "an3.46:0.1": "Aṅguttara Nikāya 3 ", "an3.46:0.2": "5. Cūḷavagga ", "an3.46:0.3": "46. Sīlavantasutta ", "an3.46:1.1": "“Yaṁ, bhikkhave, sīlavanto pabbajitā gāmaṁ vā nigamaṁ vā upanissāya viharanti. ", "an3.46:1.2": "Tattha manussā tīhi ṭhānehi bahuṁ puññaṁ pasavanti. 
", "an3.46:1.3": "Katamehi tīhi? ", "an3.46:1.4": "Kāyena, vācāya, manasā. ", "an3.46:1.5": "Yaṁ, bhikkhave, sīlavanto pabbajitā gāmaṁ vā nigamaṁ vā upanissāya viharanti. ", "an3.46:1.6": "Tattha manussā imehi tīhi ṭhānehi bahuṁ puññaṁ pasavantī”ti. ", "an3.46:1.7": "Chaṭṭhaṁ. " }{ "process_id" : 14804, "version" : "2019.3.5f1", "app_path" : "C:/Program Files/Unity/Hub/Editor/2019.3.5f1/Editor/Unity.exe", "app_contents_path" : "C:/Program Files/Unity/Hub/Editor/2019.3.5f1/Editor/Data" }[{"commerce":"Carrefour","location":"Río Grande","address":"Perú 76, Río Grande, Tierra del Fuego, Argentina","province":"Tierra del Fuego","id":9185},{"commerce":"Carrefour","location":"Río Grande","address":"San Martin 685, Río Grande, Tierra del Fuego, Argentina","province":"Tierra del Fuego","id":9186},{"commerce":"La Anonima","location":"Río Grande","address":"San Martín553, Río Grande, Tierra del Fuego , ZIP 9420, Argentina","province":"Tierra del Fuego","id":9187},{"commerce":"La Anonima","location":"Río Grande","address":"VIEDMA 841, 9420, Río Grande, Tierra del Fuego, Argentina","province":"Tierra del Fuego","id":9188},{"commerce":"La Anonima","location":"Río Grande","address":"25 de Mayo 1926, Río Grande, Tierra del Fuego , ZIP 9420, Argentina","province":"Tierra del Fuego","id":9189},{"commerce":"La Anonima","location":"Río Grande","address":"San Martín 1605, Río Grande, Tierra del Fuego , ZIP 9420, Argentina","province":"Tierra del Fuego","id":9190},{"commerce":"La Anonima","location":"Tolhuin","address":"Constitucion Nacional Argentina 1146, Tolhuin, Tierra del Fuego, ZIP 9412, Argentina","province":"Tierra del Fuego","id":9191},{"commerce":"La Anonima","location":"Uhuaia","address":"Perito Moreno 1550, Uhuaia, Tierra del Fuego , ZIP 9410, Argentina","province":"Tierra del Fuego","id":9192},{"commerce":"Carrefour","location":"Ushuaia","address":"Av. 12 de Octubre 169, Ushuaia, Tierra del Fuego, Argentina","province":"Tierra del Fuego","id":9193},{"commerce":"La Anonima","location":"Ushuaia","address":"San Martín1506, Ushuaia, Tierra del Fuego, ZIP 9410, Argentina","province":"Tierra del Fuego","id":9194},{"commerce":"La Anonima","location":"Ushuaia","address":"Los Ñires 2237, Ushuaia, Tierra del Fuego, ZIP 9410, Argentina","province":"Tierra del Fuego","id":9195},{"commerce":"La Anonima","location":"Ushuaia","address":"Concejal Rubinos 188, Ushuaia, Tierra del Fuego , ZIP 9410, Argentina","province":"Tierra del Fuego","id":9196},{"commerce":"La Anonima","location":"Ushuaia","address":"Gobernador Paz 190, Ushuaia, Tierra del Fuego , ZIP 9410, Argentina","province":"Tierra del Fuego","id":9197}]{ "replace": false, "values": [ "minecraft:wooden_sword", "minecraft:stone_sword", "minecraft:iron_sword", "minecraft:golden_sword", "minecraft:diamond_sword", "minecraft:netherite_sword", "rswords:black_iron_sword", "rswords:blood_iron_sword", "rswords:bruh_sword", "rswords:cobalt_sword", "rswords:crimson_sword", "rswords:ender_sword", "rswords:fire_sword", "rswords:hell_bender_sword", "rswords:hell_iron_sword", "rswords:holy_sword", "rswords:infested_sword", "rswords:silver_sword", "rswords:vampiric_sword" ] } assets/zipBibles/english_NIV/english_NIV/76_english_NIV.json { "version": "english_NIV", "book": "Genesis", "chapter": 3, "verse": 20, "word": "Adam named his wife Eve, because she would become the mother of all the living." }H/Haberdasher_noun.json { "word": "Haberdasher", "definitions": [ "A dealer in small items used in sewing, such as buttons, zips, and thread.", "A dealer in men's clothing." 
], "parts-of-speech": "Noun" }package.json { "name": "nettube", "version": "1.0.0", "description": "Movie App", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1", "start": "node index.js", "heroku-postbuild": "NPM_CONFIG_PRODUCTION=false npm install --prefix client && npm run build --prefix client" }, "repository": { "type": "git", "url": "git+https://github.com/shubhamkhan/nettube.git" }, "author": "", "license": "MIT", "bugs": { "url": "https://github.com/shubhamkhan/nettube/issues" }, "homepage": "https://github.com/shubhamkhan/nettube#readme", "dependencies": { "bcryptjs": "^2.4.3", "cookie-parser": "^1.4.6", "dotenv": "^16.0.0", "express": "^4.17.2", "jsonwebtoken": "^8.5.1", "mongoose": "^6.2.0", "nodemon": "^2.0.15" } } charles-halifax/recipes { "directions": [ "Cook and stir chorizo sausage in a skillet over medium heat until cooked completely, 5 to 7 minutes; drain and transfer to a slow cooker.", "Stir diced tomatoes, cream cheese, and processed cheese in with the chorizo.", "Cook on Low until cheese is melted, stirring occasionally, 1 1/2 to 2 hours." ], "ingredients": [ "10 ounces bulk chorizo sausage", "1 (10 ounce) can diced tomatoes with green chile peppers (such as RO*TEL\u00ae), drained", "1 (8 ounce) package cream cheese, cubed", "1 (8 ounce) package processed cheese (such as Velveeta\u00ae), cubed" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Chorizo Queso Dip", "url": "http://allrecipes.com/recipe/221371/chorizo-queso-dip/" } [{"id": 2250, "gempId": "1_30", "side": "Light", "rarity": "C2", "set": "1", "printings": [{"set": "1"}], "front": {"title": "\u2022\u2022\u2022", "imageUrl": "https://res.starwarsccg.org/cards/Premiere-Light/large/shistavanenwolfman.gif", "type": "Character", "subType": "Alien", "uniqueness": "***", "destiny": "2", "power": "2", "ability": "1", "deploy": "3", "forfeit": "2", "icons": ["Warrior"], "characteristics": ["scout"], "gametext": "May move to an adjacent site as a 'react'.", "lore": "Lak Sivrak is a typical Shistavanen male. Ferocious, but not aggressive. 
Often trained as scouts at Imperial academies, but they despise the ambitions of the New Order."}, "pulledBy": ["General Crix Madine", " (when at a Jabba's Palace site you control)", "", "Nar Shaddaa Wind Chimes", "Nar Shaddaa Wind Chimes & Out Of Somewhere"], "legacy": false}]{ "name": "XChat", "description": "A cross-platform IRC client.", "url": "http://xchat.org/" }{ "id": 134011, "name": "SPM-USUncut", "description": "An easy on the eyes version of US Uncut.", "user": { "id": 354857, "name": "", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": "publicdomain" }, "updated": "2016-10-19T17:02:31.000Z", "weekly_install_count": 0, "total_install_count": 29, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/134011_after.png?r=1533643532", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": [ "https://userstyles.org/style_screenshots/134011_additional_22579.png?r=1533643532", "https://userstyles.org/style_screenshots/134011_additional_22580.png?r=1533643532" ], "license": "publicdomain", "created": "2016-10-19T17:02:31.000Z", "category": "site", "raw_subcategory": "usuncut", "subcategory": "usuncut", "additional_info": null, "style_tags": [], "css": "@-moz-document domain(\"usuncut.com\") {\r\n#main-content-wrap {\r\n background: #333;\r\n}\r\n\r\n.content-main {\r\n color: white;\r\n}\r\n\r\nspan.post-tags-header, span.post-header, #post-header .post-title, .archive-list-info span, .head-latest-text h2, .feat-main-top-text h2, .feat-main-sub-text h2, .feat-wide-text h2, .feat-main2-text h2, .archive-list-text a {\r\n color: white;\r\n}\r\n\r\n#leader-wrap {\r\n background: #333;\r\n}\r\n\r\n#fly-wrap {\r\n background: transparent;\r\n}\r\n\r\n.ab_thumbnails-rr_abp-mode .syndicatedItem .video-title {\r\n color: white;\r\n}\r\n\r\n#sidebar-wrap .head-latest-out {\r\n background: #1a1a1a;\r\n border-radius: 5px;\r\n}\r\n\r\nul.head-latest-list li {\r\n background: #1a1a1a;\r\n border-radius: 8px;\r\n}\r\nul.head-latest-list li {\r\n border-top: 1px solid grey;\r\n}\r\n\r\n#main-nav-wrap {\r\n \r\n\tbackground: url(http://cdn.xl.thumbs.canstockphoto.com/canstock12334116.jpg);\r\n}\r\n\r\n.ab_thumbnails-a_abp-mode .syndicatedItem .video-title {\r\n color: white !important;\r\n}\r\n\r\n.post-tags span.post-tags-header {\r\n color: grey;\r\n}\r\n\r\nul.archive-list li {\r\n background: #1a1a1a;\r\n border-top: 1px solid grey;\r\n border-radius: 8px;\r\n}\r\n\r\n#foot-wrap {\r\n background: url(http://www.myfreetextures.com/wp-content/uploads/2011/06/another-rough-old-and-worn-parchment-paper.jpg) right;\r\n max-width: 100%;\r\n}\r\n\r\n.foot-widget {\r\n color: black;\r\n font-weight: bold !important;\r\n}\r\n\r\n#foot-wrap a {\r\n color: black;\r\n}\r\n\r\nul.archive-list li:first-child {\r\n border-top: 1px solid grey;\r\n}\r\n\r\n#sidebar-wrap .head-latest-out {\r\n border-top: 1px solid grey;\r\n}\r\n\r\n.trc_elastic .trc_rbox {\r\n width: auto;\r\n}\r\n\r\n.ab_thumbnails-rr_abp-mode {\r\n border-color: grey;\r\n border-radius: 8px !important;\r\n}\r\n\r\nimg {\r\n border-radius: 8px;\r\n}\r\n\r\n._2pi8 {\r\n border-radius: 8px;\r\n}\r\n\r\nspan.archive-share-but {\r\n background: #a8a8a8;\r\n color: #444;\r\n border-radius: 8px;\r\n}\r\n\r\n\r\n.head-main-out {\r\n margin-left: -330px;\r\n background: #333;\r\n}\r\n\r\n.sec-marg-out2, .sec-marg-out4 {\r\n background: #333;\r\n}\r\n\r\n#main-nav-right {\r\n background: 
#333;\r\n}\r\n\r\n.head-latest-scroll {\r\n background: #1a1a1a;\r\n color: #999;\r\n border-radius: 8px;\r\n}\r\n\r\n#head-main-top {\r\n background: #333;\r\n}\r\n\r\n#fly-wrap, #site-fixed, .fly-bottom-soc, .fly-to-top, .fly-soc-head {\r\n background: transparent;\r\n}\r\n\r\nheader {\r\n height: auto;\r\n background-color: #333;\r\n}\r\n\r\n.content-main blockquote p {\r\n color: lightgrey;\r\n}\r\n\r\nnav.main-menu {\r\n background: #333;\r\n border-radius: 8px;\r\n}\r\n\r\nnav.main-menu ul li a {\r\n color: white;\r\n}\r\n\r\n.fly-but-search, .fly-open {\r\n background: url(http://cdn.xl.thumbs.canstockphoto.com/canstock12334116.jpg);;\r\n}\r\n\r\n.fly-menu-out {\r\n background: #333;\r\n}\r\n\r\n.fly-search-out {\r\n background: url(http://cdn.xl.thumbs.canstockphoto.com/canstock12334116.jpg);\r\n}\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/134011/spm-usuncut.user.js", "style_settings": [] }AravindVasudev/Flask-ChatBotdata/answers.json { "greet": "Hey, what's up?", "restaurant_search": "restaurant search", "affirm": "affirm", "thankyou": "anytime." } { "header": { "category": "the-dr-peter-breggin-hour", "image": { "type": "image", "url": "http://drpeterbregginshow.podbean.com/mf/web/8u7ne/Dr-Peter-Breggin-AlbumArt.jpg" }, "layout": "podcast", "link": "http://drpeterbregginshow.podbean.com/e/the-dr-peter-breggin-hour-craig-wiener-081312/", "podcast": { "developer-note": "", "length": "58:39", "note": "", "size": 14077440, "type": "audio/mpeg", "url": "http://drpeterbregginshow.podbean.com/mf/feed/tfzmhw/Breggin081312.mp3" }, "show": { "category": [ "Health", "Self-Help" ], "image": { "height": 144, "url": "http://imglogo.podbean.com/image-logo/297199/Dr-Peter-Breggin-AlbumArt.jpg", "width": 144 }, "owner": { "email": "", "name": "Progressive Radio Network" }, "published": { "string": "Wed, 25 Feb 2015 21:49:04 +0000", "timestamp": "1424918944" }, "subtitle": "Psychiatrist , MD believes you can make a marvelous life. Great guests, callers and conversations to inspire you.", "title": "The Dr. Peter Breggin Hour" }, "tags": [ "Health", "Self-Help", "the-dr-peter-breggin-hour" ], "title": "" }, "title": "", "date": "2012-08-13 00:00:00 -0400", "categories": [ "the-dr-peter-breggin-hour" ], "content": "

My guest , PhD and I talk about ADHD-like behaviors, their sources, and how to help change them through...\n<!--more-->\nchanging specific parent-child interactions.
\n A practical, useful, and insightful hour for therapists, teachers, and parents; and even for older children and adults who think they have “ADHD.”\n

\n" } stacktrace.js/1.1.2.json {"stacktrace-with-promises-and-json-polyfills.js":","stacktrace-with-promises-and-json-polyfills.min.js":","stacktrace.concat.js":","stacktrace.min.js":"}composer.json { "name": "sculpin/sculpin-sourcetree", "type": "library", "license": "MIT", "authors": [ { "name": "", "email": "" } ], "require": { "php": "^7.1" }, "require-dev": { "malukenho/docheader": "^0.1.5", "phpstan/phpstan": "^0.6.4", "phpunit/phpunit": "^6.0", "squizlabs/php_codesniffer": "^2.8" }, "autoload": { "psr-4": { "Sculpin\\SourceTree\\": "src/" } }, "scripts": { "check": [ "@license-check", "@cs-check", "@phpstan", "@test" ], "cs-check": "phpcs", "cs-fix": "phpcbf", "license-check": "vendor/bin/docheader check src/ test/", "phpstan": "phpstan analyse -l 4 src/", "test": "phpunit --colors=always", "test-coverage": "phpdbg -qrr vendor/bin/phpunit --coverage-clover clover.xml --coverage-text --colors=always" } } kseckinc/manager { "pci_database_confirm_delete_confirm": "Conferma", "pci_database_confirm_delete_cancel": "Annulla", "pci_database_confirm_delete": "Elimina il servizio", "pci_database_confirm_delete_description_1": "I seguenti servizi risultano associati al servizio {{serviceName}} per cui è stata richiesta l’eliminazione:", "pci_database_confirm_delete_description_2": "Vuoi davvero eliminare il servizio {{ databaseName }}?", "pci_database_confirm_delete_description_3": "Questa operazione comporta l’eliminazione dell'associazione (replica o integrazione).", "pci_database_confirm_delete_success_message": "Il servizio è stato disattivato correttamente.", "pci_database_confirm_delete_error_message": "Si è verificato un errore durante l'eliminazione del servizio: {{message}}" } index/m/mamas-lasagna-scramble.json { "directions": [ "Preheat oven to 375 degrees F (190 degrees C).", "Bring a large pot of lightly salted water to a boil. Cook cavatappi in the boiling water, stirring occasionally until cooked through but firm to the bite, 8 to 9 minutes. Drain and transfer to a large casserole dish.", "Heat a large skillet over medium-high heat. Cook and stir beef in the hot skillet until browned and crumbly, 5 to 7 minutes; drain and discard grease. Stir tomato sauce, diced tomatoes, feta cheese, black pepper, onion salt, garlic powder, basil, oregano, parsley, and thyme into ground beef. Simmer until flavors blend, 5 to 10 minutes. Pour ground beef mixture over cavatappi.", "Bake in the preheated oven until bubbly, about 20 minutes. Let cool 5 minutes before serving." 
], "ingredients": [ "1/2 (16 ounce) package cavatappi (corkscrew) pasta", "1 pound ground beef", "1 (15 ounce) can tomato sauce", "1 (14.5 ounce) can diced tomatoes", "1 (6 ounce) container crumbled feta cheese", "1 1/2 teaspoons ground black pepper", "1 1/2 teaspoons onion salt", "1 1/2 teaspoons garlic powder", "1 1/2 teaspoons dried basil", "1 1/2 teaspoons dried oregano", "1 1/2 teaspoons dried parsley", "1 1/2 teaspoons dried thyme" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Mama's Lasagna Scramble", "url": "http://allrecipes.com/recipe/232746/mamas-lasagna-scramble/" } SalmanAlSaigal/infection { "short_name": "Infection!", "name": "Infection!", "orientation": "portrait", "developer": { "name": "", "url": "https://github.com/SalmanAlSaigal" }, "start_url": "./index.html", "display": "fullscreen" } { "address": "backup.dmz", "proxy": "http://test-forwardproxy.nrs.bcgov:23128", "fluentBitRelease": "1.7.4", "apps": [ { "id": "wfdlv-rp", "type": "apache_reverse_proxy", "context": { "environment": "wfdlv", "!logs_path": "{% if localLogsPathPrefix %}{{localLogsPathPrefix}}/wfdlv{% else %}/sw_ux/httpd01/logs/hot{% endif %}/*-access*.log" } }, { "id": "wfint-rp", "type": "apache_reverse_proxy", "context": { "environment": "wfint", "!logs_path": "{% if localLogsPathPrefix %}{{localLogsPathPrefix}}/wfint{% else %}/sw_ux/httpd02/logs/hot{% endif %}/*-access*.log" } }, { "id": "wftst-rp", "type": "apache_reverse_proxy", "context": { "environment": "wftst", "!logs_path": "{% if localLogsPathPrefix %}{{localLogsPathPrefix}}/wftst{% else %}/sw_ux/httpd03/logs/hot{% endif %}/*-access*.log" } } ], "context": {} } {"title": "Sierra Leone - Social Protection and Labor", "downloads": 62, "tags": ["hxl", "indicators", "socioeconomics", "Sierra Leone"], "hxl": 1, "org": "World Bank Group", "id": "9ce6dc92-50e0-4392-bd6f-b8de9b0e88ea", "resources": [{"update_date": "2021-01-31T21:04:12.830405", "link": "https://data.humdata.org/dataset/9ce6dc92-50e0-4392-bd6f-b8de9b0e88ea/resource/32d7add6-f580-4c5a-896e-d04bc82f838b/download/social-protection-and-labor_sle.csv"}, {"update_date": "2021-01-31T21:04:12.830405", "link": "https://data.humdata.org/dataset/9ce6dc92-50e0-4392-bd6f-b8de9b0e88ea/resource/76e91f70-d4cf-43b1-9e85-6b91694738ac/download/qc_social-protection-and-labor_sle.csv"}]}rukysandy/consulting-pro-school {"componentChunkName":"component---src-pages-services-js","path":"/Services/","result":{"pageContext":{}},"staticQueryHashes":["2471692465","2918511044","3896500540","4216886299"]}htcondor/htcondor.org {"id": 849, "title": "Ticket #849: HDFS is not in default DC_DAEMON_LIST", "description": "
\nHDFS is not in the default DC_DAEMON_LIST. Shouldn't it be?
", "remarks": "
\n
\n2010-Aug-23 17:07:50 by adesmet:
\n\nBulk change of target version from v070402 to v070404 using ./ticket_target_mover.\n\n


\n2010-Oct-07 00:53:23 by ilikhan:
\n\nAvailable in v7.4.4 and later\n\n


\n2011-Dec-05 13:37:01 by danb:
\n\nAs of 7.6.4, HDFS is still not in DC_DAEMON_LIST by default. It appears that it never was. Perhaps the patch for this problem was never checked in?\n\n


\n2012-Feb-20 13:18:05 by danb:
\n\nStrangely, [28578] and [28579] are not showing up in this ticket, even though they contain the usual decoration indicating which ticket they belong to.\n\n


\n2012-Feb-27 14:37:49 by adesmet:
\n\nLooks reasonable to me. Closing.
", "derived_tickets": "", "attachments": "", "check_ins": "\n\n\n\n\n\n\n
2011-Dec-05 13:42\n\u00a0 \nCheck-in [28579]: Documented addition of HDFS to DC_DAEMON_LIST. ===GT=== #849 ===VersionHistory:Complete=== (By )
2011-Dec-05 13:39\n\u00a0 \nCheck-in [28578]: Added HDFS to default DC_DAEMON_LIST. #849 (By )
", "type": "defect", "last_change": "2012-Feb-27 14:37", "status": "resolved", "created": "2009-Oct-13 17:41", "fixed_version": "2009-Oct-13 17:41", "broken_version": "v070400", "priority": "4", "subsystem": "Daemons", "assigned_to": "adesmet", "derived_from": "", "creator": "danb", "rust": "", "customer_group": "other", "visibility": "public", "notify": "", "due_date": ""}{"PetMonstrosity": ["FluffyTheTerrible", "FluffyTamer", "TheBeastMaster", "TeamPet", "Mons", "MonsterRoommate", "MonsterAllies", "SupportingTheMonsterLovedOne", "FluffyTamer", "BewareTheNiceOnes", "FluffytheTerrible", "InvertedTrope", "RaisedByWolves", "InvertedTrope", "GiantSpider", "FormallyNamedPet", "BigFriendlyDog", "MixAndMatchCritters", "HeroicBastard", "UndyingLoyalty", "NobleWolf", "EvilDetectingDog", "FluffyTheTerrible", "TheFairFolk", "BuffySpeak", "EldritchAbomination", "MoreTeethThanTheOsmondFamily", "EldritchAbomination", "HeroicBastard", "UndyingLoyalty", "NobleWolf", "EvilDetectingDog", "FluffyTheTerrible", "ManEatingPlant", "UndyingLoyalty", "NobleWolf", "EvilDetectingDog", "UndyingLoyalty", "NobleWolf", "EvilDetectingDog", "BearsAreBadNews", "PantheraAwesome", "EverythingsEvenWorseWithSharks", "GoddamnedBats", "OurDragonsAreDifferent", "SeaMonster", "RefugeInAudacity", "LightningBruiser", "BearsAreBadNews", "ScaryScorpions", "TheDreaded", "MindControlDevice", "BossInMookClothing", "LightningBruiser", "BearsAreBadNews", "ScaryScorpions", "TheDreaded", "MindControlDevice", "BossInMookClothing", "GiantSpider", "SuperMode", "OneWingedAngel", "PowerUpFood", "Prehistoria", "LevelUpAtIntimacy5", "Mascot", "EldritchAbomination", "EldritchAbomination", "BribingYourWayToVictory", "HelloInsertNameHere", "UglyCute", "RentAZilla", "SternTeacher", "DarkIsNotEvil", "PoisonIsCorrosive", "MixAndMatchCritter", "BigCreepyCrawlies", "GoneHorriblyWrong", "NighInvulnerable", "MythologyGag", "EgomaniacHunter", "CanisMajor", "MixAndMatchCritters", "MythologyGag", "PhantomZone", "EldritchAbomination", "EverybodyMustGetStoned", "CardCarryingVillain", "HellHound", "NiceJobBreakingItHero", "EgomaniacHunter"]}{ "name": "rescript-js-map", "version": "1.1.0", "main": "index.js", "repository": "https://github.com/resinfo/rescript-js-map.git", "author": " <>", "license": "MIT", "description": "ReScript bindings to the native JavaScript Map data type", "files": [ "src/js_map.res", "bsconfig.json" ], "keywords": [ "rescript", "bindings", "map" ], "devDependencies": { "ava": "^3.15.0", "rescript": "^9.1.4", "rescript-ava": "^3.15.0" }, "scripts": { "make": "rescript build -with-deps", "test": "ava" }, "dependencies": { "rescript-js-iterator": "^1.1.0" } } {"Department":"Генеральна Прокуратура України","Region":"Загальнодержавний","Position":"Прокурор відділу правової допомоги управління міжнародно-правового співробітництва та європейської інтеграції Генеральної прокуратури України","Name":"","Декларації 2013":"","Декларації 2014":"","Декларації 2015":"https://public.nazk.gov.ua/declaration/d19ae4fa-31eb-4895-af09-21b058ee455a","Декларації 2016":"https://public.nazk.gov.ua/declaration/c481fd78-e308-4d53-87b1-0653d16568e3","Фото":"","Як живе":"","Декларації 
доброчесності":"http://www.gp.gov.ua/integrity_profile/files/aef97db3737cccf98f59313f5a19f7e4.pdf","type":"prosecutor","key":"aksonov_oleg_volodimirovich","analytics":[{"y":2015,"m":276450,"i":276450,"fi":20670},{"y":2016,"m":272530,"i":272530,"fi":26095},{"y":2017,"m":478485,"i":478485,"fi":78095}],"declarationsLinks":[{"id":"nacp_d19ae4fa-31eb-4895-af09-21b058ee455a","year":2015,"provider":"declarations.com.ua.opendata"},{"id":"nacp_c481fd78-e308-4d53-87b1-0653d16568e3","year":2016,"provider":"declarations.com.ua.opendata"},{"id":"nacp_b1832ee6-7571-43b7-b37d-ba1823288c97","year":2017,"provider":"declarations.com.ua.opendata"}]}{ "vorgangId": "61187", "VORGANG": { "WAHLPERIODE": "18", "VORGANGSTYP": "Schriftliche Frage", "TITEL": "Regelungen für Bundesbehörden und Zuwendungsempfänger zur Investition in Erneuerbare-Energie-Anlagen und zur Abgabe von Ladestrom für Elektrofahrzeuge ", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "18/1742", "DRS_TYP": "Schriftliche Fragen", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/18/017/1801742.pdf" }, "EU_DOK_NR": "", "SCHLAGWORT": [ "Bundesbehörde", { "_fundstelle": "true", "__cdata": "Elektrofahrzeug" }, { "_fundstelle": "true", "__cdata": "Energietechnik" }, "Erneuerbare Energie", "Helmholtz-Gemeinschaft Deutscher Forschungszentren", "Investition", "Zuwendung " ], "ABSTRAKT": " Originaltext der Frage(n): \r\n\r\n Ist es Institutionen des Bundes (Ministerien, Behörden) und Institutionen mit Zuwendungsgebern \"Bund\" (z. B. Forschungszentren der Helmholtz-Gemeinschaft) gestattet, mit den Zuwendungsgeldern in Erneuerbare-Energien-Anlagen zu investieren, und wenn nein, auf welcher rechtlichen Grundlage? \r\n\r\n Ist es Institutionen des Bundes gestattet, Ladestrom für Elektrofahrzeuge an Mitarbeiter und Besucher abzugeben, und falls nicht, auf welcher rechtlichen Grundlage? \r\n\r\n Wie beurteilt die Bundesregierung die in den Fragen 42 und 43 jeweils greifenden Regelungen? \r\n" }, "VORGANGSABLAUF": { "VORGANGSPOSITION": { "ZUORDNUNG": "BT", "URHEBER": "Schriftliche Frage/Schriftliche Antwort ", "FUNDSTELLE": "13.06.2014 - BT-Drucksache 18/1742, Nr. 42-44", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/18/017/1801742.pdf", "PERSOENLICHER_URHEBER": [ { "VORNAME": "Ralph", "NACHNAME": "Lenkert", "FUNKTION": "MdB", "FRAKTION": "DIE LINKE", "AKTIVITAETSART": "Frage" }, { "PERSON_TITEL": "Dr.", "VORNAME": "Michael", "NACHNAME": "Meister", "FUNKTION": "Parl. Staatssekr.", "RESSORT": "Bundesministerium der Finanzen", "AKTIVITAETSART": "Antwort" } ] } } } .vscode/settings.json { "python.formatting.provider": "black", "python.analysis.typeCheckingMode": "off", "python.analysis.diagnosticSeverityOverrides": { "reportUndefinedVariable": "none" }, "mypy.runUsingActiveInterpreter": true, "mypy.configFile": "pyproject.toml" } { "config": { "title": null, "claim" : "Thanks so much for submiting your feedback! It's extremely valuable to improve for coming posts!", "claim2" : "Do you have ideas, feedback, bugs report or want to share your thoughts? Fill the form and let's improve the website togueter ;)", "sendButton" : "Send feedback" }, "form" : { "name" : "What is your name?", "email" : "And your email?", "message" : "What do you think about the post? 
How can I improve it?", "newsletter": "Subscribe to updates from Jorge" } }{"index.js":"}{ "dropdown": ".rc-virtual-list", "notification": ".ant-notification-notice-message", "drawerBody": ".ant-drawer-wrapper-body", "notificationCloseIcon": ".ant-notification-close-icon", "notificationDesc": ".ant-notification-notice-description" } 1-10 {"name":"Ships","description":"обновленная механика кораблей","addiction":[{"name":"Era Cycle","href":"https://drive.google.com/file/d/1545d46Oa3Z7swY9BvWakqosk16-Gps_o/view?usp=sharing"}],"tags":["mod","new","medium","sea","units"],"author":"ЪЫЪ#1834","doc":"https://drive.google.com/file/d/1gKLz2Qb1AOuVcDNHNZhKgKFhnRmSByTY/view?usp=sharing"}{"css/metro-all.css":"sha256-MdpVi4kie19ECucdAj0Kyefbkcmul7kIBSZ+z9D1c9M=","css/metro-all.min.css":"sha256-aVWo/deBqFsauW/ecLv+GNKsyz57SRy+K0difajJxvU=","css/metro-colors.css":"sha256-O8XfC0aMWdiUm17mZBtBWiurGy7Y3r0CJmxl4RizBJ8=","css/metro-colors.min.css":"sha256-6XSiwAjyccbeDugtaIn/0v5Un4P1pg+8L0EWoUH9QxI=","css/metro-icons.css":"sha256-mUFkd/PY4zPWWLFoKfOVfd1NLI6RqaH4w3Ii6ZYEVTE=","css/metro-icons.min.css":"sha256-6mGHjOhLGo921RunoNCRED9l3ycs/FHAtgDCyDRyY4Q=","css/metro-rtl.css":"sha256-7K+E8lJZTArbsbnFSaZtGYTM3aUpZ8AOZyh3/Y844Vs=","css/metro-rtl.min.css":"sha256-UXWGDvKyFXx7dEXEH/tqJIHZ95rFKQiBtIyMG5TqKvg=","css/metro.css":"sha256-x1MnEDWMJ/hedlI0++BydNB9l7hWiWfCyTr6nimx2Mo=","css/metro.min.css":"sha256-GiZjLM6mJKr6eclj4QDiPXb4csLbSHLrNhTjypdxrqc=","js/metro.js":"sha256-Qgw4+qcgUeIa0rxh0hdlgE/oAI/qThwxYkcowenvTOI=","js/metro.min.js":"sha256-QmX+}{"name":"right","subject":1008,"date":"10122009-044932","paths":{"Pen":{"strokes":[{"x":-781,"y":38,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":-781,"y":38,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":-792,"y":39,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":-792,"y":39,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":-792,"y":39,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":-792,"y":39,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-792,"y":39,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-785,"y":29,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-768,"y":31,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-751,"y":23,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-711,"y":23,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-675,"y":16,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-612,"y":10,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-546,"y":-3,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-461,"y":-13,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-375,"y":-28,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-278,"y":-41,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-179,"y":-56,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":-83,"y":-67,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":16,"y":-77,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":106,"y":-84,"w":null,"z":nul
l,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":186,"y":-89,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":261,"y":-92,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":332,"y":-91,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":391,"y":-97,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":451,"y":-96,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":500,"y":-96,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":530,"y":-102,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":560,"y":-111,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":583,"y":-124,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}0 { "name": "gretzlab.", "private": true, "scripts": { "start": "yarn run generateFiles && yarn run serve", "serve": "babel-node tools/server.js", "lint": "eslint webpack.config.* app tools --color --fix", "pretty": "prettier --write \"**/*.{js,scss}\"", "generateIndexFiles": "run-p generateActionIndexFiles generateComponentIndexFiles generateReducerIndexFiles generateServiceIndexFiles generateSelectorIndexFiles", "generateActionIndexFiles": "babel-node tools/generateActionIndexFiles.js", "generateComponentIndexFiles": "babel-node tools/generateComponentIndexFiles.js", "generateServiceIndexFiles": "babel-node tools/generateServiceIndexFiles.js", "generateSelectorIndexFiles": "babel-node tools/generateSelectorIndexFiles.js", "generateReducerIndexFiles": "babel-node tools/generateReducerIndexFiles.js", "generateRootReducer": "babel-node tools/generateRootReducer.js", "generateFiles": "run-p generateIndexFiles generateRootReducer", "webpack": "webpack-cli --config webpack.config.prod.babel.js --bail", "clean": "rimraf /lib", "build": "yarn run generateFiles && yarn run webpack", "precommit": "lint-staged" }, "lint-staged": { "*.js": [ "prettier --write", "eslint --color --fix", "git add" ], "*.scss": [ "prettier --write", "git add" ] }, "dependencies": { "@hocs/with-lifecycle": "^0.5.0", "babel-polyfill": "^6.26.0", "history": "^4.7.2", "immutable": "^3.8.2", "jquery": "^3.3.1", "lodash": "^4.17.10", "moment": "^2.22.2", "react": "^16.4.1", "react-dom": "^16.4.1", "react-redux": "^5.0.7", "react-router": "^4.3.1", "react-router-dom": "^4.3.1", "react-router-redux": "next", "react-spring": "^5.4.0", "react-sw-img": "^1.0.1", "recompose": "^0.27.1", "redux": "^4.0.0", "redux-async-await": "^1.0.1", "redux-promise-middleware": "^5.1.1", "redux-thunk": "^2.3.0", "reselect": "^3.0.1", "semantic-ui-react": "^0.82.0", "truefit-react-utils": "^0.0.14" }, "devDependencies": { "autoprefixer": "^8.6.5", "babel-cli": "^6.26.0", "babel-core": "^6.26.3", "babel-eslint": "^8.2.5", "babel-loader": "^7.1.5", "babel-plugin-external-helpers": "^6.22.0", "babel-plugin-syntax-dynamic-import": "^6.18.0", "babel-plugin-transform-decorators-legacy": "^1.3.5", "babel-plugin-transform-object-rest-spread": "^6.26.0", "babel-plugin-transform-react-remove-prop-types": "^0.4.13", "babel-preset-env": "^1.7.0", "babel-preset-react": "^6.24.1", "babel-preset-react-hmre": 
"^1.1.1", "babel-register": "^6.26.0", "browser-sync": "^2.24.5", "connect-history-api-fallback": "^1.5.0", "copy-webpack-plugin": "^4.5.2", "cross-env": "^5.2.0", "css-loader": "^1.0.0", "cssnano": "^4.0.0", "eslint": "^5.1.0", "eslint-config-prettier": "^2.9.0", "eslint-plugin-import": "^2.13.0", "eslint-plugin-react": "^7.10.0", "file-loader": "^1.1.11", "html-webpack-plugin": "^3.2.0", "husky": "^0.14.3", "lint-staged": "^7.2.0", "mini-css-extract-plugin": "^0.4.1", "node-dir": "^0.1.17", "node-sass": "^4.9.2", "npm-run-all": "^4.1.3", "postcss-loader": "^2.1.5", "prettier": "^1.13.7", "react-hot-loader": "^4.3.3", "redux-devtools-extension": "^2.13.5", "redux-immutable": "^4.0.0", "rimraf": "^2.6.2", "sass-loader": "^7.0.3", "style-loader": "^0.21.0", "url-loader": "^1.0.1", "webpack": "^4.15.1", "webpack-cli": "^3.0.8", "webpack-dev-middleware": "^3.1.3", "webpack-hot-middleware": "^2.22.2", "webpack-stylish": "^0.1.8" } } { "class" : { }, "instance" : { "formatProcessorTitle:" : "smalltalkCI 5/17/2017 19:13", "gemstoneClient" : "AndreiChis 5/16/2017 20:56:42", "gemstoneClient:" : "AndreiChis 5/16/2017 20:56:42", "gtDisplayOn:" : "smalltalkCI 5/17/2017 19:16", "gtSpotterGemstoneClassesProcessorOn:" : "smalltalkCI 5/17/2017 19:12", "gtSpotterGemstoneImplementorsProcessorOn:" : "AndreiChis 5/24/2017 15:25", "gtSpotterGemstoneMethodsProcessorOn:" : "smalltalkCI 5/17/2017 19:12", "gtSpotterGemstoneSendersProcessorOn:" : "AndreiChis 5/24/2017 17:36", "gtSpotterGemstoneUsersProcessorOn:" : "smalltalkCI 5/17/2017 19:12", "gtSpotterRawGemstoneClassesProcessorOn:" : "AndreiChis 5/16/2017 20:56:42", "gtSpotterRawGemstoneImplementorsProcessorOn:" : "AndreiChis 5/24/2017 17:35", "gtSpotterRawGemstoneMethodsProcessorOn:" : "AndreiChis 5/16/2017 20:56:42", "gtSpotterRawGemstoneSendersProcessorOn:" : "AndreiChis 5/24/2017 17:35", "gtSpotterRawGemstoneUsersProcessorOn:" : "AndreiChis 5/16/2017 20:56:42", "isStandalone" : "smalltalkCI 5/17/2017 10:54", "makeStandalone" : "smalltalkCI 5/17/2017 10:54", "spotterProcessorsFor:" : "AndreiChis 5/16/2017 20:56:42" } } { "body": "ARE TRUTHS KNOWN IN MORE PARTICULAR INSTANCES, AS WELL AS IN THOSE GENERAL MAXIMS, AND KNOWN ALSO IN PARTICULAR INSTANCES,", "next": "https://raw.githubusercontent.com/CAPSELOCKE/CAPSELOCKE/master/tweets/15072.json" }Gremlin/CosmosDb.GremlinApi.WithSqlApi/ComicBookData/edge aaba9cf0-570d-4a9b-9fa0-896dcf615795.json {"label":"seenIn","id":"aaba9cf0-570d-4a9b-9fa0-896dcf615795","_sink":"e2cd336e-79fd-4a10-8a99-7758e944b1fd","_sinkLabel":"comic","_vertexId":"1284c940-6ad6-4de4-a56f-4aedb8cbd601","_vertexLabel":"hero","_isEdge":true,"_rid":"VyAjAPMRAACVBQAAAAAAAA==","_self":"dbs/VyAjAA==/colls/VyAjAPMRAAA=/docs/VyAjAPMRAACVBQAAAAAAAA==/","_etag":"\"50004dee-0000-0000-0000-595ebc010000\"","_attachments":"attachments/","_ts":1499380737}{ "pp2.1:1.2": "phusitvā → phassitvā (bj)", "pp2.1:2.2": "phusitvā → phassitvā (bj)", "pp2.1:12.2": "eḷā → jaḷā (sya-all, pts-vp-pli1)", "pp2.1:13.2": "paññavanto → paññāvantā (bj)", "pp2.1:31.2": "sambodhiparāyano → sambodhiparāyaṇo (bj, mr)", "pp2.1:37.3": "kālakiriyaṁ → kālaṁ kiriyaṁ (mr)" }{ "browsers": ["chrome:headless", "firefox:headless"], "appCommand": "gatsby serve", "reporter": "minimal" } { "aliases": [ "Agent Zero", "", "", ", Maverick", "Maverick" ], "authors": [ "" ], "description": " (born Christoph \"Christopher\" Nord) is a mutant comic book character in the fictional Marvel Universe.", "images": { "background": null, "thumbnail": 
"https://upload.wikimedia.org/wikipedia/en/thumb/7/76/WEAPON_X_3.jpg/250px-WEAPON_X_3.jpg" }, "mainColor": null, "name": "", "partners": [ ], "powers": [ "Depowered", "Inodorosity", "Kinetic absorption", "Kinetic blasts" ], "ranking": { "comicCount": 0, "eventCount": 0, "pageviewCount": 7008, "serieCount": 0, "storyCount": 0 }, "secretIdentities": [ "", "Christopher" ], "species": [ "Mutant" ], "superName": "", "teams": [ "CIA", "Cell Six", "Central Intelligence Agency", "Team X", "Weapon X" ], "urls": { "marvel": null, "wikipedia": "https://en.wikipedia.org/wiki/David_North_(comics)" } }{"ast":null,"code":"var _jsxFileName = \"C:\\\\Users\\\\AbdullahBozdag\\\\Documents\\\\_apo_bozdag\\\\no303\\\\no303-cafemenu-frontend\\\\src\\\\components\\\\MenuButton\\\\MenuButton.js\";\nimport React from \"react\";\nimport TouchableHighlight from \"react-native-web/dist/exports/TouchableHighlight\";\nimport Image from \"react-native-web/dist/exports/Image\";\nimport Text from \"react-native-web/dist/exports/Text\";\nimport View from \"react-native-web/dist/exports/View\";\nimport PropTypes from \"prop-types\";\nimport styles from \"./styles\";\nexport default function MenuButton(props) {\n var title = props.title,\n onPress = props.onPress,\n source = props.source;\n return React.createElement(TouchableHighlight, {\n onPress: onPress,\n style: styles.btnClickContain,\n underlayColor: \"rgba(128, 128, 128, 0.1)\",\n __self: this,\n __source: {\n fileName: _jsxFileName,\n lineNumber: 10,\n columnNumber: 5\n }\n }, React.createElement(View, {\n style: styles.btnContainer,\n __self: this,\n __source: {\n fileName: _jsxFileName,\n lineNumber: 11,\n columnNumber: 7\n }\n }, React.createElement(Image, {\n source: source,\n style: styles.btnIcon,\n __self: this,\n __source: {\n fileName: _jsxFileName,\n lineNumber: 12,\n columnNumber: 9\n }\n }), React.createElement(Text, {\n style: styles.btnText,\n __self: this,\n __source: {\n fileName: _jsxFileName,\n lineNumber: 13,\n columnNumber: 9\n }\n }, title)));\n}\nMenuButton.propTypes = {\n onPress: PropTypes.func,\n source: PropTypes.number,\n title: PropTypes.any\n};","map":{"version":3,"sources":["C:/Users/AbdullahBozdag/Documents/_apo_bozdag/no303/no303-cafemenu-frontend/src/components/MenuButton/MenuButton.js"],"names":["React","PropTypes","styles","MenuButton","props","title","onPress","source","btnClickContain","btnContainer","btnIcon","btnText","propTypes","func","number","any"],"mappings":";AAAA,OAAOA,KAAP,MAAkB,OAAlB;;;;;AAEA,OAAOC,SAAP,MAAsB,YAAtB;AACA,OAAOC,MAAP;AAEA,eAAe,SAASC,UAAT,CAAoBC,KAApB,EAA2B;AAAA,MAChCC,KADgC,GACLD,KADK,CAChCC,KADgC;AAAA,MACzBC,OADyB,GACLF,KADK,CACzBE,OADyB;AAAA,MAChBC,MADgB,GACLH,KADK,CAChBG,MADgB;AAGxC,SACE,oBAAC,kBAAD;AAAoB,IAAA,OAAO,EAAED,OAA7B;AAAsC,IAAA,KAAK,EAAEJ,MAAM,CAACM,eAApD;AAAqE,IAAA,aAAa,EAAC,0BAAnF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,KACE,oBAAC,IAAD;AAAM,IAAA,KAAK,EAAEN,MAAM,CAACO,YAApB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,KACE,oBAAC,KAAD;AAAO,IAAA,MAAM,EAAEF,MAAf;AAAuB,IAAA,KAAK,EAAEL,MAAM,CAACQ,OAArC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IADF,EAEE,oBAAC,IAAD;AAAM,IAAA,KAAK,EAAER,MAAM,CAACS,OAApB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,KAA8BN,KAA9B,CAFF,CADF,CADF;AAQD;AAEDF,UAAU,CAACS,SAAX,GAAuB;AACrBN,EAAAA,OAAO,EAAEL,SAAS,CAACY,IADE;AAErBN,EAAAA,MAAM,EAAEN,SAAS,CAACa,MAFG;AAGrBT,EAAAA,KAAK,EAAEJ,SAAS,CAACc;AAHI,CAAvB","sourcesContent":["import React from \"react\";\nimport { TouchableHighlight, Image, Text, View } from \"react-native\";\nimport PropTypes from \"prop-types\";\nimport styles from 
\"./styles\";\n\nexport default function MenuButton(props) {\n const { title, onPress, source } = props;\n\n return (\n \n \n \n {title}\n \n \n );\n}\n\nMenuButton.propTypes = {\n onPress: PropTypes.func,\n source: PropTypes.number,\n title: PropTypes.any,\n};\n"]},"metadata":{},"sourceType":"module"}{"organizations": [], "uuid": "474bcaed88fb13647f3df85a505353de202c2b89", "thread": {"social": {"gplus": {"shares": 0}, "pinterest": {"shares": 0}, "vk": {"shares": 0}, "linkedin": {"shares": 0}, "facebook": {"likes": 0, "shares": 0, "comments": 0}, "stumbledupon": {"shares": 0}}, "site_full": "www.tripadvisor.com", "main_image": "https://media-cdn.tripadvisor.com/media/photo-s/0a/0e/8f/85/terrasse-exterieure-roof.jpg", "site_section": "https://www.tripadvisor.com/Hotel_Review-g187147-d198085-Reviews-Terrass_Hotel_by_MH-Paris_Ile_de_France.html", "section_title": "Terrass'' Hotel by MH - UPDATED 2017 Reviews & Price Comparison (Paris, France) - TripAdvisor", "url": "https://www.tripadvisor.com/ShowUserReviews-g187147-d198085-r465407434-Terrass_Hotel_by_MH-Paris_Ile_de_France.html", "country": "US", "domain_rank": 189, "title": "First visit to Paris", "performance_score": 0, "site": "tripadvisor.com", "participants_count": 1, "title_full": "First visit to Paris - Review of Terrass'' Hotel by MH, Paris, France - TripAdvisor", "spam_score": 0.048, "site_type": "discussions", "published": "2017-03-07T02:00:00.000+02:00", "replies_count": 0, "uuid": "474bcaed88fb13647f3df85a505353de202c2b89"}, "author": "", "url": "https://www.tripadvisor.com/ShowUserReviews-g187147-d198085-r465407434-Terrass_Hotel_by_MH-Paris_Ile_de_France.html", "ord_in_thread": 0, "title": "First visit to Paris", "locations": [], "entities": {"persons": [], "locations": [], "organizations": []}, "highlightText": "", "language": "english", "persons": [], "text": "We were a group of 6 (3x2 couples) staying for 2 nights Saturday & Sunday.\nUnfortunately - the weather for our entire visit was appalling with non stop heavy rain.\nAfter an initial greeting and check in by the highly professional & courteous staff, we went to unpack.\nThe room we were allocated smelt really strongly of cigarettes; as neither my husband or I have ever smoked we could not stay that room, We went to the reception to register the issue - and were immediately given another room along with an apology.\nThe room was a good size, beautifully clean, with a very comfortable bed & pillows.\nThe wall unit designed to hang our clothes on had no long hanging space to accommodate the short dress I took.\nSmall irritation with the shower room was the lack of anywhere to hand our towels after we showering.\nBecause of the weather we were deterred from wanting to walk around looking for a restaurant; we would have loved to stay in the restaurant for our evening drinks and meals - but the cost of both was sadly, way above our budget.\nA shame - because the views from the bar are lovely - although its very chilly despite the heaters and blankets provided!\nA very comfortable stay in a good location; however, we found the cost of eating and drinking, and we went no where special I might add - way too expensive once the various taxes were added - so its unlikely we will return!", "external_links": [], "published": "2017-03-07T02:00:00.000+02:00", "crawled": "2017-03-27T05:12:34.305+03:00", "highlightTitle": ""}{"id": 38193, "date": "2014-11-28 01:02:33", "user": "sambling", "post": 
"<center>[![](http://i.imgur.com/f83NbwC.png)](https://holderhost.com/)</center>\r\n\r\n**Black Friday is upon us and we have two great specials if you signup today!**</br>\r\n<ul>\r\n<li>2 FREE EXTRA IPS in Phoenix, AZ. That\u2019s right. We\u2019ve got some to spare so we\u2019re giving you a total of 3 IPs on every plan ordered in Phoenix, AZ. No need to ticket in, it\u2019s all automatic.</li>\r\n<li>$1 first month in Buffalo, NY. Coupon Code:$1BLCKFRIDAY</li>\r\n</ul>\r\n<center>![](http://i.imgur.com/j13HRXa.png)</center>\r\n\r\nholderhost.com was originally founded, by me (), on July 16 2012 so we are over two years old. came on board last year as an equal partner in holderhost. We are a registered company in [New Zealand](http://www.business.govt.nz/companies/app/ui/pages/companies/5130101). \r\n\r\n<center>![](http://i.imgur.com/3L3lCVo.png)</center>\r\n\r\n**512mb Plan**<br>\r\n512mb Dedicated Ram | 512mb vSwap<br>\r\n1 IP<br>\r\n30GB diskspace<br>\r\n2048GB bandwidth<br>\r\n1Gbs port<br>\r\nPrice: <del>$4.5/month</del> $3/ month or $30 / Year <br>\r\n**[ORDER PHOENIX- 2 EXTRA IPs](https://holderhost.com/clients/cart.php?gid=3)** | **[ORDER BUFFALO- $1 FIRST MONTH!](https://holderhost.com/clients/cart.php?gid=7)**\r\n\r\n**1GB Pan**:<br>\r\n1024mb Dedicated Ram | 1024mb vSwap<br>\r\n1 IP<br>l\r\n75GB diskspace<br>\r\n3072GB bandwdith<br>\r\n1Gbs port<br>\r\nPrice: <del>$7/month</del> $5/ month<br>\r\n**[ORDER PHOENIX- 2 EXTRA IPs](https://holderhost.com/clients/cart.php?gid=3)** | **[ORDER BUFFALO- $1 FIRST MONTH!](https://holderhost.com/clients/cart.php?gid=7)**\r\n\r\n**2GB Plan**:<br>\r\n2048mb Dedicated Ram | 2048mb vSwap<br>\r\n1 IP<br>\r\n150GB diskspace<br>\r\n4096GB bandwidth<br>\r\n1Gbs port<br>\r\nPrice: <del>$10/month</del> $7/month or $21/Qtr <br>\r\n**[ORDER PHOENIX- 2 EXTRA IPs](https://holderhost.com/clients/cart.php?gid=3)** | **[ORDER BUFFALO- $1 FIRST MONTH!](https://holderhost.com/clients/cart.php?gid=7)**<br><br>\r\n**FREE** UPGRADE TO **3GB** RAM ON WHEN PAYING QUARTERLY. JUST REPLY BELOW WITH YOUR ORDER # OR TICKET IN. \r\n\r\n\r\n**Additional IPs**: $1.5/mo (we may be able to do bulk pricing with justification, please contact us).\r\n\r\n\r\n**Important Links**:\r\n\r\n[**AUP**](http://holderhost.com/clients/knowledgebase.php?action=displayarticle&id=13) | [**TOS** ](http://holderhost.com/clients/knowledgebase.php?action=displayarticle&id=12) | \r\n[**Privacy Policy** ](http://holderhost.com/clients/knowledgebase.php?action=displayarticle&id=10)\r\n\r\nQuick AUP: IRC, Public Proxies, Public VPNs, Public Torrenting, TOR, Gameservers are not allowed. Nothing Illegal in the US (including Spamming). "}alexa/alexa-dataset-redtab {"relation": [["Author", "Stevesesy", "Stevesesy", "Stevesesy", "Stevesesy", "Stevesesy"], ["Title", "Love: Part 1 Devotion: Page 10", "EVERDRED", "Love: Part 1 Devotion: Page 9", "Edisnoom by Stevesesy", "Pink Hair"], ["Description", "Love - #10 My MOTH3R Comic, just for you!", "EVER DRED :D", "Love - #09 My MOTH3R Comic, just for you! 
And maybe Rockwell...", "Ed is noom?", "She's a princess."], ["Date", "7/22/06", "3/2/08", "7/22/06", "9/30/06", "8/10/06"], ["Rank", "0.00", "0.00", "0.00", "0.00", "0.00"]], "pageTitle": "STARMEN.NET - EarthBound / Mother 3 Goodness.", "title": "", "url": "http://starmen.net/vote/vote.php?id=15751&stuff=RankDESC&stuffPageNumber=13", "hasHeader": true, "headerPosition": "FIRST_ROW", "tableType": "RELATION", "tableNum": 0, "s3Link": "common-crawl/crawl-data/CC-MAIN-2015-32/segments/1438042988061.16/warc/CC-MAIN-20150728002308-00256-ip-10-236-191-2.ec2.internal.warc.gz", "recordEndOffset": 219846963, "recordOffset": 219838761, "tableOrientation": "HORIZONTAL", "textBeforeTable": "> 9 8 7 6 5 3 0 < Other Submissions by Stevesesy XX This image has been resized for your convenience. Click on it to view the full size version. Mother Mother - by Stevesesy \u00a0Mother2 Go Back! --> } } } ThisImgResized.style.display = \"\"; //alert (ThisImgResized.style.display); ThisImgResized = document.getElementById('ImgResized') what.width=(winW-275); what.style.cursor = \"pointer\"; saveWidth = what.width; { else what.width=saveWidth; if (what.width==(winW-275)) if (what.width>(winW-275) || saveWidth>(winW-275)) { what = document.getElementById(what); function scaleImg(what){ } winW = document.body.offsetWidth; if (navigator.appName.indexOf(\"Microsoft\")!=-1) { winW = window.innerWidth; if (navigator.appName==\"Netscape\") //what = document.getElementById(what); //function scaleImg(what){ var saveWidth = 0;", "textAfterTable": "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 Junk: Weird Junk: Earthbound Thing of the Day. Awesome Fan Thing of the Day.", "hasKeyColumn": true, "keyColumnIndex": 1, "headerRowIndex": 0}result/caleg/caleg_kabupaten_perubahan_6990_20.json [{"namaKab":"BELITUNG","originalFilename":"1. EDDY SURAHMAN.jpg","namaPartai":"Partai Keadilan dan Persatuan Indonesia","id":202261,"noUrut":1,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"BELITUNG","originalFilename":"2. DJAHIRI.jpg","namaPartai":"Partai Keadilan dan Persatuan Indonesia","id":204286,"noUrut":2,"nama":"DJAHIRI","stringJenisKelamin":"Laki-Laki"},{"namaKab":"BELITUNG","originalFilename":"3. WAHYUNI, SE.jpg","namaPartai":"Partai Keadilan dan Persatuan Indonesia","id":208188,"noUrut":3,"nama":"WAHYUNI, SE","stringJenisKelamin":"Perempuan"},{"namaKab":"BELITUNG","originalFilename":"4.SONI JAYA PUTRA.jpg","namaPartai":"Partai Keadilan dan Persatuan Indonesia","id":248709,"noUrut":4,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"BELITUNG","originalFilename":"5. SUPARTI.jpg","namaPartai":"Partai Keadilan dan Persatuan Indonesia","id":249418,"noUrut":5,"nama":"SUPARTI","stringJenisKelamin":"Perempuan"},{"namaKab":"BELITUNG","originalFilename":"6. NOVITA SARI PUTRI HARTONO, SE.jpg","namaPartai":"Partai Keadilan dan Persatuan Indonesia","id":280502,"noUrut":6,"nama":", SE","stringJenisKelamin":"Perempuan"}]{ "directions": [ "Mix together olive oil, balsamic vinegar, oregano, salt and pepper." ], "ingredients": [ "3 tablespoons extra virgin olive oil", "2 tablespoons balsamic vinegar", "1/2 teaspoon dried oregano", "salt and pepper to taste" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Simple Sicilian Salad Dressing", "url": "http://allrecipes.com/recipe/14263/simple-sicilian-salad-dressing/" } { "image": " "name": "", "description": "This pony is Spearmint, with bright Blue eyes. 
When they are not being rented by Unicorn Rentals users, they love AWS" }package.json0 { "name": "metadata-1000000-demo", "version": "0.0.1", "description": "Демо-приложение: справочник 1000000 записей", "main": "''", "scripts": { "build": "webpack --config webpack.production.config.js --progress --profile --colors", "start": "webpack-dev-server --progress --profile --colors", "lint": "eslint --ext js --ext jsx src || exit 0" }, "repository": { "type": "git", "url": "https://github.com/oknosoft/million" }, "author": " <> (http://www.oknosoft.ru)", "license": "MIT", "homepage": "https://github.com/oknosoft/million#readme", "dependencies": { "node-sass": "^3.9", "react": "^15.3", "react-dom": "^15.3", "react-addons-shallow-compare": "^15.3", "react-virtualized": "^8.0", "sass-loader": "^4.0", "material-ui": "^0.16", "metadata-core": "^2.0.0-beta.8", "metadata-pouchdb": "^2.0.0-beta.8", "normalize.css": "^5.0" }, "devDependencies": { "babel-core": "^6.16", "babel-loader": "^6.2", "babel-plugin-transform-async-to-generator": "^6.16", "babel-plugin-transform-class-properties": "^6.16", "babel-plugin-transform-decorators-legacy": "^1.3", "babel-plugin-transform-runtime": "^6.15", "babel-preset-es2015": "^6.16", "babel-preset-react": "^6.16", "babel-preset-stage-0": "^6.6", "babel-runtime": "^6.11", "copy-webpack-plugin": "^3.0", "css-loader": "^0.25", "eslint": "^3.0", "eslint-plugin-react": "^6.4", "extract-text-webpack-plugin": "^1.0", "file-loader": "^0.9", "html-webpack-plugin": "^2.24", "json-loader": "^0.5", "react-hot-loader": "^3.0.0-beta.6", "style-loader": "^0.13", "url-loader": "^0.5", "webpack": "^1.13", "webpack-cleanup-plugin": "^0.4", "webpack-dev-server": "^1.16" } } node_modules/.cache/babel-loader/1399415ca6a20d439013241198df9f74.json0 {"ast":null,"code":"\"use strict\";\n\nvar _classCallCheck = require(\"/Users/gurvinderdehl/Documents/GitHub/New-React-Portifolio/node_modules/babel-preset-react-app/node_modules/@babel/runtime/helpers/classCallCheck\");\n\nvar _createClass = require(\"/Users/gurvinderdehl/Documents/GitHub/New-React-Portifolio/node_modules/babel-preset-react-app/node_modules/@babel/runtime/helpers/createClass\");\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.Attract = void 0;\n\nvar Attract = /*#__PURE__*/function () {\n function Attract() {\n _classCallCheck(this, Attract);\n\n this.enable = false;\n this.rotate = {\n x: 3000,\n y: 3000\n };\n }\n\n _createClass(Attract, [{\n key: \"load\",\n value: function load(data) {\n var _a, _b, _c, _d;\n\n if (data === undefined) {\n return;\n }\n\n if (data.enable !== undefined) {\n this.enable = data.enable;\n }\n\n var rotateX = (_b = (_a = data.rotate) === null || _a === void 0 ? void 0 : _a.x) !== null && _b !== void 0 ? _b : data.rotateX;\n\n if (rotateX !== undefined) {\n this.rotate.x = rotateX;\n }\n\n var rotateY = (_d = (_c = data.rotate) === null || _c === void 0 ? void 0 : _c.y) !== null && _d !== void 0 ? 
_d : data.rotateY;\n\n if (rotateY !== undefined) {\n this.rotate.y = rotateY;\n }\n }\n }, {\n key: \"rotateX\",\n get: function get() {\n return this.rotate.x;\n },\n set: function set(value) {\n this.rotate.x = value;\n }\n }, {\n key: \"rotateY\",\n get: function get() {\n return this.rotate.y;\n },\n set: function set(value) {\n this.rotate.y = value;\n }\n }]);\n\n return Attract;\n}();\n\nexports.Attract = Attract;","map":{"version":3,"sources":["/Users/gurvinderdehl/Documents/GitHub/New-React-Portifolio/node_modules/tsparticles/dist/Options/Classes/Particles/Move/Attract.js"],"names":["Object","defineProperty","exports","value","Attract","enable","rotate","x","y","data","_a","_b","_c","_d","undefined","rotateX","rotateY"],"mappings":"AAAA;;;;;;AACAA,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,YAA/B,EAA6C;AAAEC,EAAAA,KAAK,EAAE;AAAT,CAA7C;AACAD,OAAO,CAACE,OAAR,GAAkB,KAAK,CAAvB;;IACMA,O;AACF,qBAAc;AAAA;;AACV,SAAKC,MAAL,GAAc,KAAd;AACA,SAAKC,MAAL,GAAc;AACVC,MAAAA,CAAC,EAAE,IADO;AAEVC,MAAAA,CAAC,EAAE;AAFO,KAAd;AAIH;;;;yBAaIC,I,EAAM;AACP,UAAIC,EAAJ,EAAQC,EAAR,EAAYC,EAAZ,EAAgBC,EAAhB;;AACA,UAAIJ,IAAI,KAAKK,SAAb,EAAwB;AACpB;AACH;;AACD,UAAIL,IAAI,CAACJ,MAAL,KAAgBS,SAApB,EAA+B;AAC3B,aAAKT,MAAL,GAAcI,IAAI,CAACJ,MAAnB;AACH;;AACD,UAAMU,OAAO,GAAG,CAACJ,EAAE,GAAG,CAACD,EAAE,GAAGD,IAAI,CAACH,MAAX,MAAuB,IAAvB,IAA+BI,EAAE,KAAK,KAAK,CAA3C,GAA+C,KAAK,CAApD,GAAwDA,EAAE,CAACH,CAAjE,MAAwE,IAAxE,IAAgFI,EAAE,KAAK,KAAK,CAA5F,GAAgGA,EAAhG,GAAqGF,IAAI,CAACM,OAA1H;;AACA,UAAIA,OAAO,KAAKD,SAAhB,EAA2B;AACvB,aAAKR,MAAL,CAAYC,CAAZ,GAAgBQ,OAAhB;AACH;;AACD,UAAMC,OAAO,GAAG,CAACH,EAAE,GAAG,CAACD,EAAE,GAAGH,IAAI,CAACH,MAAX,MAAuB,IAAvB,IAA+BM,EAAE,KAAK,KAAK,CAA3C,GAA+C,KAAK,CAApD,GAAwDA,EAAE,CAACJ,CAAjE,MAAwE,IAAxE,IAAgFK,EAAE,KAAK,KAAK,CAA5F,GAAgGA,EAAhG,GAAqGJ,IAAI,CAACO,OAA1H;;AACA,UAAIA,OAAO,KAAKF,SAAhB,EAA2B;AACvB,aAAKR,MAAL,CAAYE,CAAZ,GAAgBQ,OAAhB;AACH;AACJ;;;wBA5Ba;AACV,aAAO,KAAKV,MAAL,CAAYC,CAAnB;AACH,K;sBACWJ,K,EAAO;AACf,WAAKG,MAAL,CAAYC,CAAZ,GAAgBJ,KAAhB;AACH;;;wBACa;AACV,aAAO,KAAKG,MAAL,CAAYE,CAAnB;AACH,K;sBACWL,K,EAAO;AACf,WAAKG,MAAL,CAAYE,CAAZ,GAAgBL,KAAhB;AACH;;;;;;AAmBLD,OAAO,CAACE,OAAR,GAAkBA,OAAlB","sourcesContent":["\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nexports.Attract = void 0;\nclass Attract {\n constructor() {\n this.enable = false;\n this.rotate = {\n x: 3000,\n y: 3000,\n };\n }\n get rotateX() {\n return this.rotate.x;\n }\n set rotateX(value) {\n this.rotate.x = value;\n }\n get rotateY() {\n return this.rotate.y;\n }\n set rotateY(value) {\n this.rotate.y = value;\n }\n load(data) {\n var _a, _b, _c, _d;\n if (data === undefined) {\n return;\n }\n if (data.enable !== undefined) {\n this.enable = data.enable;\n }\n const rotateX = (_b = (_a = data.rotate) === null || _a === void 0 ? void 0 : _a.x) !== null && _b !== void 0 ? _b : data.rotateX;\n if (rotateX !== undefined) {\n this.rotate.x = rotateX;\n }\n const rotateY = (_d = (_c = data.rotate) === null || _c === void 0 ? void 0 : _c.y) !== null && _d !== void 0 ? 
_d : data.rotateY;\n if (rotateY !== undefined) {\n this.rotate.y = rotateY;\n }\n }\n}\nexports.Attract = Attract;\n"]},"metadata":{},"sourceType":"script"}0 { // configure linters "eslint.enable": true, "eslint.packageManager": "npm", "eslint.provideLintTask": true, "eslint.validate": ["javascript", "svelte"], "javascript.validate.enable": false, "stylelint.enable": true, "svelte.plugin.typescript.diagnostics.enable": false, // configure formatters "html.format.enable": false, "json.format.enable": false, "javascript.format.enable": false, "typescript.format.enable": false, "prettier.requireConfig": true, "editor.defaultFormatter": "esbenp.prettier-vscode", "[svelte]": { "editor.defaultFormatter": "JamesBirtles.svelte-vscode" }, // configure editor to match code style "editor.formatOnSave": true, "editor.detectIndentation": false, "editor.tabSize": 2, "editor.trimAutoWhitespace": true, "editor.insertSpaces": false, "editor.rulers": [80], "files.eol": "\n", "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "files.trimTrailingWhitespace": true, "javascript.preferences.quoteStyle": "single", "typescript.preferences.quoteStyle": "single", // allow breakpoints in Svelte templates "debug.allowBreakpointsEverywhere": true, // ignore npm- and Sapper-generated directores "files.exclude": { "**/.git/**": true, "**/node_modules": true, "**/__sapper__": true }, "files.watcherExclude": { "**/node_modules/**": true, "**/__sapper__/**": true }, "search.exclude": { "**/.git/**": true, "**/node_modules/**": true, "**/__sapper__/**": true }, // git commit message settings "git.inputValidation": "always", "git.inputValidationLength": 72, "git.inputValidationSubjectLength": 50, // npm integration "npm.autoDetect": "on", "npm.enableScriptExplorer": true, "npm.packageManager": "npm" } 100-1000 { "id": 99840, "name": "Cute mini cursor", "description": "A cute, mini yet simple cursor for your dashboard. \r\nDon't install if you're not used to a mini one! 
:) Enjoy this mini cursor ^^", "user": { "id": 261718, "name": "chloebunnie", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": "ccbync" }, "updated": "2014-04-01T07:46:52.000Z", "weekly_install_count": 0, "total_install_count": 7561, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/99840_after.png?r=1618733357", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": [ "https://userstyles.org/style_screenshots/99840_additional_10624.gif?r=1618733357" ], "license": "ccbync", "created": "2014-04-01T06:53:57.000Z", "category": "site", "raw_subcategory": "tumblr", "subcategory": "tumblr", "additional_info": null, "style_tags": [], "css": "@-moz-document url-prefix('http://www.tumblr.com/'), url-prefix('https://www.tumblr.com/') {\r\n\r\n\r\nbody \r\n {cursor: url(http://media.tumblr.com/tumblr_lqs4idHqTZ1qfoi4t.png),\r\nprogress !important;}\r\n\r\na, a:hover\r\n {cursor: url(http://media.tumblr.com/tumblr_m2umkqvNUT1qfamg6.gif),\r\nprogress !important;}\r\n\r\n}\r\n\r\n\r\n", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/99840/cute-mini-cursor.user.js", "style_settings": [] }{ "_from": "traceparent@^1.0.0", "_id": "traceparent@1.0.0", "_inBundle": false, "_integrity": "sha512-b/hAbgx57pANQ6cg2eBguY3oxD6FGVLI1CC2qoi01RmHR7AYpQHPXTig9FkzbWohEsVuHENZHP09aXuw3/LM+w==", "_location": "/traceparent", "_phantomChildren": {}, "_requested": { "type": "range", "registry": true, "raw": "traceparent@^1.0.0", "name": "traceparent", "escapedName": "traceparent", "rawSpec": "^1.0.0", "saveSpec": null, "fetchSpec": "^1.0.0" }, "_requiredBy": [ "/elastic-apm-node" ], "_resolved": "https://registry.npmjs.org/traceparent/-/traceparent-1.0.0.tgz", "_shasum": "9b14445cdfe5c19f023f1c04d249c3d8e003a5ce", "_spec": "traceparent@^1.0.0", "_where": "/home/egovridc/mfa/MFAServer/node_modules/elastic-apm-node", "author": { "name": "", "email": "", "url": "https://github.com/qard" }, "bugs": { "url": "https://github.com/elastic/node-traceparent/issues" }, "bundleDependencies": false, "dependencies": { "random-poly-fill": "^1.0.1" }, "deprecated": false, "description": "Context management helper for the w3c traceparent header format", "devDependencies": { "tape": "^4.9.2" }, "homepage": "https://github.com/elastic/node-traceparent#readme", "keywords": [ "elastic", "apm", "trace", "context", "w3c", "traceparent" ], "license": "MIT", "main": "index.js", "name": "traceparent", "repository": { "type": "git", "url": "git+https://github.com/elastic/node-traceparent.git" }, "scripts": { "test": "tape test.js" }, "version": "1.0.0" } 10-100 { "id": 55633744, "name": "slick-menubar", "fullName": "joshbuddy/slick-menubar", "owner": { "login": "joshbuddy", "id": 8898, "avatarUrl": "https://avatars1.githubusercontent.com/u/8898?v=3", "gravatarId": "", "url": "https://api.github.com/users/joshbuddy", "htmlUrl": "https://github.com/joshbuddy", "followersUrl": "https://api.github.com/users/joshbuddy/followers", "subscriptionsUrl": "https://api.github.com/users/joshbuddy/subscriptions", "organizationsUrl": "https://api.github.com/users/joshbuddy/orgs", "reposUrl": "https://api.github.com/users/joshbuddy/repos", "receivedEventsUrl": "https://api.github.com/users/joshbuddy/received_events", "type": "User" }, "private": false, "htmlUrl": "https://github.com/joshbuddy/slick-menubar", "description": "An electron menubar app serving 
slick", "fork": false, "url": "https://api.github.com/repos/joshbuddy/slick-menubar", "forksUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/forks", "teamsUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/teams", "hooksUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/hooks", "eventsUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/events", "tagsUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/tags", "languagesUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/languages", "stargazersUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/stargazers", "contributorsUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/contributors", "subscribersUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/subscribers", "subscriptionUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/subscription", "mergesUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/merges", "downloadsUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/downloads", "deploymentsUrl": "https://api.github.com/repos/joshbuddy/slick-menubar/deployments", "createdAt": "2016-04-06T19:13:40.000Z", "updatedAt": "2016-04-06T20:24:08.000Z", "pushedAt": "2016-04-06T20:37:20.000Z", "gitUrl": "git://github.com/joshbuddy/slick-menubar.git", "sshUrl": "git@github.com:joshbuddy/slick-menubar.git", "cloneUrl": "https://github.com/joshbuddy/slick-menubar.git", "svnUrl": "https://github.com/joshbuddy/slick-menubar", "homepage": null, "size": 2, "stargazersCount": 0, "watchersCount": 0, "language": "JavaScript", "hasIssues": true, "hasDownloads": true, "hasWiki": true, "hasPages": false, "forksCount": 0, "mirrorUrl": null, "openIssuesCount": 0, "openIssues": 0, "watchers": 0, "defaultBranch": "master", "permissions": { "admin": false, "push": false, "pull": true }, "license": null, "networkCount": 0, "subscribersCount": 1, "status": 200, "packageJSON": { "name": "slick-menubar", "version": "1.0.0", "description": "", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "author": "", "license": "ISC", "dependencies": { "slick-io": "*", "menubar": "4.1.0", "electron-prebuilt": "0.37.4", "electron-rebuild": "1.1.3" }, "devDependencies": { "electron-packager": "6.0.0" } }, "packageStatus": 200, "firstCommit": { "sha": "f61c1426a90e7bb9799a17ab61aa5ed670c1114e", "commit": { "author": { "name": "", "email": "", "date": "2016-04-06T20:23:14Z" }, "committer": { "name": "", "email": "", "date": "2016-04-06T20:23:41Z" }, "message": "Initial checkin", "tree": { "sha": "f9d095e095d6e3ff6b9e7436bc3579edf2823919", "url": "https://api.github.com/repos/joshbuddy/slick-menubar/git/trees/f9d095e095d6e3ff6b9e7436bc3579edf2823919" }, "url": "https://api.github.com/repos/joshbuddy/slick-menubar/git/commits/f61c1426a90e7bb9799a17ab61aa5ed670c1114e", "commentCount": 0 } }, "filename": "joshbuddy___slick-menubar.json", "hasProjects": true, "lastFetchedAt": "2017-05-04T17:40:33.714Z", "packageLastFetchedAt": "2017-05-05T15:41:39.237Z" }src/main/resources/modinfo/CE/CE4258.json { "courseCode": "CE4258", "courseCredit": "4", "description": "This module provides students with basic knowledge of structural stability and dynamics for the analysis of civil engineering structures. The topics covered include general principles of stability and dynamics; buckling of beam, columns and frames; design against local and overall stability. 
Dynamics analysis will cover single-degree-of-freedom systems, multi-degree-of-freedom systems and continuous systems. Students are taught to deal with general stability and vibration problems of frames including computer applications and numerical formulation. The module of specialized context targets at undergraduate and graduate students in research or engineering practices relating to structural engineering applications", "faculty": "Engineering", "fulfillRequirements": [ "CE5885A" ], "preclusion": "TCE4258", "prereqTree": { "and": [ "CE2407", "CE3155" ] }, "title": "Structural Stability & Dynamics" } { "name": "msphp/helpers", "description": "A collection of helper functions for the MsPHP Framework", "keywords": [ "msphp", "framework", "helpers" ], "homepage": "http://msphp.umtuwen.com", "license": "MIT", "authors": [ { "name": "Levine", "email": "" }, { "name": "Sytlw", "email": "" } ], "support": { "issues": "https://github.com/msphp/helpers/issues", "source": "https://github.com/msphp/helpers" }, "require": { "php": ">=7.2" }, "autoload": { "files": [ "src/helpers.php" ] } }{ "name": "smtp2mqtt addon Home Assistant", "url": "https://github.com/yaseregko/smtp2mqtt_addon", "maintainer": " <>" } { "name": "chrome-steamautoauth", "version": "0.2.0", "description": "A chrome web extension to automatically generate and enter mobile code on steam login.", "scripts": { "build": "browserify src/content.js -o dist/bundle.js && browserify src/popup/script.js -o src/popup/bundle.js" }, "repository": { "type": "git", "url": "git+https://github.com/mabdu11ah/chrome-steamautoauth.git" }, "author": "", "license": "ISC", "bugs": { "url": "https://github.com/mabdu11ah/chrome-steamautoauth/issues" }, "homepage": "https://github.com/mabdu11ah/chrome-steamautoauth#readme", "dependencies": { "steam-totp": "^2.1.1" }, "devDependencies": { "browserify": "^17.0.0" } } { "ConnectionStrings": { "<%= name %>": "Server=.\\;Database=<%= name %>Db;User Id=spa_admin;Password=;" } }1-10 http://data.doremus.org/expression/aa0fb987-f3ce-3980-8ce0-ac2edf7f21f2 http://data.doremus.org/expression/b3fa417f-257e-3d04-99ae-70f73d812b5b http://data.doremus.org/expression/7b0cf91f-3f9a-3f4b-af3a-038c1e166399 http://data.doremus.org/expression/5ab43dce-1e3e-396d-9289-ab2636c2c8a8 http://data.doremus.org/expression/f49bf291-4d4f-3ce1-acd7-75bf0eacaba1 http://data.doremus.org/expression/6696b9c0-e537-31e0-bfbe-a769f809f5ad http://data.doremus.org/expression/dd46abb4-d66b-3727-b30d-9298abb4f626 http://data.doremus.org/expression/44d5f155-722b-3bc6-97e1-2adf07585c84 http://data.doremus.org/expression/90ea9aa7-4188-3b80-b91f-195e915cd02b http://data.doremus.org/expression/8ca469df-a20f-3a55-8d6d-c1c3860a1e30 http://data.doremus.org/expression/8e627bba-74a0-3643-b05b-46dfed74d743 http://data.doremus.org/expression/9b8f867f-1acf-3c37-b957-c29075d0fba4COVID19Tracking/covid-public-api {"date":20210128,"state":"MT","positive":92934,"probableCases":2039,"negative":null,"pending":null,"totalTestResultsSource":"totalTestsViral","totalTestResults":936928,"hospitalizedCurrently":114,"hospitalizedCumulative":4203,"inIcuCurrently":19,"inIcuCumulative":null,"onVentilatorCurrently":15,"onVentilatorCumulative":null,"recovered":87720,"lastUpdateEt":"1/28/2021 01:59","dateModified":"2021-01-28T01:59:00Z","checkTimeEt":"01/27 
20:59","death":1210,"hospitalized":4203,"hospitalizedDischarged":null,"dateChecked":"2021-01-28T01:59:00Z","totalTestsViral":936928,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":90895,"deathConfirmed":null,"deathProbable":null,"totalTestEncountersViral":null,"totalTestsPeopleViral":null,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"30","positiveIncrease":403,"negativeIncrease":0,"total":92934,"totalTestResultsIncrease":8947,"posNeg":92934,"dataQualityGrade":null,"deathIncrease":9,"hospitalizedIncrease":26,"hash":"69aa3af84d1086ae3cc622811a7779d44844de11","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} { "typeDefaults": { "comment": "This file provides defaults for various attributes depending on the field type. One can always override these defaults by providing value directly in the field" } } 0 [ { "Id": "1127451", "ThreadId": "470700", "Html": "Hello guys,\r
\n
\nI'm trying to create a small m:ui app, and I need some controls from Telerik.\r
\nIs there any way to achieve this, using the latest version of m:ui (1.0.5)?\r
\n
\nThere is a previous discussion thread related to this for version 1.0.2, but I have not yet been able to make that work. Anyway, I would like to use the latest release.\r
\n
\nAny kind of help will be appreciated!\r
\nCheers!
\n", "PostedDate": "2013-11-21T02:23:57.427-08:00", "UserRole": null, "MarkedAsAnswerDate": null }, { "Id": "1127662", "ThreadId": "470700", "Html": "All I had to do was install Telerik on my computer and include the references I needed in my project\r
\n
\nI am using a radgridview and some buttons so far.\r
\n
\nCan you show me what you tried to do so far?
\n", "PostedDate": "2013-11-21T11:13:46.98-08:00", "UserRole": null, "MarkedAsAnswerDate": null } ]bundie1990/new-website {"hola_player.dash.dev.js":"sha256-RfwV+jO0N05kNVKBoNNyKtSRxOcN/hgJiVOtVDHS9n8=","hola_player.dash.js":"sha256-zlyLSNXlBcuHjEMVw6x8UR7Z5tsS6f8ZHiD4Zlw5wcg=","hola_player.dev.js":"sha256-IqTgbGYJmkSzXvgVGWFCkIY9+zit45gUPleR6s/AUrU=","hola_player.js":"sha256-50Vqob3LOVemXcrpfvonhRp4yyRt8ZwWwYEGngQGsXs="}{ "citations" : [ { "textCitation" : "[See eqid on Metamath](http://us.metamath.org/mpegif/eqid.html)" } ], "names" : [ "eqid" ], "language" : "METAMATH_SET_MM", "lookupTerms" : [ "#T_cA", "#T_wceq", "#T_cA" ], "metaLanguage" : "METAMATH", "remarks" : " Law of identity (reflexivity of class equality). Theorem 6.4 of [Quine] p. 41. This is part of Frege's eighth axiom per Proposition 54 of [Frege1879] p. 50; see also ~ biid . This law is thought to have originated with Aristotle (_Metaphysics_, Zeta, 17, 1041 a, 10-20). (Thanks to and BJ for this information.) (Contributed by NM, 21-Jun-1993.) (Revised by BJ, 14-Oct-2017.) \n\n---\n\n Law of identity (reflexivity of class equality). Theorem 6.4 of [Quine] p. 41. This law is thought to have originated with Aristotle (_Metaphysics_, Zeta, 17, 1041 a, 10-20). (Thanks to and BJ for this information.) (Contributed by NM, 5-Aug-1993.) (Revised by BJ, 14-Oct-2017.) ", "statement" : "eqid $p |- A = A $." }{ "alfredsnippet" : { "snippet" : "def barhplot_plotly(df,x,y,xlabel,ylabel,title,filename):\n data = [\n go.Bar(\n x=df[x],\n y=df[y],\n orientation='h'\n )\n ]\n\n layout = go.Layout(autosize=False,\n title=title,\n xaxis=dict(title=xlabel),\n yaxis=dict(title=ylabel),\n margin=dict(l=100,pad=4)\n )\n fig = go.Figure(data=data, layout=layout)\n # iplot to show image in jupyter notebook\n iplot(fig, filename=filename)\n\n# plot\nimport pandas as pd\n\ndf = pd.DataFrame({'x':[10,20,30],'y':[100,200,300]})\nx,y = 'x', 'y'\nxlabel, ylabel = 'x', 'y'\ntitle = 'plot'\nfilename = 'pandas-horizontal-bar.html'\nbarhplot_plotly(df,x,y,xlabel,ylabel,title,filename)", "uid" : "C61F9A1F-D996-49BF-8990-41CF9E2B80C0", "name" : "plot plotly barhplot", "keyword" : "plot_barhplot_plotly" } }{"nom":"Saint-Paul","circ":"4ème circonscription","dpt":"Vosges","inscrits":99,"abs":55,"votants":44,"blancs":5,"nuls":3,"exp":36,"res":[{"nuance":"LR","nom":"","voix":21},{"nuance":"SOC","nom":"","voix":15}]}{"word":"shuttlecock","definition":"A cork stuck with feathers, which is to be struck by a battledoor in play; also, the play itself.\n\nTo send or toss to and fro; to bandy; as, to shuttlecock words. 
Thackeray."}1-10 { "@metadata": { "authors": [ "Abaddon1337" ] }, "simplesamlphp-desc": "Fournit une authentification utilisant simpleSAMLphp conjointement avec PluggableAuth" } data_bank/wgbb_8679.json {"questions": [{"player_1": {"name": "", "player_stat": 16.0}, "player_2": {"name": "", "player_stat": 10.0}, "stat": "sixes", "skill": "BAT", "question_text": "Who has hit more sixes?", "greater": true}, {"player_1": {"name": "", "player_stat": 4.0}, "player_2": {"name": "", "player_stat": 3.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 77.0}, "player_2": {"name": "", "player_stat": 45.0}, "stat": "wickets", "skill": "BOWL", "question_text": "Who has taken more wickets?", "greater": true}, {"player_1": {"name": "", "player_stat": 4.0}, "player_2": {"name": "", "player_stat": 15.0}, "stat": "fifties", "skill": "BAT", "question_text": "Who has scored more fifties?", "greater": true}, {"player_1": {"name": "", "player_stat": 45.0}, "player_2": {"name": "", "player_stat": 106.0}, "stat": "wickets", "skill": "BOWL", "question_text": "Who has taken more wickets?", "greater": true}, {"player_1": {"name": "", "player_stat": 2349.0}, "player_2": {"name": "", "player_stat": 457.0}, "stat": "runs_given", "skill": "BOWL", "question_text": "Who has given away less runs?", "greater": false}, {"player_1": {"name": "", "player_stat": 29.0}, "player_2": {"name": "", "player_stat": 30.0}, "stat": "sixes", "skill": "BAT", "question_text": "Who has hit more sixes?", "greater": true}, {"player_1": {"name": "", "player_stat": 10.0}, "player_2": {"name": "", "player_stat": 1.0}, "stat": "maidens", "skill": "BOWL", "question_text": "Who has bowled more maidens?", "greater": true}, {"player_1": {"name": "", "player_stat": 29.15}, "player_2": {"name": "", "player_stat": 21.77}, "stat": "average", "skill": "BOWL", "question_text": "Who has the better bowling average?", "greater": false}, {"player_1": {"name": "", "player_stat": 42.0}, "player_2": {"name": "", "player_stat": 71.0}, "stat": "wickets", "skill": "BOWL", "question_text": "Who has taken more wickets?", "greater": true}, {"player_1": {"name": "", "player_stat": 7.0}, "player_2": {"name": "", "player_stat": 3.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 66.0}, "player_2": {"name": "", "player_stat": 114.0}, "stat": "highest", "skill": "BAT", "question_text": "Who has the greater highest score?", "greater": true}, {"player_1": {"name": "", "player_stat": 3.0}, "player_2": {"name": "", "player_stat": 6.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 21.61}, "player_2": {"name": "", "player_stat": 23.04}, "stat": "average", "skill": "BOWL", "question_text": "Who has the better bowling average?", "greater": false}, {"player_1": {"name": "", "player_stat": 2964.0}, "player_2": {"name": "", "player_stat": 520.0}, "stat": "runs_given", "skill": "BOWL", "question_text": "Who has given away less runs?", "greater": false}, {"player_1": {"name": "", "player_stat": 146.51}, "player_2": {"name": "", "player_stat": 130.07}, "stat": "strike_rate", "skill": "BAT", "question_text": "Who has the better batting strike rate?", "greater": true}, {"player_1": {"name": "", "player_stat": 8.83}, "player_2": {"name": "", "player_stat": 7.98}, "stat": "economy", "skill": "BOWL", "question_text": "Who has the better 
economy rate?", "greater": false}, {"player_1": {"name": "", "player_stat": 13.0}, "player_2": {"name": "", "player_stat": 8.0}, "stat": "zeroes", "skill": "BAT", "question_text": "Who has more ducks?", "greater": true}, {"player_1": {"name": "", "player_stat": 25.5}, "player_2": {"name": "", "player_stat": 24.4}, "stat": "strike_rate", "skill": "BOWL", "question_text": "Who has the better bowling strike rate?", "greater": false}, {"player_1": {"name": "", "player_stat": 89.0}, "player_2": {"name": "", "player_stat": 100.0}, "stat": "highest", "skill": "BAT", "question_text": "Who has the greater highest score?", "greater": true}]}{"Name":"R_10_10_08","Objects":[],"Items":[{"Length":94,"Height":104,"Demand":1,"DemandMax":null,"Value":9776},{"Length":93,"Height":110,"Demand":1,"DemandMax":null,"Value":10230},{"Length":110,"Height":96,"Demand":1,"DemandMax":null,"Value":10560},{"Length":100,"Height":107,"Demand":1,"DemandMax":null,"Value":10700},{"Length":107,"Height":99,"Demand":1,"DemandMax":null,"Value":10593},{"Length":93,"Height":94,"Demand":1,"DemandMax":null,"Value":8742},{"Length":93,"Height":95,"Demand":1,"DemandMax":null,"Value":8835},{"Length":96,"Height":104,"Demand":1,"DemandMax":null,"Value":9984},{"Length":109,"Height":110,"Demand":1,"DemandMax":null,"Value":11990},{"Length":90,"Height":106,"Demand":1,"DemandMax":null,"Value":9540}]}{"ast":null,"code":"\"use strict\";\n\nvar _interopRequireDefault = require(\"@babel/runtime/helpers/interopRequireDefault\");\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nObject.defineProperty(exports, \"default\", {\n enumerable: true,\n get: function get() {\n return _Modal.default;\n }\n});\nObject.defineProperty(exports, \"ModalManager\", {\n enumerable: true,\n get: function get() {\n return _ModalManager.default;\n }\n});\n\nvar _Modal = _interopRequireDefault(require(\"./Modal\"));\n\nvar _ModalManager = _interopRequireDefault(require(\"./ModalManager\"));","map":{"version":3,"sources":["/home/vsonline/workspace/people10challenge-alpha/code-challenge/challenge-client-server-app/client/node_modules/ra-ui-materialui/node_modules/@material-ui/core/Modal/index.js"],"names":["_interopRequireDefault","require","Object","defineProperty","exports","value","enumerable","get","_Modal","default","_ModalManager"],"mappings":"AAAA;;AAEA,IAAIA,sBAAsB,GAAGC,OAAO,CAAC,8CAAD,CAApC;;AAEAC,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,YAA/B,EAA6C;AAC3CC,EAAAA,KAAK,EAAE;AADoC,CAA7C;AAGAH,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,SAA/B,EAA0C;AACxCE,EAAAA,UAAU,EAAE,IAD4B;AAExCC,EAAAA,GAAG,EAAE,SAASA,GAAT,GAAe;AAClB,WAAOC,MAAM,CAACC,OAAd;AACD;AAJuC,CAA1C;AAMAP,MAAM,CAACC,cAAP,CAAsBC,OAAtB,EAA+B,cAA/B,EAA+C;AAC7CE,EAAAA,UAAU,EAAE,IADiC;AAE7CC,EAAAA,GAAG,EAAE,SAASA,GAAT,GAAe;AAClB,WAAOG,aAAa,CAACD,OAArB;AACD;AAJ4C,CAA/C;;AAOA,IAAID,MAAM,GAAGR,sBAAsB,CAACC,OAAO,CAAC,SAAD,CAAR,CAAnC;;AAEA,IAAIS,aAAa,GAAGV,sBAAsB,CAACC,OAAO,CAAC,gBAAD,CAAR,CAA1C","sourcesContent":["\"use strict\";\n\nvar _interopRequireDefault = require(\"@babel/runtime/helpers/interopRequireDefault\");\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nObject.defineProperty(exports, \"default\", {\n enumerable: true,\n get: function get() {\n return _Modal.default;\n }\n});\nObject.defineProperty(exports, \"ModalManager\", {\n enumerable: true,\n get: function get() {\n return _ModalManager.default;\n }\n});\n\nvar _Modal = _interopRequireDefault(require(\"./Modal\"));\n\nvar _ModalManager = 
_interopRequireDefault(require(\"./ModalManager\"));"]},"metadata":{},"sourceType":"script"}samuelbaizg/ssguan { "auth/signup": { "method": "post", "function": "ssguan.ignitor.auth.service.signup", "content-type": "application/json", "resource": "", "operation": "" }, "auth/updatepwd": { "method": "post", "function": "ssguan.ignitor.auth.service.update_user_password", "content-type": "application/json", "resource": "", "operation": "" } }{"nom":"Vendegies-sur-Ecaillon","circ":"12ème circonscription","dpt":"Nord","inscrits":849,"abs":404,"votants":445,"blancs":37,"nuls":6,"exp":402,"res":[{"nuance":"REM","nom":"","voix":260},{"nuance":"FN","nom":"","voix":142}]}burgamacha/lexicalDispersion {"organizations": [], "uuid": "c06f777c74d9315c1804ee449026e9acbf7e0cb2", "thread": {"social": {"gplus": {"shares": 0}, "pinterest": {"shares": 0}, "vk": {"shares": 0}, "linkedin": {"shares": 0}, "facebook": {"likes": 0, "shares": 0, "comments": 0}, "stumbledupon": {"shares": 0}}, "site_full": "www.tripadvisor.com", "main_image": "https://media-cdn.tripadvisor.com/media/photo-s/07/44/ee/6a/the-st-regis-new-york.jpg", "site_section": "https://www.tripadvisor.com/Hotel_Review-g60763-d93614-Reviews-The_St_Regis_New_York-New_York_City_New_York.html", "section_title": "The St. Regis New York - UPDATED 2017 Hotel Reviews & Price Comparison (New York City) - TripAdvisor", "url": "https://www.tripadvisor.com/ShowUserReviews-g60763-d93614-r469953445-The_St_Regis_New_York-New_York_City_New_York.html", "country": "US", "domain_rank": 189, "title": "Disappointing", "performance_score": 0, "site": "tripadvisor.com", "participants_count": 2, "title_full": "Disappointing - Review of The St. Regis New York, New York City, NY - TripAdvisor", "spam_score": 0.0, "site_type": "discussions", "published": "2017-03-24T03:00:00.000+03:00", "replies_count": 1, "uuid": "c06f777c74d9315c1804ee449026e9acbf7e0cb2"}, "author": "wrldtrvlr_13", "url": "https://www.tripadvisor.com/ShowUserReviews-g60763-d93614-r469953445-The_St_Regis_New_York-New_York_City_New_York.html", "ord_in_thread": 0, "title": "Disappointing", "locations": [], "entities": {"persons": [], "locations": [], "organizations": []}, "highlightText": "", "language": "english", "persons": [], "text": "Overall, a very disappointing experience. I've stayed at this property multiple times in the past with very mixed results, ranging from great (upgraded to a suite with a platter of fruit in the room on arrival) to the opposite of great (tried checking in at 5pm and had to wait until almost 9pm until a room was ready?!). This stay solidified my opinion to not return.\nThere wasn't a single major problem in this case but more of an accumulation of several small annoyances that when taken together, add up to a very negative opinion.\nFor starters, I was booked into a 1-night stay but had a last minute travel change that required me being in NYC for 2 nights. I called the property to inquire about adding an extra night and was told they were \"sold out\" and could not accommodate. However, all third party travel sites (Kayak, Hotels.com, etc etc) showed availability, their own website showed availability and when I called SPG's reservation center they also confirmed that not only was there still availability but that at least three different room types were available.\nNext, upon check-in, I was told I would not receive any Platinum benefits because I was booked on a corporate rate. 
I average close to 200 nights across hotel brands (giving me the highest tier status in 3 different major loyalty programs) and have never had a single property anywhere else in the world at any of these brands deny benefits because of a corporate rate.\nAfter checking out, I realized I had left something in the room and called the property to ask if they could check the room for the item. The first time I called I was told someone would check and call me back. 30 minutes later and no call, I called again and was told by the person I was speaking with that they were on their way to the room as we were talking and I'd receive a call back in 5 minutes. Another 20 minutes and no call, I called a 3rd time and was told no one had checked and they weren't sure who I had spoken with the previous two times as they didn't have record of a request.\nAt this price point, you can stay anywhere in NYC and that's exactly what I'd recommend - stay at ANY of them over this place.", "external_links": [], "published": "2017-03-24T03:00:00.000+03:00", "crawled": "2017-03-29T03:56:18.739+03:00", "highlightTitle": ""}export_old/issues/netoffice_issue_17986.json { "WorkItem": { "AffectedComponent": { "DisplayName": "", "Name": "" }, "AssignedTo": "", "ClosedBy": "", "ClosedComment": "", "CommentCount": 1, "Custom": "", "Description": "Right now, the DebugConsole class has options to log to the console, file or memorylist.\nCould you please add 'Trace' as an option which logs a message using the .NET framework trace logging system.\nA call to System.Diagnostics.Trace.WriteLine(message) will do fine.\n \nI request this because it is the most easy way to find problems when developing Add-Ins for us (there is no console available for an add-in). \nBy default, System.Diagnostics.Trace.WriteLine, messages are written to the Windows debug output. Those messages are displayed in the Visual Studio debug output window in debug mode, and can also be shown using the sysinternals 'DebugView' tool.\nBy using the app.config, the user can even log the trace output to other outputs.\n \nThank you for this great product, much much better than VSTO!", "HtmlDescription": "Right now, the DebugConsole class has options to log to the console, file or memorylist.
\nCould you please add 'Trace' as an option which logs a message using the .NET framework trace logging system.
\nA call to System.Diagnostics.Trace.WriteLine(message) will do fine.
\n 
\nI request this because it is the most easy way to find problems when developing Add-Ins for us (there is no console available for an add-in).
\nBy default, System.Diagnostics.Trace.WriteLine, messages are written to the Windows debug output. Those messages are displayed in the Visual Studio debug output window in debug mode, and can also be shown using the sysinternals 'DebugView' tool.
\nBy using the app.config, the user can even log the trace output to other outputs.
\n 
\nThank you for this great product, much much better than VSTO!
\n", "PlainDescription": "Right now, the DebugConsole class has options to log to the console, file or memorylist.\nCould you please add 'Trace' as an option which logs a message using the .NET framework trace logging system.\nA call to System.Diagnostics.Trace.WriteLine(message) will do fine.\n \nI request this because it is the most easy way to find problems when developing Add-Ins for us (there is no console available for an add-in). \nBy default, System.Diagnostics.Trace.WriteLine, messages are written to the Windows debug output. Those messages are displayed in the Visual Studio debug output window in debug mode, and can also be shown using the sysinternals 'DebugView' tool.\nBy using the app.config, the user can even log the trace output to other outputs.\n \nThank you for this great product, much much better than VSTO!\n", "LastUpdatedBy": "SebastianDotNet", "LastUpdatedDate": "2013-06-14T09:33:16.377+02:00", "PlannedForRelease": "", "ReleaseVisibleToPublic": false, "Priority": { "Name": "Low", "Severity": 50, "Id": 1 }, "ProjectName": "netoffice", "ReportedBy": "Piet123", "ReportedDate": "2012-05-04T08:43:14.603+02:00", "CanContactReportedByUser": false, "Status": { "Name": "Resolved", "Id": 7 }, "ReasonClosed": { "Name": "Unassigned" }, "Summary": "Please add 'Trace' ConsoleMode to DebugConsole", "Type": { "Name": "Issue", "Id": 3 }, "VoteCount": 1, "Id": 17986, "HasUserVoted": false }, "FileAttachments": [], "Comments": [ { "Message": "Trace is available in the updated 1.5.1 RC\r\n\r\n*have a nice day", "PostedBy": "SebastianDotNet", "PostedDate": "2012-05-04T19:15:07.81+02:00", "WorkItemId": 17986, "Id": 203047 } ], "CanDeleteWorkItem": true, "CanDeleteComments": true }{"vue.common.js":","vue.esm.js":","vue.js":","vue.min.js":","vue.runtime.common.js":"sha512-zaTpT5NXG8b6/UM6w+SeWOk39KczTDorqVf7qrVJLTf6h64slShrQOFqQbMCwd8TSu9p86Is/4i/qGXFs1PO3A==","vue.runtime.esm.js":","vue.runtime.js":","vue.runtime.min.js":"}{ "body": "TO THIS OF WRONG HYPOTHESIS MAY BE REDUCED THE ERROURS THAT MAY BE OCCASIONED BY A TRUE HYPOTHESIS, OR RIGHT PRINCIPLES,", "next": "https://raw.githubusercontent.com/CAPSELOCKE/CAPSELOCKE/master/tweets/13650.json" }scripts/satd_commit_pipeline/results/airflow/project-33884891_c-b56cb5cc.json {"total": 0, "p": 1, "ps": 100, "paging": {"pageIndex": 1, "pageSize": 100, "total": 0}, "effortTotal": 0, "issues": [], "components": [{"key": "project-33884891:airflow/task_runner/base_task_runner.py", "name": "base_task_runner.py", "qualifier": "FIL", "path": "airflow/task_runner/base_task_runner.py", "language": "py", "measures": [{"metric": "ncloc", "value": "83"}, {"metric": "complexity", "value": "15"}, {"metric": "bugs", "value": "0", "bestValue": true}, {"metric": "code_smells", "value": "0", "bestValue": true}, {"metric": "duplicated_blocks", "value": "0", "bestValue": true}, {"metric": "violations", "value": "0", "bestValue": true}, {"metric": "vulnerabilities", "value": "0", "bestValue": true}]}, {"key": "project-33884891:airflow/contrib/task_runner/cgroup_task_runner.py", "name": "cgroup_task_runner.py", "qualifier": "FIL", "path": "airflow/contrib/task_runner/cgroup_task_runner.py", "language": "py", "measures": [{"metric": "ncloc", "value": "114"}, {"metric": "complexity", "value": "22"}, {"metric": "bugs", "value": "0", "bestValue": true}, {"metric": "code_smells", "value": "0", "bestValue": true}, {"metric": "duplicated_blocks", "value": "0", "bestValue": true}, {"metric": "violations", "value": "0", "bestValue": true}, {"metric": "vulnerabilities", 
"value": "0", "bestValue": true}]}, {"key": "project-33884891:tests/impersonation.py", "name": "impersonation.py", "qualifier": "FIL", "path": "tests/impersonation.py", "language": "py", "measures": [{"metric": "ncloc", "value": "67"}, {"metric": "complexity", "value": "7"}, {"metric": "bugs", "value": "0", "bestValue": true}, {"metric": "code_smells", "value": "0", "bestValue": true}, {"metric": "duplicated_blocks", "value": "0", "bestValue": true}, {"metric": "violations", "value": "0", "bestValue": true}, {"metric": "vulnerabilities", "value": "0", "bestValue": true}]}], "facets": []}vue/1.0.25.json1-10 {"vue.common.js":"sha512-jUmMp+,"vue.common.min.js":","vue.js":","vue.min.js":"}medialab/climateDebateExplorer0 { "countries": [ "Ukraine", "Belarus", "Senegal", "Finland", "India" ], "topics": [ "Flexibility Mechanisms", "Compliance and Enforcement" ], "section_title": "ELECTION OF OFFICERS", "enb_start_date": "28-Nov-11", "enb_short_title": "COP17", "subtype": "", "actors": [ "European Union" ], "sentences": [ "On Sunday, 11 December, the CMP elected officers to the Clean Development Mechanism (CDM), Adaptation Fund, Compliance Committee (facilitative branch and enforcement branch) and Joint Implementation Supervisory Committee.", "They also elected (Senegal) and (Finland) as Chair and Vice-Chair of the AWG-KP. PROPOSALS FOR AMENDMENTS TO THE KP This item (FCCC/KP/CMP/2010/3 and FCCC/KP/CMP/2009/2-13) was first addressed by the CMP plenary on 30 November.", "India said it will not agree to changes to Annex B unless a second commitment period is agreed.", "Belarus, the EU and Ukraine favored simplifying amendment procedures.", "During the 11 December closing plenary, the CMP decided consideration of this item will continue at CMP 8.", "This issue is also addressed under the AWG-KP Outcome Document (FCCC/KP/CMP/2011/L.3 Add.1) Annexes II and III, which include proposed amendments to the Kyoto Protocol." 
], "enb_url": "http://www.iisd.ca/vol12/enb12534e.html", "enb_long_title": "Durban Climate Change Conference - COP17/CMP7", "type": "", "id": "enb12534e_50", "enb_end_date": "09-Dec-11" }raguay/mint-codemirror1-10 { "name": "mint-codemirror", "source-directories": [ "source" ], "external-javascripts": [ "assets/asset-loader.js" ] } { "kind": "Type", "name": "SVGFEBlendElement", "href": "https://developer.mozilla.org/en-US/docs/Web/API/SVGFEBlendElement", "description": "The SVGFEBlendElement interface corresponds to the <feBlend> element.", "refs": [ { "name": "Filter Effects Module Level 2", "href": "https://drafts.fxtf.org/filter-effects/#InterfaceSVGFEBlendElement", "description": "(Filter Effects 2) # InterfaceSVGFEBlendElement" } ] } { "name": "CopyBlobBuilder", "derive": "Debug, Clone", "uses": [ "crate::blob::blob::generate_blob_uri", "crate::blob::blob::responses::CopyBlobResponse", "crate::core::prelude::*", "crate::{RehydratePriority, RehydratePriorityOption, RehydratePrioritySupport}", "azure_core::errors::{check_status_extract_headers_and_body, AzureError}", "azure_core::lease::LeaseId", "azure_core::prelude::*", "azure_core::{No, ToAssign, Yes}", "hyper::{Method, StatusCode}", "std::collections::HashMap", "std::convert::TryInto", "std::marker::PhantomData" ], "inline": true, "extra_types": [ "'a", "C" ], "extra_wheres": [ "C: Client" ], "constructor_fields": [ { "name": "client", "field_type": "&'a C", "trait_get": "ClientRequired<'a, C>" } ], "fields": [ { "name": "container_name", "field_type": "&'a str", "builder_type": "ContainerNameSet", "optional": false, "trait_get": "ContainerNameRequired<'a>", "trait_set": "ContainerNameSupport<'a>" }, { "name": "blob_name", "field_type": "&'a str", "builder_type": "BlobNameSet", "optional": false, "trait_get": "BlobNameRequired<'a>", "trait_set": "BlobNameSupport<'a>" }, { "name": "source_url", "field_type": "&'a str", "builder_type": "SourceUrlNameSet", "optional": false, "trait_get": "SourceUrlRequired<'a>", "trait_set": "SourceUrlSupport<'a>" }, { "name": "metadata", "field_type": "&'a HashMap<&'a str, &'a str>", "optional": true, "trait_get": "MetadataOption<'a>", "trait_set": "MetadataSupport<'a>" }, { "name": "timeout", "field_type": "u64", "optional": true, "trait_get": "TimeoutOption", "trait_set": "TimeoutSupport" }, { "name": "if_since_condition", "field_type": "IfSinceCondition", "optional": true, "trait_get": "IfSinceConditionOption", "trait_set": "IfSinceConditionSupport" }, { "name": "if_source_since_condition", "field_type": "IfSinceCondition", "optional": true, "trait_get": "IfSourceSinceConditionOption", "trait_set": "IfSourceSinceConditionSupport" }, { "name": "if_match_condition", "field_type": "IfMatchCondition<'a>", "optional": true, "trait_get": "IfMatchConditionOption<'a>", "trait_set": "IfMatchConditionSupport<'a>" }, { "name": "if_source_match_condition", "field_type": "IfMatchCondition<'a>", "optional": true, "trait_get": "IfSourceMatchConditionOption<'a>", "trait_set": "IfSourceMatchConditionSupport<'a>" }, { "name": "lease_id", "field_type": "&'a LeaseId", "optional": true, "trait_get": "LeaseIdOption<'a>", "trait_set": "LeaseIdSupport<'a>" }, { "name": "source_lease_id", "field_type": "&'a LeaseId", "optional": true, "trait_get": "SourceLeaseIdOption<'a>", "trait_set": "SourceLeaseIdSupport<'a>" }, { "name": "access_tier", "field_type": "AccessTier", "optional": true, "trait_get": "AccessTierOption", "trait_set": "AccessTierSupport" }, { "name": "rehydrate_priority", "field_type": "RehydratePriority", 
"optional": true, "trait_get": "RehydratePriorityOption", "trait_set": "RehydratePrioritySupport" }, { "name": "client_request_id", "field_type": "&'a str", "optional": true, "trait_get": "ClientRequestIdOption<'a>", "trait_set": "ClientRequestIdSupport<'a>" } ] } 0 { "add_total_row": 0, "color": "#4d0000", "creation": "2019-04-30 18:16:40.117839", "disable_prepared_report": 1, "disabled": 0, "docstatus": 0, "doctype": "Report", "idx": 0, "is_standard": "Yes", "letter_head": "letterhead", "modified": "2019-07-05 14:10:05.270867", "modified_by": "Administrator", "module": "Stock Table", "name": "All Product Query For Sales", "owner": "Administrator", "prepared_report": 0, "query": "select \n\titem_code as 'Item Code:Link/Item:100',\n\titem_name as 'Item Name:Data:150',\n\tdescription as 'description:Data:200',\n\tstock_uom as 'UOM:Link/UOM:80',\n\tstandard_rate as 'Rate:Currency:80',\n\twholesale_rate as 'Wholesale Rate:Currency:80',\n\t(select actual_qty from `tabBin` where item_code = `tabItem`.item_code and warehouse = 'SN - B') as 'SN Stock:Float:100',\n\t(select actual_qty from `tabBin` where item_code = `tabItem`.item_code and warehouse = 'AH - B') as 'AH Stock:Float:100',\n\t(select actual_qty from `tabBin` where item_code = `tabItem`.item_code and warehouse = 'MN - B') as 'MN Stock:Float:100',\n\t(select actual_qty from `tabBin` where item_code = `tabItem`.item_code and warehouse = 'SZ - B') as 'SZ Stock:Float:100',\n\t(select actual_qty from `tabBin` where item_code = `tabItem`.item_code and warehouse = 'SH - B') as 'SH Stock:Float:100',\n\t(select sum(actual_qty) from `tabBin` where item_code = `tabItem`.item_code and (warehouse = 'SN - B' or warehouse = 'SZ - B' or warehouse = 'AH - B' or warehouse = 'MN - B' or warehouse = 'SH - B')) as 'Total Stock:Float:100',\n (select ordered_qty from `tabBin` where item_code = `tabItem`.item_code and ordered_qty != 0 order by name limit 1) as 'Stock In Transit:Float:100'\n\tfrom `tabItem`;", "ref_doctype": "Item", "report_name": "All Product Query For Sales", "report_type": "Query Report", "roles": [ { "role": "Item Manager" }, { "role": "Stock Manager" }, { "role": "Stock User" }, { "role": "Sales User" }, { "role": "Purchase User" }, { "role": "Maintenance User" }, { "role": "Accounts User" }, { "role": "Manufacturing User" } ] }1-10 {"text":"Subject to the restrictions in section 21-2055(c), the court may confer on a conservator, at the time of appointment or later, in addition to the powers conferred by sections 21-2070 and 21-2071, any power that the court itself could exercise under section 21-2055(b)(2). The court, at the time of appointment or later, may limit the powers of a conservator otherwise conferred by sections 21-2070 and 21-2071 or previously conferred by the court and may at any time remove or modify any limitations. If the court limits any power conferred on the conservator by section 21-2070 or section 21-2071, or specifies, as provided in section 21-2066(a), that title to some but not all assets of the protected individual vest in the conservator, the limitation or specification of assets subject to the conservatorship shall be endorsed upon the letters of appointment.","historical":"Prior Codifications\n\n1981 Ed., § 21-2072.\n\nLegislative History of Laws\n\nFor legislative history of D.C. Law 6-204, see Historical and Statutory Notes following § 21-2001.\n\nUniform Law\n\nThis section is based upon § 2-325 of the Uniform Guardianship and Protective Proceedings Act (1982 Act). 
See 8A Uniform Laws Annotated, Master Edition, or ULA Database on WESTLAW.\n\nDC CODE § 21-2072\n\nCurrent through December 11, 2012","credits":"(Feb. 28, 1987, D.C. Law 6-204, § 2(a), 34 DCR 632.)","sections":[],"division":{"identifier":"III","text":"Decedents' Estates and Fiduciary Relations."},"title":{"identifier":"21","text":"Fiduciary Relations and Persons with Mental Illness. (Refs & Annos)"},"chapter":{"identifier":"20","text":"Guardianship, Protective Proceedings, and Durable Power of Attorney. (Refs & Annos)"},"subchapter":{"identifier":"VI","text":"Protection of Property of Incapacitated, Disappeared or Detained Individuals. (Refs & Annos)"},"heading":{"title":"21","chaptersection":"2072","identifier":"21-2072","catch_text":"Enlargement or limitation of powers of conservator."}}index/b/black-olive-hummus.json { "directions": [ "Place garlic in food processor and pulse until garlic is minced. Add garbanzo beans; puree until chickpeas have broken down into a paste. Pour in liquid from olives, 1 tablespoon at a time; puree until fully incorporated, about 2 to 3 tablespoons liquid in total.", "Keep food processor running and slowly drizzle olive oil into hummus until smooth and desired consistency is reached. Add olives to the hummus and pulse 3 to 7 times until olives are chopped and evenly distributed." ], "ingredients": [ "1 clove garlic", "1 (15 ounce) can garbanzo beans (chickpeas), drained and rinsed", "1 (6 ounce) can black olives, drained and liquid reserved", "1/4 cup olive oil, or as needed" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Black Olive Hummus", "url": "http://allrecipes.com/recipe/230641/black-olive-hummus/" } {"name": "pykaldi", "description": "Python wrapper for Kaldi decoders (Kaldi https://sourceforge.net/projects/kaldi/)", "license": {"key": "other", "name": "Other", "spdx_id": null, "url": null}, "starNum": 63, "folkNum": 22, "watchNum": 63, "topic": []}{ "name": "love-typescript-definitions", "version": "0.9.5", "description": "Write LÖVE 2D projects with TypeScript", "scripts": { "test": "tslint -c tslint.json -p tsconfig.json include/**.d.ts include/**/**.d.ts include/**/*_spec.d.ts && tstl -p tsconfig.json" }, "repository": { "type": "git", "url": "git+https://github.com/hazzard993/love-typescript-definitions.git" }, "keywords": [ "love2d", "love", "lua", "typescript" ], "license": "MIT", "bugs": { "url": "https://github.com/hazzard993/love-typescript-definitions/issues" }, "homepage": "https://github.com/hazzard993/love-typescript-definitions#readme", "devDependencies": { "tslint": "^5.13.0", "typescript-to-lua": "^0.17.0" }, "dependencies": { "lua-types": "^2.3.2" } } alexa/alexa-dataset-redtab {"relation": [["Author", "SimonBob", "SimonBob", "SimonBob", "SimonBob", "SimonBob"], ["Title", "A Personal Challenge", "A Response", "Are They Playing Mother 3?", "Article Six", "article_man"], ["Description", "752", "814", "Sing it to the tune of \"Do They Know It's Christmastime?\"", "804", "Poem/Song"], ["Date", "7/31/06", "7/31/06", "1/10/06", "7/31/06", "1/7/05"], ["Rank", "0.00", "0.00", "0.00", "0.00", "0.00"]], "pageTitle": "STARMEN.NET - EarthBound / Mother 3 Goodness.", "title": "", "url": "http://starmen.net/vote/vote.php?id=14727", "hasHeader": true, "headerPosition": "FIRST_ROW", "tableType": "RELATION", "tableNum": 0, "s3Link": "common-crawl/crawl-data/CC-MAIN-2015-32/segments/1438042988061.16/warc/CC-MAIN-20150728002308-00261-ip-10-236-191-2.ec2.internal.warc.gz", "recordEndOffset": 224673903, "recordOffset": 
224665701, "tableOrientation": "HORIZONTAL", "textBeforeTable": "> Y X W T S R P O N M L I H G F E D B A < Other Submissions by SimonBob So the moral of my article today is \"This is what happens when you go crazy trying to get the Star Pendant.\" Don't try this at home, I'm a professional. But what about repaying the loan? That million bucks didn't appear out of nowhere. It came from the Minches. And I'm sure that Lardna would want some of it back, and with interest too. Paying back a loan that size would be like paying off a mortgage. You'd have to calculate interest and make monthly installments. If Aloysius weren't sucking back iced frappachinos in Fourside, he could be quite a rich man - even richer", "textAfterTable": "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 Junk: Weird Junk: Earthbound Thing of the Day. Awesome Fan Thing of the Day. Activities:", "hasKeyColumn": true, "keyColumnIndex": 1, "headerRowIndex": 0}SkyEditor.RomEditor.Rtdx.ConsoleApp/Mods/RestoreRegularAttack/mod.json10-100 { "Id": "SkyEditor.RestoreRegularAttack", "Version": "1.0.0", "Target": "RTDX", "Name": "Restore Regular Attack", "Description": "This mod restores the regular attack from previous PMD games. You can use it by pressing ZL+ZR.", "Scripts": [ "RegularAttack.csx" ] } data/provinces/11/regencies/02/districts/16/villages.json [{"id":"2001","provinceId":"11","regencyId":"02","districtId":"16","name":"Bintang Alga Musara"},{"id":"2002","provinceId":"11","regencyId":"02","districtId":"16","name":"Bintang Bukit Indah"},{"id":"2003","provinceId":"11","regencyId":"02","districtId":"16","name":"Tanjung Sari"},{"id":"2004","provinceId":"11","regencyId":"02","districtId":"16","name":"Bun-bun Alas"},{"id":"2005","provinceId":"11","regencyId":"02","districtId":"16","name":"Naga Timbul"},{"id":"2006","provinceId":"11","regencyId":"02","districtId":"16","name":"Gunung Pak-Pak"},{"id":"2007","provinceId":"11","regencyId":"02","districtId":"16","name":"Bukit Meriah"},{"id":"2008","provinceId":"11","regencyId":"02","districtId":"16","name":"Permata Musara"},{"id":"2009","provinceId":"11","regencyId":"02","districtId":"16","name":"Laut Tawar"},{"id":"2010","provinceId":"11","regencyId":"02","districtId":"16","name":"Kane Mende"},{"id":"2011","provinceId":"11","regencyId":"02","districtId":"16","name":"Lawe Sekhakut"},{"id":"2012","provinceId":"11","regencyId":"02","districtId":"16","name":"Bunbun Indah"},{"id":"2013","provinceId":"11","regencyId":"02","districtId":"16","name":"Suka Damai"},{"id":"2014","provinceId":"11","regencyId":"02","districtId":"16","name":"Tuah Kekhine"},{"id":"2015","provinceId":"11","regencyId":"02","districtId":"16","name":""},{"id":"2016","provinceId":"11","regencyId":"02","districtId":"16","name":"Sade Ate"},{"id":"2017","provinceId":"11","regencyId":"02","districtId":"16","name":"Akhih Majile"},{"id":"2018","provinceId":"11","regencyId":"02","districtId":"16","name":"Ukhat Peseluk"},{"id":"2019","provinceId":"11","regencyId":"02","districtId":"16","name":"Tunas Mude"},{"id":"2020","provinceId":"11","regencyId":"02","districtId":"16","name":""},{"id":"2021","provinceId":"11","regencyId":"02","districtId":"16","name":"Kute Hakhapen"},{"id":"2022","provinceId":"11","regencyId":"02","districtId":"16","name":"Kompas"},{"id":"2023","provinceId":"11","regencyId":"02","districtId":"16","name":"Sepakat"}]{ "name": "node-typescript-boilerplate", "version": "0.0.0", "description": "Minimalistic boilerplate to quick-start Node.js development in TypeScript.", "main": "./build/src/main.js", "engines": { "node": 
">= 10 <13" }, "config": { "function_name": "starlingToYnab", "region": "us-central1", "gcp_project": "starling-to-ynab", "runtime": "nodejs10", "topic": "starling-to-ynab-default" }, "devDependencies": { "@types/jest": "~24.0.25", "@types/luxon": "^1.21.0", "@types/node": "~12.12.22", "@typescript-eslint/eslint-plugin": "~2.14.0", "@typescript-eslint/parser": "~2.14.0", "eslint": "~6.8.0", "eslint-config-prettier": "~6.9.0", "eslint-plugin-jest": "~23.2.0", "jest": "~24.9.0", "prettier": "~1.19.1", "rimraf": "~3.0.0", "ts-jest": "~24.2.0", "tsutils": "~3.17.0", "typescript": "~3.7.4" }, "scripts": { "start": "node -r dotenv-yaml/config ./build/src/run", "clean": "rimraf coverage build tmp", "deploy": "gcloud functions deploy $npm_package_config_function_name --env-vars-file .env.yml --runtime $npm_package_config_runtime --region=$npm_package_config_region --trigger-topic $npm_package_config_topic --project $npm_package_config_gcp_project", "build": "tsc -p tsconfig.release.json", "build:watch": "tsc -w -p tsconfig.release.json", "lint": "eslint . --ext .ts,.tsx", "init:env": "cp env-example.yml .env.yml", "test": "jest --coverage", "test:watch": "jest --watch" }, "author": " <>", "license": "Apache-2.0", "dependencies": { "bignumber.js": "^9.0.0", "dotenv": "^8.2.0", "dotenv-yaml": "^0.1.4", "luxon": "^1.21.3", "starling-developer-sdk": "^1.0.0", "tslib": "~1.10.0", "ynab": "^1.17.0" } } { "title": "Earthrise 1: Historic Image Remastered", "credit": "NASA, Apollo 8 Crew, ; Processing and License: ", "explanation": "\"Oh my God! Look at that picture over there! Here's the Earth coming up. Wow is that pretty!\" Soon after that pronouncement, 50 years ago today, one of the most famous images ever taken was snapped from the orbit of the Moon. Now known as \"Earthrise\", the iconic image shows the Earth rising above the limb of the Moon, as taken by the crew of Apollo 8. But the well-known Earthrise image was actually the second image taken of the Earth rising above the lunar limb -- it was just the first in color. With modern digital technology, however, the real first Earthrise image -- originally in black and white -- has now been remastered to have the combined resolution and color of the first three images. Behold! The featured image is a close-up of the picture that Apollo 8 astronaut was talking about. Thanks to modern technology and human ingenuity, now we can all see it. 
(Historical note: A different historic black & white image of the Earth setting behind the lunar limb was taken by the robotic Lunar Orbiter 1 two years earlier.)", "date": "2018-12-24", "hdurl": "https://apod.nasa.gov/apod/image/1812/Earthrise1_Apollo8AndersWeigang_2048.jpg", "service_version": "v1", "media_type": "image", "url": "https://apod.nasa.gov/apod/image/1812/Earthrise1_Apollo8AndersWeigang_960.jpg" }{ "0" : { "block_name" : "air", "transparent" : true }, "1" : { "block_name" : "stone", "transparent" : false, "top_texture" : {"texture_x" : 2,"texture_y" : 0}, "bottom_texture" : {"texture_x" : 2,"texture_y" : 0}, "left_texture" : {"texture_x" : 2,"texture_y" : 0}, "right_texture" : {"texture_x" : 2,"texture_y" : 0}, "forward_texture" : {"texture_x" : 2,"texture_y" : 0}, "backward_texture" : {"texture_x" : 2,"texture_y" : 0} }, "2" : { "block_name" : "dirt", "transparent" : false, "top_texture" : {"texture_x" : 0,"texture_y" : 1}, "bottom_texture" : {"texture_x" : 0,"texture_y" : 1}, "left_texture" : {"texture_x" : 0,"texture_y" : 1}, "right_texture" : {"texture_x" : 0,"texture_y" : 1}, "forward_texture" : {"texture_x" : 0,"texture_y" : 1}, "backward_texture" : {"texture_x" : 0,"texture_y" : 1} }, "3" : { "block_name" : "grass", "transparent" : false, "top_texture" : {"texture_x" : 0,"texture_y" : 0}, "bottom_texture" : {"texture_x" : 0,"texture_y" : 1}, "left_texture" : {"texture_x" : 1,"texture_y" : 1}, "right_texture" : {"texture_x" : 1,"texture_y" : 1}, "forward_texture" : {"texture_x" : 1,"texture_y" : 1}, "backward_texture" : {"texture_x" : 1,"texture_y" : 1} }, "4" : { "block_name" : "cobble", "transparent" : false, "top_texture" : {"texture_x" : 1,"texture_y" : 0}, "bottom_texture" : {"texture_x" : 1,"texture_y" : 0}, "left_texture" : {"texture_x" : 1,"texture_y" : 0}, "right_texture" : {"texture_x" : 1,"texture_y" : 0}, "forward_texture" : {"texture_x" : 1,"texture_y" : 0}, "backward_texture" : {"texture_x" : 1,"texture_y" : 0} }, "5" : { "block_name" : "log", "transparent" : false, "top_texture" : {"texture_x" : 3,"texture_y" : 1}, "bottom_texture" : {"texture_x" : 3,"texture_y" : 1}, "left_texture" : {"texture_x" : 3,"texture_y" : 0}, "right_texture" : {"texture_x" : 3,"texture_y" : 0}, "forward_texture" : {"texture_x" : 3,"texture_y" : 0}, "backward_texture" : {"texture_x" : 3,"texture_y" : 0} }, "6" : { "block_name" : "leaves", "transparent" : false, "top_texture" : {"texture_x" : 3,"texture_y" : 2}, "bottom_texture" : {"texture_x" : 3,"texture_y" : 2}, "left_texture" : {"texture_x" : 3,"texture_y" : 2}, "right_texture" : {"texture_x" : 3,"texture_y" : 2}, "forward_texture" : {"texture_x" : 3,"texture_y" : 2}, "backward_texture" : {"texture_x" : 3,"texture_y" : 2} }, "7" : { "block_name" : "bedrock", "transparent" : false, "top_texture" : {"texture_x" : 4,"texture_y" : 0}, "bottom_texture" : {"texture_x" : 4,"texture_y" : 0}, "left_texture" : {"texture_x" : 4,"texture_y" : 0}, "right_texture" : {"texture_x" : 4,"texture_y" : 0}, "forward_texture" : {"texture_x" : 4,"texture_y" : 0}, "backward_texture" : {"texture_x" : 4,"texture_y" : 0} } }10-100 
["angular-if-state","angular-middle-ellipses","angular-seconds-to-date","bmapflash","connman-config","crowsnest","drivelist","drivelist-scanner","electron-json-config","electron-json-storage","elevator","etcher-image-stream","etcher-image-write","etcher-latest-version","ghrequest","git-patch-additions","hidepath","hubspell-simplifier-markdown","hubspell-speller","inquirer-dynamic-list","nplugm","observable-json-storage","partitioninfo","queryl","removedrive","replacefile","resin-cli-auth","resin-cli-errors","resin-cli-events","resin-cli-form","resin-cli-visuals","resin-config-json","resin-device-config","resin-device-init","resin-device-logs","resin-device-operations","resin-device-path","resin-device-status","resin-discoverable-services","resin-errors","resin-image-fs","resin-image-manager","resin-image-write","resin-pine","resin-plugin-multiburn","resin-plugin-sync","resin-request","resin-sdk","resin-settings-client","resin-settings-storage","resin-sync","resin-token","resin-zip-image","rindle","slice-stream2","stuffwords","versionist","wary"]{ "parent": "block/template_fence_inventory", "textures": { "rail": "sap:block/mango_fence_rail", "top": "sap:block/mango_fence_top", "texture": "sap:block/mango_fence" } } luosichengx/myplace {"title": "BreakSense: Combining Physiological and Location Sensing to Promote Mobility during Work-Breaks.", "fields": ["activity recognition", "popularity", "software deployment", "smartwatch", "bluetooth"], "abstract": "Work breaks can play an important role in the mental and physical well-being of workers and contribute positively to productivity. In this paper we explore the use of activity-, physiological-, and indoor-location sensing to promote mobility during work-breaks. While the popularity of devices and applications to promote physical activity is growing, prior research highlights important constraints when designing for the workplace. With these constraints in mind, we developed BreakSense, a mobile application that uses a Bluetooth beacon infrastructure, a smartphone and a smartwatch to encourage mobility during breaks with a game-like design. We discuss constraints imposed by design for work and the workplace, and highlight challenges associated with the use of noisy sensors and methods to overcome them. We then describe a short deployment of BreakSense within our lab that examined bound vs. unbound augmented breaks and how they affect users' sense of completion and readiness to work.", "citation": "Citations (3)", "year": "2017", "departments": ["Northwestern University", "FXPAL, Palo Alto, CA, USA", "FXPAL, Palo Alto, CA, USA"], "conf": "chi", "authors": [".....http://dblp.org/pers/hd/c/Cambo:Scott_A=", ".....http://dblp.org/pers/hd/a/Avrahami:Daniel", ".....http://dblp.org/pers/hd/l/Lee:Matthew_L="], "pages": 13}bacora03/YDM-Data1-10 { "name": "", "number": "55553602", "is_illegal": false, "text": "Monsters you control gain 200 ATK for each monster you control with a different Type. 
Once per turn, if you control 4 \"Performapal\" monsters with different Types: You can Special Summon 1 \"Odd-Eyes\" monster from your hand, Deck, or Graveyard.", "type": "Spell", "is_monster": false, "is_spell": true, "is_trap": false, "property": "Field" }{ "@microsoft/generator-sharepoint": { "version": "1.4.1", "libraryName": "cpt-powerbi-webparts", "libraryId": "f3438921-2de8-4e44-a7dd-7dd333c27e1d", "environment": "spo" } }files/json/topic_20161122_0515_radiance-daysim_000137.json {"topic": "Nearest Neighbor mode with Daysim 3.0", "category": "radiance-daysim", "attachments": [], "created_by_name": "", "created_at": "November 22, 2016 at 05:15AM", "body": "Dear all\n\n\nI am working on different daylight simulations with Daysim 3.0. My interest\nis to compare the different methods for calculating contributions from\ndirect sunliight. I have seen that Daysim give three options to calculate\ndaylight coefficients as follows:\n1. Use original daylight coefficients file format\n2. Use DDS file format\n3. Use DDS file format with shadow testing\n\n\nMy query is\n- Which is the way to calculate the direct luminances with the assignment\nmode of 'Nearest Neighbor (NN)'?\nThanks in advance\n\n\n\n___\nAutomatically generated content from [radiance mailing-list](https://radiance-online.org/pipermail/radiance-daysim/2016-November/000137.html).", "id": "radiance-daysim_000137", "created_by": "Abigail_Chi"}app/content/lexicons/strongs/entries/G3917.json1-10 {"derivation": "feminine of (a panther);", "kjv_def": "leopard", "lemma": "\u03c0\u03ac\u03c1\u03b4\u03b1\u03bb\u03b9\u03c2", "frequency": 1, "strongs_def": " a leopard", "outline": "
  1. a pard, panther, leopard
  2. a very fierce Asiatic and African animal, having a tawny skin marked with large black spots
"}{ "directions": [ "With the ground beef, make 8 thin hamburger patties. Fry in a skillet for 3 to 4 minutes on both sides or until done. Set aside.", "In the same pan, heat the olive oil over medium heat. Saute the green peppers, onion, and mushrooms for 1 minute. Cover and let cook in the juices for 5 minutes.", "Place the hamburgers on the buns, top with vegetables and an ounce of low fat mozzarella cheese. Drizzle Italian dressing to taste." ], "ingredients": [ "1 pound lean ground beef", "1 teaspoon olive oil", "1 large green bell peppers, sliced", "1 small onion, sliced", "6 button mushrooms, sliced", "1 cup shredded low-fat mozzarella cheese", "8 hamburger buns", "fat free Italian-style dressing, to taste" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Eddie's Special Burgers", "url": "http://allrecipes.com/recipe/22905/eddies-special-burgers/" } jameslee97/saltysalty/data/MELTING_POINT/453.json {"solvent": null, "expmeth": "DSC/DTA", "footer": "", "title": "Phase transition properties: Normal melting temperature", "ref": {"full": ".; .; .; .; .; . (2016) Fluid Phase Equilib. 423, 190-202.", "title": "The effect of n vs. iso Isomerization on the thermophysical properties of aromatic and non-aromatic ionic liquids"}, "components": [{"formula": "C12H22F6N2O4S2", "name": "1-isobutyl-1-methylpiperidinium bis((trifluoromethyl)sulfonyl)imide", "sample": [["Source:", "commercial source"], ["Initial purification:", "NMR (proton)"], ["Initial purity:", "98 mol %"], ["Final purification:", "Karl Fischer titration;ion chromatography"], ["Final purity:", "0.0017 water mass %;0.01 halide impurity mass %"]], "idout": "ABfwWk", "mw": "436.43"}], "constr": ["Pressure of 1 atm"], "dhead": [["Normal melting temperature, K"]], "phases": ["Crystal", "Liquid"], "data": [[["287.9", "1.1"]]]}{ "version": "english_NIV", "book": "Daniel", "chapter": 11, "verse": 29, "word": "\u201cAt the appointed time he will invade the South again, but this time the outcome will be different from what it was before." }0 { "name": "node-typescript-jest-template", "version": "0.0.1", "description": "A node module template using typescript and jest", "keywords": [ "typescript", "template", "jest" ], "author": { "name": "" }, "license": "BSD-3-Clause", "repository": { "type": "git", "url": "https://github.com/ludorival/node-typescript-jest-template.git" }, "main": "dist/index.js", "types": "dist/index.d.ts", "files": [ "dist/" ], "scripts": { "build": "npm run clean && npm run lint && tsc", "changelog": "conventional-changelog -p angular -u", "changelog:update": "conventional-changelog -p angular -i CHANGELOG.md -s && git add CHANGELOG.md", "clean": "(rm -r ./.nyc_output || true) && (rm -r ./coverage || true) && (rm -r ./dist || true)", "clean:all": "npm run clean && (rm -r ./node_modules || true)", "cover": "jest --coverage", "coverall": "jest --coverage && coveralls < coverage/lcov.info", "lint": "eslint . 
--ext .js,.jsx,.ts,.tsx", "preversion": "npm run build && npm run cover:check", "postversion": "git push && git push --tags", "prepare": "npm run build", "test": "jest", "upgrade": "npx npm-check -u", "version": "npm run build && npm run changelog:update" }, "devDependencies": { "@types/jest": "27.4.0", "@types/node": "16.11.19", "@typescript-eslint/eslint-plugin": "4.33.0", "@typescript-eslint/parser": "4.33.0", "conventional-changelog-cli": "2.2.2", "coveralls": "3.1.1", "cz-conventional-changelog": "3.3.0", "eslint": "7.32.0", "jest": "27.4.7", "prettier": "2.5.1", "ts-jest": "27.1.2", "ts-node": "10.4.0", "eslint-plugin-prettier": "4.0.0", "typescript": "4.5.4" }, "config": { "commitizen": { "path": "./node_modules/cz-conventional-changelog" } } } ArasYagiz/Node-Js-Discord-Bot { "title": "Hubert the weaved", "name":"hubert", "description": "A half-giant, the last remaining of the giant race. His parents have been killed in the [giantslayer_walls] and has multiple hands of corpses weaved onto his body.", "combat": "He fights with objects around him that he picks up on the fly, such as boulders, posts, even houses. His intellect has a lot to be desired.", "allegiance" : "Sanguine Circle", "origin": "Giantslayer Walls", "charRelationships" : "-A near symbiotic relationship with [kitt]", "trivia" : "Inspired by berserker from fate/stay night", "type": "Characters", "lastSeen": "Giantslayer Walls", "aliases": ["half-giant"] }{"html_attributions": [], "result": {"rating": 4.3, "reviews": [{"author_name": "anjora", "author_url": "https://www.google.com/maps/contrib/108597090990909282232/reviews", "language": "en", "profile_photo_url": "https://lh5.ggpht.com/-n6ujLptXIzA/AAAAAAAAAAI/AAAAAAAAAAA/RpjVHvbO9Pk/s128-c0x00000000-cc-rp-mo/photo.jpg", "rating": 5, "relative_time_description": "2 months ago", "text": "I'm giving this station a 5 star review because we passed through it on our way to the Pembrokeshire Coastal Path, where the gentleman behind the ticket counter who we asked for some information wished us a lovely journey; and he remembered us when we returned a week later - three out-of-town, sun-burnt backpackers, and kindly kept our bags for us so we could run into town for one last fish-and-chips! Such a lovely man, he made this station a lovely station. Thank you!", "time": 1564929691}, {"author_name": "", "author_url": "https://www.google.com/maps/contrib/113723953179353300173/reviews", "language": "en", "profile_photo_url": "https://lh5.ggpht.com/-l7Pco3iDubk/AAAAAAAAAAI/AAAAAAAAAAA/P851eujxdeA/s128-c0x00000000-cc-rp-mo-ba4/photo.jpg", "rating": 4, "relative_time_description": "2 months ago", "text": "Okay station, lovely cafe!", "time": 1564411070}, {"author_name": "", "author_url": "https://www.google.com/maps/contrib/110655591643749734585/reviews", "language": "en", "profile_photo_url": "https://lh3.ggpht.com/-DmwUz5jer48/AAAAAAAAAAI/AAAAAAAAAAA/UyFoPgOO7yQ/s128-c0x00000000-cc-rp-mo-ba4/photo.jpg", "rating": 4, "relative_time_description": "a year ago", "text": "Fairly good facilities and nice cafe", "time": 1520096499}, {"author_name": "", "author_url": "https://www.google.com/maps/contrib/106427413994759975419/reviews", "language": "en", "profile_photo_url": "https://lh5.ggpht.com/-UqZqHu8huro/AAAAAAAAAAI/AAAAAAAAAAA/u7iaVSwh3f8/s128-c0x00000000-cc-rp-mo-ba6/photo.jpg", "rating": 2, "relative_time_description": "2 years ago", "text": "My review is not a reflection on the station or the staff. Indeed it is a better small station than most. 
Makes good use of the limited space for parking and turning etc. The review is based on the fact that Arriva TW in the infinite wisdom will only ever put a 1-2 carriage train and if you're starting at Milford, Haverfordwest is only 2 stops away, and on a game day or weekend, Haverfordwest is packed with people and fill up the carriages immediately. From this point on, it's a farce. No point. Port Talbot will have to get a taxi. Bridgend don't even bother. Swansea is like polyfilla finding all the gaps and Neath and Llanelli are standing room only. They need to sort it out. Coming back is even worse.", "time": 1494882807}, {"author_name": "tiktok person", "author_url": "https://www.google.com/maps/contrib/108699508364086931792/reviews", "profile_photo_url": "https://lh5.ggpht.com/-SpZ0xWk8h7g/AAAAAAAAAAI/AAAAAAAAAAA/nAg3d67Y6cA/s128-c0x00000000-cc-rp-mo-ba3/photo.jpg", "rating": 5, "relative_time_description": "a year ago", "text": "", "time": 1536337046}]}, "status": "OK"}{"name": "object_detection_projects", "description": "My public projects about object detection algorithms", "license": null, "starNum": 69, "folkNum": 44, "watchNum": 69, "topic": []}greenelab/nature_news_disparities version https://git-lfs.github.com/spec/v1 oid sha256:a01fa7711befc86765d42845b7bcbc366a239fbca5bb2ca8c1fbaa9af0449ea4 size 797101 JefferyLukas/SRIssimple-line-icons/2.2.2.json {"css/simple-line-icons.css":","css/simple-line-icons.min.css":"}jawee/twitch-recorderconfig-example.json { "client-id": "asdfkölk93242340fdsf", "client-secret": "", "streamers": "streamer1, streamer2", "webhook-id": "awdfjlejrwa", "webhook-token": "" } { "id": 6488, "title": [ "[Monastery, Gallery]" ], "description": [ "A magnificent modwir armoire and long tapestry dominate this room, which is lined with various pieces of artwork. Among other pieces of note are an exceptionally well-crafted shadowbox and an elegant oil painting. Off in a corner of the room stand a stone bust and a bronze statue, each on their own marble pedestal." 
], "paths": [ "Obvious exits: east, south" ], "location": "the Lysierian Hills", "wayto": { "6487": "east", "6489": "south" }, "timeto": { "6487": 0.2, "6489": 0.2 }, "image": "wl-lysierian-1264234799.png", "image_coords": [ 281, 1005, 291, 1015 ], "tags": [ "spectral monk", "monastic lich" ] }{ "name": "apollo-server-v3", "version": "1.0.0", "main": "dist/bundle.js", "repository": "https://github.com/glauroqj/apollo-server-v3.git", "author": "", "license": "MIT", "private": false, "heroku-run-build-script": true, "scripts": { "heroku-prebuild": "yarn install --production=false", "build": "NODE_ENV=production rollup -c", "start": "node ./dist/bundle.js", "dev": "rollup -c -w", "----- DOCKER DEVELOPER -----": "", "dev-start": "docker-compose up -d --build && docker ps && yarn dev-logs", "dev-stop": "docker-compose stop && docker ps", "dev-restart": "yarn dev-stop && yarn dev-start", "dev-logs": "docker logs -f --tail 30 apollo_server_v3_local" }, "dependencies": { "apollo-datasource-rest": "^3.2.0", "apollo-server": "^3.3.0", "apollo-server-core": "^3.3.0", "apollo-server-express": "^3.3.0", "dotenv": "^10.0.0", "express": "^4.17.1", "graphql": "^15.6.1" }, "devDependencies": { "@babel/cli": "^7.15.7", "@babel/core": "^7.15.8", "@babel/plugin-proposal-optional-chaining": "^7.14.5", "@babel/plugin-transform-runtime": "^7.15.8", "@babel/preset-env": "^7.15.8", "@rollup/plugin-alias": "^3.1.5", "@rollup/plugin-babel": "^5.3.0", "@rollup/plugin-run": "^2.1.0", "rollup": "^2.58.0", "rollup-plugin-graphql-tag": "^0.1.0" } } {"body": "On Thu, Feb 27, 2014 at 3:47 PM, <\n___\nAutomatically generated content from [radiance mailing-list](https://radiance-online.org/pipermail/radiance-general/2014-March/010138.html).", "attachments": [], "created_by_name": "", "created_at": "March 03, 2014 at 10:44AM", "created_by": "Andy_McNeil", "parent_id": "radiance-general_010125", "id": "radiance-general_010138"}ItsSilvie/bot {"DOAp":{"name":"Dawn of Ashes Prelude","prefix":"DOAp","language":"EN"},"DEMO22":{"name":"LGS Demo 2022","prefix":"DEMO22","language":"EN"}}1-10 [{"namaKab":"PAMEKASAN","originalFilename":"foto.jpg","namaPartai":"Partai Keadilan Sejahtera","id":147987,"noUrut":1,"nama":"SYAIFUL, S.Pd.I.","stringJenisKelamin":"Laki-Laki"},{"namaKab":"PAMEKASAN","originalFilename":"foto.jpg","namaPartai":"Partai Keadilan Sejahtera","id":112846,"noUrut":2,"nama":"MUHAMMAD, S.Sos.","stringJenisKelamin":"Laki-Laki"},{"namaKab":"PAMEKASAN","originalFilename":"FOTOO.jpg","namaPartai":"Partai Keadilan Sejahtera","id":206302,"noUrut":3,"nama":", S.Ag., M.M.","stringJenisKelamin":"Perempuan"},{"namaKab":"PAMEKASAN","originalFilename":"FOTO.jpg","namaPartai":"Partai Keadilan Sejahtera","id":238255,"noUrut":4,"nama":".Ma.","stringJenisKelamin":"Laki-Laki"},{"namaKab":"PAMEKASAN","originalFilename":"FOTO.jpg","namaPartai":"Partai Keadilan Sejahtera","id":106489,"noUrut":5,"nama":", M.T.","stringJenisKelamin":"Laki-Laki"},{"namaKab":"PAMEKASAN","originalFilename":"FOTO.jpg","namaPartai":"Partai Keadilan Sejahtera","id":250110,"noUrut":6,"nama":", S.Pd.I.","stringJenisKelamin":"Perempuan"},{"namaKab":"PAMEKASAN","originalFilename":"foto.jpg","namaPartai":"Partai Keadilan Sejahtera","id":132977,"noUrut":7,"nama":"","stringJenisKelamin":"Perempuan"},{"namaKab":"PAMEKASAN","originalFilename":"FOTO.jpg","namaPartai":"Partai Keadilan Sejahtera","id":237054,"noUrut":8,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"PAMEKASAN","originalFilename":"foto 4 x 6.jpg","namaPartai":"Partai Keadilan 
Sejahtera","id":132375,"noUrut":9,"nama":".","stringJenisKelamin":"Laki-Laki"}]hashdata/553894a678cdd849844174ba82ec522c_1.json0 [{"title": "Diao cha zhi wai fa quan wei yuan hui bao gao shu = Report of the Commission on Extra-territoriality in China \u8abf\u67e5\u6cbb\u5916\u6cd5\u6b0a\u59d4\u54e1\u6703\u5831\u544a\u66f8 = Report of the Commission on Extra-territoriality in China ", "author": "Commission on Extraterritoriality in China.", "id": "002365062"}]{"data":{"token":[{"reveal_status":2,"timestamp":"2022-02-22T23:06:00+00:00","owner_id":"tz1Vw1tsHuRYaCuk341eUzPfK7oj6FFcs6c9","creator_id":"tz2Dbz9U2YPJNxTztnnDkpr6Yirevymgcs6x","metadata":{"description":"DOGAMI, Adopt Raise, Earn.","name":"DOGAMI #1456","display_uri":"https://nft-zzz.mypinata.cloud/ipfs/QmYvQ69PMLU8qsFusSeCdvGNgHm1LJfo5Zes4LXMRGhQW3","thumbnail_uri":"https://nft-zzz.mypinata.cloud/ipfs/QmTBgWiYPS9vfWoeWrbqhwoGaTqzdq664CFxdSyvp96N3c","artifact_uri":"https://nft-zzz.mypinata.cloud/ipfs/QmY5pNX1HgyAMtJDgocaALRzaPejmDTadjktkTQHoJ7HDU","decimals":0,"attributes":{"rarity_score":42,"ranking":5366,"rarity_tier":{"name":"Bronze","__typename":"rarity_tier"},"generation":"Alpha","gender":"Male","breed":{"name":"Rottweiler","__typename":"breed"},"fur_color":"Light black & tan #6","friendliness":6,"eyes_color":"Brown #3","intelligence":4,"strength":5,"obedience":6,"vitality":7,"secondary_personality":"Independent ","bonding_level":1,"primary_personality":"Naive","stats":{"bonding_level_top_pct":0,"breed_pct":9,"eyes_color_pct":8,"friendliness_top_pct":26,"fur_color_pct":0,"gender_pct":50,"generation_pct":477,"intelligence_top_pct":61,"obedience_top_pct":44,"rarity_tier_pct":58,"primary_personality_pct":5,"size_pct":16,"vitality_top_pct":25,"strength_top_pct":32,"secondary_personality_pct":2,"__typename":"attributes_stats"},"__typename":"attributes"},"is_boolean_amount":true,"__typename":"metadata"},"id":1456,"swaps":[],"__typename":"token"}]}}0 { "id": 3049, "api_model": "exhibitions", "api_link": "https://api.artic.edu/api/v1/exhibitions/3049", "title": "Evening Glow", "is_featured": false, "description": "Over the centuries, Japanese printmakers have expressed the many aspects of evening. From the soft light of dusk to the darkness of night, the sky\u2019s changing moods have been captured by Utagawa Hiroshige, Kawase Hasui, and others. On view are over 30 woodblock prints of nighttime images.", "short_description": "Over the centuries, Japanese printmakers have expressed the many aspects of evening. From the soft light of dusk to the darkness of night, the sky\u2019s changing moods have been captured by Utagawa Hiroshige, Kawase Hasui, and others. 
On view are over 30 woodblock prints of nighttime images.", "web_url": "https://nocache.www.artic.edu/exhibitions/3049/evening-glow", "image_url": null, "type": "AIC Only", "status": "Closed", "aic_start_at": "2007-09-22T00:00:00-05:00", "aic_end_at": "2007-12-09T00:00:00-06:00", "date_display": null, "department_display": "Asian Art", "gallery_id": 2147480090, "gallery_title": null, "artwork_ids": [], "artwork_titles": [], "artist_ids": [], "site_ids": [], "image_id": null, "alt_image_ids": [], "document_ids": [], "suggest_autocomplete_all": { "input": [ "Evening Glow" ], "contexts": { "groupings": [ "title" ] } }, "last_updated_source": "1976-09-02T11:20:00-05:00", "last_updated": "2021-01-13T23:27:25-06:00", "timestamp": "2021-01-14T17:13:52-06:00" }{ "files": { "main.css": "/goit-react-hw-04-hooks-images/static/css/main.1eadae70.chunk.css", "main.js": "/goit-react-hw-04-hooks-images/static/js/main.652b8b5a.chunk.js", "main.js.map": "/goit-react-hw-04-hooks-images/static/js/main.652b8b5a.chunk.js.map", "runtime-main.js": "/goit-react-hw-04-hooks-images/static/js/runtime-main.6443b71d.js", "runtime-main.js.map": "/goit-react-hw-04-hooks-images/static/js/runtime-main.6443b71d.js.map", "static/css/2.ab75b8d7.chunk.css": "/goit-react-hw-04-hooks-images/static/css/2.ab75b8d7.chunk.css", "static/js/2.1a7f76df.chunk.js": "/goit-react-hw-04-hooks-images/static/js/2.1a7f76df.chunk.js", "static/js/2.1a7f76df.chunk.js.map": "/goit-react-hw-04-hooks-images/static/js/2.1a7f76df.chunk.js.map", "index.html": "/goit-react-hw-04-hooks-images/index.html", "static/css/2.ab75b8d7.chunk.css.map": "/goit-react-hw-04-hooks-images/static/css/2.ab75b8d7.chunk.css.map", "static/css/main.1eadae70.chunk.css.map": "/goit-react-hw-04-hooks-images/static/css/main.1eadae70.chunk.css.map", "static/js/2.1a7f76df.chunk.js.LICENSE.txt": "/goit-react-hw-04-hooks-images/static/js/2.1a7f76df.chunk.js.LICENSE.txt" }, "entrypoints": [ "static/js/runtime-main.6443b71d.js", "static/css/2.ab75b8d7.chunk.css", "static/js/2.1a7f76df.chunk.js", "static/css/main.1eadae70.chunk.css", "static/js/main.652b8b5a.chunk.js" ] 
}{"packages":{"wpackagist-plugin\/contact-us":{"1.0":{"name":"wpackagist-plugin\/contact-us","version":"1.0","version_normalized":"1.0.0.0","uid":80280,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.0.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.0"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.1":{"name":"wpackagist-plugin\/contact-us","version":"1.1","version_normalized":"1.1.0.0","uid":80281,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.1"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.2":{"name":"wpackagist-plugin\/contact-us","version":"1.2","version_normalized":"1.2.0.0","uid":80282,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.2.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.2"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.3":{"name":"wpackagist-plugin\/contact-us","version":"1.3","version_normalized":"1.3.0.0","uid":80283,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.3.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.3"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.4":{"name":"wpackagist-plugin\/contact-us","version":"1.4","version_normalized":"1.4.0.0","uid":80284,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.4.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.4"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.5":{"name":"wpackagist-plugin\/contact-us","version":"1.5","version_normalized":"1.5.0.0","uid":80285,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.5.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.5"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.6":{"name":"wpackagist-plugin\/contact-us","version":"1.6","version_normalized":"1.6.0.0","uid":80286,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.1.6.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"tags\/1.6"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"dev-trunk":{"name":"wpackagist-plugin\/contact-us","version":"dev-trunk","version_normalized":"9999999-dev","uid":80287,"time":"2011-08-21 
02:10:38","dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/contact-us.zip?timestamp=1313892638"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/contact-us\/","reference":"trunk"},"homepage":"https:\/\/wordpress.org\/plugins\/contact-us\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"}}}}0 { "description": "Polymer-based web components for D2L Outcome Level of Achievements", "keywords": [ "D2L", "Outcomes" ], "repository": { "type": "git", "url": "https://github.com/Brightspace/d2l-outcomes-level-of-achievements.git" }, "homepage": "https://github.com/Brightspace/d2l-outcomes-level-of-achievements-ui", "name": "d2l-outcomes-level-of-achievements", "version": "3.0.3", "directories": { "test": "test" }, "scripts": { "lang:build": "lang-build -ec langtools/config.json", "lang:copy": "lang-copy -svc langtools/config.json", "lang:lint": "eslint build/lang", "serve": "polymer analyze > analysis.json && polymer serve", "test": "npm run test:lint && npm run test:wct", "test:lint": "npm run test:lint:wc && npm run test:lint:js", "test:lint:js": "eslint . --ext .js,.html test/**/*.js test/**/*.html demo/**/*.js demo/**/*.html", "test:lint:wc": "polymer lint", "test:local": "npm run test:lint && npm run test:wct:local", "test:wct": "polymer test --skip-plugin local", "test:wct:local": "cross-env LAUNCHPAD_BROWSERS=chrome polymer test --skip-plugin sauce" }, "author": "D2L Corporation", "license": "Apache-2.0", "devDependencies": { "@polymer/iron-component-page": "^4.0.0", "@polymer/iron-demo-helpers": "^3.0.0", "@polymer/promise-polyfill": "^3.0.0-pre.18", "@webcomponents/webcomponentsjs": "^2.2.1", "babel-eslint": "^10.0.1", "cross-env": "^5.2.0", "d2l-polymer-langtools": "^1.1.1", "eslint": "^4.19.1", "eslint-config-brightspace": "^0.4.0", "eslint-plugin-html": "^4.0.5", "polymer-cli": "^1.9.4", "wct-browser-legacy": "^1.0.1", "whatwg-fetch": "^2.0.0" }, "dependencies": { "@brightspace-ui/core": "^1.86.0", "@polymer/polymer": "^3.0.0", "d2l-hypermedia-constants": "^6", "d2l-localize-behavior": "BrightspaceUI/localize-behavior#semver:^2", "d2l-polymer-behaviors": "Brightspace/d2l-polymer-behaviors-ui#semver:^2", "d2l-polymer-siren-behaviors": "Brightspace/polymer-siren-behaviors#semver:^1", "siren-sdk": "BrightspaceHypermediaComponents/siren-sdk#semver:^1" }, "resolutions": { "inherits": "2.0.3", "samsam": "1.1.3", "supports-color": "3.1.2", "type-detect": "1.0.0" }, "main": "outcomes-level-of-achievement.js" } 0 { "name": "refla-todo", "version": "0.0.1", "private": true, "description": "Description TBD", "main": "", "directories": { "test": "tests" }, "scripts": { "watch": "watchify refla/main.js --debug -o refla/static/js/bundle.js -v", "build": "browserify --debug refla/main.js | exorcist refla/static/js/bundle.js.map | uglifyjs > refla/static/js/bundle.js", "test": "py.test tests" }, "repository": { "type": "git", "url": "https://github.com/dewe/refla-todo.git" }, "author": "", "license": "MIT", "bugs": { "url": "https://github.com/dewe/refla-todo/issues" }, "homepage": "https://github.com/dewe/refla-todo", "dependencies": { "browser-request": "^0.3.3", "react": "^0.13.1" }, "devDependencies": { "browserify": "^9.0.4", "exorcist": "^0.1.6", "reactify": "^1.1.0", "uglify-js": "^2.4.19", "watchify": "^3.1.0" }, "browserify": { "transform": [ "reactify" ] } } 
{"totalHistoryAmount":10714,"TECNOLOGIA-occurances":3821,"TECNOLOGIA-percentage":35.6636176964719,"SEXO-occurances":2697,"SEXO-percentage":25.1726712712339,"FACEBOOK-occurances":1417,"FACEBOOK-percentage":13.225686018293821,"ESPORTE-occurances":555,"ESPORTE-percentage":5.180138137016987,"CARROS-occurances":160,"CARROS-percentage":1.4933731566175097,"YOUTUBE-occurances":96,"YOUTUBE-percentage":0.8960238939705059,"INSTAGRAM-occurances":83,"INSTAGRAM-percentage":0.7746873249953332,"SÉRIES E FILMES-occurances":76,"SÉRIES E FILMES-percentage":0.7093522493933172,"NOTÍCIAS-occurances":44,"NOTÍCIAS-percentage":0.4106776180698152,"ESOTERISMO-occurances":28,"ESOTERISMO-percentage":0.2613403024080642,"MODA-occurances":24,"MODA-percentage":0.22400597349262646,"ALIMENTAÇÃO E SAÚDE-occurances":22,"ALIMENTAÇÃO E SAÚDE-percentage":0.2053388090349076,"DECORAÇÃO-occurances":18,"DECORAÇÃO-percentage":0.16800448011946986,"TV E CELEBRIDADES-occurances":8,"TV E CELEBRIDADES-percentage":0.07466865783087549,"POP/ARTE-occurances":7,"POP/ARTE-percentage":0.06533507560201605,"CIÊNCIA-occurances":6,"CIÊNCIA-percentage":0.056001493373156616,"EDUCAÇÃO-occurances":6,"EDUCAÇÃO-percentage":0.056001493373156616,"JOGOS-occurances":6,"JOGOS-percentage":0.056001493373156616,"LINKEDIN-occurances":5,"LINKEDIN-percentage":0.04666791114429718,"VIAGENS-occurances":5,"VIAGENS-percentage":0.04666791114429718,"ECONOMIA-occurances":2,"ECONOMIA-percentage":0.018667164457718873,"FITNESS-occurances":2,"FITNESS-percentage":0.018667164457718873,"HUMOR-occurances":1,"HUMOR-percentage":0.009333582228859437,"POLÍTICA-occurances":1,"POLÍTICA-percentage":0.009333582228859437,"INTERNACIONAL-occurances":0,"INTERNACIONAL-percentage":0,"NATUREZA-occurances":0,"NATUREZA-percentage":0,"TWITTER-occurances":0,"TWITTER-percentage":0}codetheorem/platform-2020 { "SENTRY_DSN": "https://0e393a6ba49641debefd3774cb69ed2c@o414418.ingest.sentry.io/5303942", "SCHEDULE_TABLE": "platform-stage-schedule", "USER_EVENTS_TABLE": "platform-stage-user-events", "SHORTLINKS_TABLE": "platform-stage-shortlinks", "SHORTLINK_CLICKS_TABLE": "platform-stage-shortlink-clicks", "ZOOM_API_EMAIL_ACCOUNT": "", "ZOOM_LINK_TABLE": "platform-stage-meetings", "ZOOM_API_KEY_SECRET_NAME": "ZOOM_TESTING_API_KEY", "PLATFORM_BASE_URL": "http://platform-staging.gotechnica.org", "ZOOM_API_KEYS_TABLE": "platform-stage-zoom-api-keys" }{"resourceType":"DataElement","id":"MedicationAdministration.effectiveX","meta":{"lastUpdated":"2017-04-19T07:44:43.294+10:00"},"url":"http://hl7.org/fhir/DataElement/MedicationAdministration.effectiveX","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"id":"MedicationAdministration.effective[x]","path":"MedicationAdministration.effective[x]","short":"Start and end time of administration","definition":"A specific date/time or interval of time during which the administration took place (or did not take place, when the 'notGiven' attribute is true). 
For many administrations, such as swallowing a tablet the use of dateTime is more appropriate.","min":1,"max":"1","type":[{"code":"dateTime"},{"code":"Period"}],"isSummary":true,"mapping":[{"identity":"workflow","map":"…occurrence[x]"},{"identity":"v2","map":"RXA-3-Date/Time Start of Administration / RXA-4-Date/Time End of Administration"},{"identity":"rim","map":".effectiveTime"},{"identity":"w5","map":"when.done"}]}]}{"type":"AdaptiveCard","version":"1.0","body":[{"type":"Container","items":[{"type":"ColumnSet","columns":[{"type":"Column","width":"stretch","items":[{"type":"TextBlock","size":"medium","weight":"lighter","color":"dark","text":"**No one needs to travel.**","wrap":true,"separator":true}],"style":"emphasis","separator":true}]}]},{"type":"ColumnSet","columns":[{"type":"Column","width":"stretch","items":[{"type":"TextBlock","size":"medium","weight":"lighter","color":"accent","text":"Jul 09, 2019 | 09:00 AM – 10:00 AM BST","wrap":true,"spacing":"none","separator":true,"separation":"none"}],"style":null},{"type":"Column","width":"auto","items":[{"type":"TextBlock","size":"small","weight":"lighter","color":"attention","text":"Alternate date/time","horizontalAlignment":"right","wrap":true,"separator":true}],"style":null}]},{"type":"Container","items":[{"type":"ColumnSet","columns":[{"type":"Column","width":"auto","verticalContentAlignment":"bottom","items":[{"type":"Image","url":"https://gtbotappstorage.blob.core.windows.net/gtbotlogo/videoconference.png","spacing":"none","separator":true,"separation":"none","height":"stretch"}],"spacing":"none","separation":"none","height":"stretch"},{"type":"Column","width":"stretch","verticalContentAlignment":"center","items":[{"type":"Container","items":[{"type":"ColumnSet","columns":[{"type":"Column","width":"stretch","verticalContentAlignment":"bottom","items":[{"type":"TextBlock","size":"medium","color":"dark","text":"**John** will join from _London MLP (ML8W.M07 (VC))_","wrap":true,"spacing":"small"}],"spacing":"small"}],"spacing":"large","separator":true,"separation":"strong"}]}],"spacing":"large","separator":true,"separation":"strong"}],"spacing":"large","separator":true,"separation":"strong"},{"type":"ColumnSet","columns":[{"type":"Column","width":"auto","verticalContentAlignment":"bottom","items":[{"type":"Image","url":"https://gtbotappstorage.blob.core.windows.net/gtbotlogo/telepresence.png","spacing":"none","separator":true,"separation":"none","height":"stretch"}],"spacing":"none","separation":"none","height":"stretch"},{"type":"Column","width":"stretch","verticalContentAlignment":"center","items":[{"type":"Container","items":[{"type":"ColumnSet","columns":[{"type":"Column","width":"stretch","verticalContentAlignment":"bottom","items":[{"type":"TextBlock","size":"medium","color":"dark","text":"**Arya** will join from _Paris Tour First (26.3 - Venet 
(Téléprésence))_","wrap":true,"spacing":"small"}],"spacing":"small"}],"spacing":"large","separator":true,"separation":"strong"}]}],"spacing":"large","separator":true,"separation":"strong"}],"spacing":"large","separator":true,"separation":"strong"},{"type":"ColumnSet","columns":[{"type":"Column","width":"auto","verticalContentAlignment":"bottom","items":[{"type":"Image","url":"https://gtbotappstorage.blob.core.windows.net/gtbotlogo/skype.png","spacing":"none","separator":true,"separation":"none","height":"stretch"}],"spacing":"none","separation":"none","height":"stretch"},{"type":"Column","width":"stretch","verticalContentAlignment":"center","items":[{"type":"Container","items":[{"type":"ColumnSet","columns":[{"type":"Column","width":"stretch","verticalContentAlignment":"bottom","items":[{"type":"TextBlock","size":"medium","color":"dark","text":"**Rachel** will join from _Leeds (4.5)_","wrap":true,"spacing":"small"}],"spacing":"small"}],"spacing":"large","separator":true,"separation":"strong"}]}],"spacing":"large","separator":true,"separation":"strong"}],"spacing":"large","separator":true,"separation":"strong"}]},{"type":"Container","items":[{"type":"ColumnSet","columns":[{"type":"Column","width":"stretch","items":[{"type":"TextBlock","size":"small","color":"dark","text":"Travel cost **$0**","horizontalAlignment":"center","wrap":true}],"style":"emphasis","separator":true},{"type":"Column","width":"stretch","verticalContentAlignment":"center","items":[{"type":"TextBlock","size":"small","color":"dark","text":"Carbon footprint **0.0 kg CO₂**","horizontalAlignment":"center","wrap":true}],"style":"emphasis"}]}]}],"actions":[{"type":"Action.Submit","data":{"msteams":{"type":"messageBack","displayText":"Book selected","text":"Book9"}},"title":"Book"},{"type":"Action.Submit","data":{"msteams":{"type":"messageBack","displayText":"Hold selected","text":"Hold9"}},"title":"Hold"}]} { "occulta_novellia_version": 1, "copyright": "Copyright Rektangular Studios Inc.; all rights reserved", "name": "", "card": { "number": 14, "release_set": "Presale 1", "rarity": "Rare" }, "progression": { "class": "Rotakin", "stage": 2 }, "stats": { "health": 6, "attack": 20, "move": 2 }, "attributes": [ "Creature", "Ghost", "Explode", "Conceal", "Emanation", "Fly" ], "description": "Ghostly spheres wander the Grevan forests, often finding their way into rarely visited basements or abandoned structures. Though punishable with death, renegade Grevan operators often delve into them, hoping to discover some new talent. They leave a residue as they float, which evokes visions in those who dare make contact." 
}33kk/uso-archive100-1000 { "id": 139187, "name": "BLOODCAT OSU!", "description": "Dark BLOODCAT OSU!", "user": { "id": 359118, "name": "Rinale", "email": "", "paypal_email": null, "homepage": null, "about": null, "license": "ccby" }, "updated": "2017-02-20T16:20:44.000Z", "weekly_install_count": 0, "total_install_count": 100, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/139187_after.jpeg?r=1609834101", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": "ccby", "created": "2017-02-20T16:20:44.000Z", "category": "site", "raw_subcategory": "bloodcat", "subcategory": "bloodcat", "additional_info": null, "style_tags": [], "css": "@-moz-document domain(\"bloodcat.com\") {\r\n.sets {\r\n overflow: hidden;\r\n margin-bottom: 6px;\r\n color: #8c8c8c;\r\n}\r\nheader {\r\n position: fixed;\r\n margin-top: -40px;\r\n width: 100%;\r\n line-height: 40px;\r\n white-space: nowrap;\r\n background: #000;\r\n z-index: 2;\r\n}\r\n.set {\r\n background: #131313 no-repeat;\r\n position: relative;\r\n white-space: nowrap;\r\n overflow: hidden;\r\n}\r\n.mid .set {\r\n border: 4px solid #131313;\r\n}\r\n.set a {\r\n color: #ff8383;\r\n text-decoration: none;\r\n border-bottom: 1px dashed transparent;\r\n}\r\n.set button.mode {\r\n border: 0;\r\n padding: 0;\r\n margin: 0;\r\n position: absolute;\r\n height: 17px;\r\n background: rgba(255, 255, 255, 0);\r\n cursor: pointer;\r\n line-height: 1;\r\n}\r\nbody {\r\n margin: 0;\r\n overflow: hidden;\r\n overflow-y: scroll;\r\n background: #000000;\r\n}\r\nheader a, header input, header label, header button, header legend {\r\n color: #ffffff;\r\n}\r\n.mid .mode .diff {\r\n position: absolute;\r\n right: 0;\r\n background: rgba(255, 255, 255, 0);\r\n height: 20px;\r\n font-size: 11px;\r\n width: auto;\r\n padding: 0 4px;\r\n}\r\n.set .id {\r\n color: #fff;\r\n}\r\n.set ul.mode a {\r\n color: #ff837f;\r\n}\r\n.mid .mode li {\r\n position: relative;\r\n border-bottom: 1px solid rgba(238, 238, 238, 0);\r\n overflow: hidden;\r\n}\r\n.mid .set.r .details {\r\n border-color: rgba(255, 255, 255, 0);\r\n}\r\n.mid .set.r::after {\r\n border-top-color: #71ff00;\r\n}\r\n.mid .set::after {\r\n border: solid rgba(0, 0, 0, 0);\r\n border-width: 30px 16px 0 0;\r\n content: '';\r\n position: absolute;\r\n top: 0;\r\n left: 0;\r\n}\r\nheader .selected {\r\n background: #000;\r\n}\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/139187/bloodcat-osu.user.js", "style_settings": [] }{ "markdown": "{{slash}}, 3m Push {{!}} [Accuracy](/accuracy/) vs. [Fortitude](/fortitude/)
If successful: [Frightened](/statusEffectsDeadfireFrightened/) for 15.0 sec {{!}} [Accuracy](/accuracy/) vs. [Will](/will/)\n| label_1 = Penetration\n| data_1 = 7\n| rel_quests = \n| rel_abilities = \n| rel_items = Wicked Beast\n| rel_characters = \n| internalname = Cry_havoc_Ability\n| guid = 89abc2d8-b7d8-49aa-86d5-4e491b2bea10\n}}\n'''{{Pagename nd}}''' is an [ability](/pillarsOfEternityIiDeadfireAbilities/) in {{poe2}}.\n\n## Description\n\n{{Description|{{#var:description}}}}\n\n## Effects\n\n{{#var:effects_formatted}}\n\n## Items\n\n* [Wicked Beast](/wickedBeast/) ([Cry Havoc](/cryHavocEnchantment/)): Grants [Cry Havoc](/cryHavoc/)", "raw": "{{Infobox ability poe2\n| name = \n| icon = with_all_your_strength_icon.png\n| description = Strike out at all nearby enemies, dealing Pierce/Slash Damage and Frightening them.\n| added_in = poe2\n| class = \n| subclass = \n| race = \n| subrace = \n| activation = Active\n| activation_req = \n| combat_only = yes\n| ability_type = \n| ability_level = 1\n| ability_origin = Equipment\n| modal_group = \n| learn_type = \n| learn_level = \n| learn_level_mc = \n| learn_req = \n| upgrades_from = \n| upgrades_to = \n| keywords = Beasts\n| counters = \n| source = \n| source_cost = \n| uses = 1\n| restoration = Rest\n| cast_time = 0.5\n| recovery_time = 3.0\n| range = \n| area_of_effect = 2.5 Radius from Caster\n| duration = \n| linger = \n| noise_use = Quiet\n| noise_impact = Quiet\n| target = Foe AoE\n| effects = 20-30 {{pierce}} {{slash}}, 3m Push {{!}} [[Accuracy]] vs. [[Fortitude]]
If successful: [[Status effects (Deadfire)#Frightened|Frightened]] for 15.0 sec {{!}} [[Accuracy]] vs. [[Will]]\n| label_1 = Penetration\n| data_1 = 7\n| rel_quests = \n| rel_abilities = \n| rel_items = Wicked Beast\n| rel_characters = \n| internalname = Cry_havoc_Ability\n| guid = 89abc2d8-b7d8-49aa-86d5-4e491b2bea10\n}}\n'''{{Pagename nd}}''' is an [[Pillars of Eternity II: Deadfire abilities|ability]] in {{poe2}}.\n\n==Description==\n{{Description|{{#var:description}}}}\n\n==Effects==\n{{#var:effects_formatted}}\n\n==Items==\n* [[Wicked Beast]] ([[Cry Havoc (enchantment)|Cry Havoc]]): Grants [[Cry Havoc]]", "slug": "cryHavoc", "title": "Cry Havoc" } 1-10 {"title": {"cop": "Ϣⲁϣϥ ⲛ̀ⲁⲣⲭⲏ ⲁⲅⲅⲉⲗⲟⲥ", "eng": "Doxology for All the Heavenly Beings", "ara": " ذوكصولوجية للسمائيين "}, "items": [{"user": {"cop": "Ⲡⲓⲗⲁⲟⲥ", "eng": "People", "ara": "الشعب "}, "text": {"cop": ["Ϣⲁϣϥ ⲛ̀ⲁ̀ⲣⲭⲏⲁ̀ⲅⲅⲉⲗⲟⲥ: ⲥⲉⲟ̀ϩⲓ ⲉ̀ⲣⲁⲧⲟⲩ ⲉ̀ⲩⲉ̀ⲣϩⲩⲙⲛⲟⲥ: ⲙ̀ⲡⲉⲙ̀ⲑⲟ ⲙ̀ⲡⲓⲠⲁⲛⲧⲟⲕⲣⲁⲧⲱⲣ: ⲉ̀ⲩϣⲉⲙϣⲓ ⲙ̀ⲙⲩⲥⲧⲏⲣⲓⲟⲛ ⲉ̀ⲧϩⲏⲡ.", " Ⲙⲓⲭⲁⲏⲗ ⲡⲉ ⲡⲓϩⲟⲩⲓⲧ: Ⲅⲁⲃⲣⲓⲏⲗ ⲡⲉ ⲡⲓⲙⲁϩ ⲥ̀ⲛⲁⲩ: Ⲣⲁⲫⲁⲏⲗ ⲡⲉ ⲡⲓⲙⲁϩ ϣⲟⲙⲧ: ⲕⲁⲧⲁ ⲡ̀ⲧⲩⲡⲟⲥ ⲛ̀ϯⲦ̀ⲣⲓⲁⲥ.", "Ⲥⲟⲩⲣⲓⲏⲗ Ⲥⲉⲇⲁⲕⲓⲏⲗ: Ⲥⲁⲣⲁⲑⲓⲏⲗ ⲛⲉⲙ Ⲁⲛⲁⲛⲓⲏⲗ: ⲛⲁⲓⲛⲓϣϯ ⲛ̀ⲣⲉϥⲉ̀ⲣⲟⲩⲱⲓⲛⲓ ⲉ̅ⲑ̅ⲩ̅: ⲛⲏⲉ̀ⲧⲱⲃϩ ⲙ̀ⲙⲟϥ ⲉ̀ϩ̀ⲣⲏⲓ ⲉ̀ϫⲉⲛ ⲡⲓⲥⲱⲛⲧ.", "ⲚⲓⲬⲉⲣⲟⲃⲓⲙ ⲛⲉⲙ ⲛⲓⲤⲉⲣⲁⲫⲓⲙ: ⲛⲓⲑ̀ⲣⲟⲛⲟⲥ ⲛⲓⲙⲉⲧⲟ̅ⲥ̅ ⲛⲓϫⲟⲙ: ⲡⲓϥ̀ⲧⲟⲟⲩ ⲛ̀ⲍⲱⲟⲛ ⲛ̀ⲁ̀ⲥⲱⲙⲁⲧⲟⲥ: ⲉⲧϥⲁⲓ ϧⲁ ⲡⲓϩⲁⲣⲙⲁ ⲛ̀Ⲑⲉⲟⲥ.", "Ⲡⲓϫⲟⲩⲧ ϥ̀ⲧⲟⲟⲩ ⲙ̀ⲡ̀ⲣⲉⲥⲃⲩⲧⲉⲣⲟⲥ: ϧⲉⲛ ϯⲉ̀ⲕⲕⲗⲏⲥⲓⲁ ⲛ̀ⲧⲉ ⲛⲓϣⲟⲣⲡ ⲙ̀ⲙⲓⲥⲓ: ⲉ̀ⲩϩⲱⲥ ⲉ̀ⲣⲟϥ ϧⲉⲛ ⲟⲩⲙⲉⲧⲁ̀ⲧⲙⲟⲩⲛⲕ: ⲉⲩⲱϣ ⲉ̀ⲃⲟⲗ ⲉⲩϫⲱ ⲙ̀ⲙⲟⲥ.", "Ϫⲉ ⲁ̀ⲅⲓⲟⲥ ⲟ̀ Ⲑⲉⲟⲥ: ⲛⲏⲉ̀ⲧϣⲱⲛⲓ ⲙⲁⲧⲁⲗϭ̀ⲱⲟⲩ: ⲁ̀ⲅⲓⲟⲥ Ⲓⲥⲭⲩⲣⲟⲥ: ⲛⲏⲉ̀ⲧⲁⲩⲉ̀ⲛⲕⲟⲧ Ⲡⲟ̅ⲥ̅ ⲙⲁⲙ̀ⲧⲟⲛ ⲛⲱⲟⲩ.", "Ⲁⲅⲓⲟⲥ Ⲁ̀ⲑⲁⲛⲁⲧⲟⲥ: ⲥ̀ⲙⲟⲩ ⲉ̀ⲧⲉⲕⲕ̀ⲗⲏⲣⲟⲛⲟⲙⲓⲁ: ⲙⲁⲣⲉ ⲡⲉⲕⲛⲁⲓ ⲛⲉⲙ ⲧⲉⲕϩⲓⲣⲏⲛⲏ: ⲟⲓ ⲛ̀ⲥⲟⲃⲧ ⲙ̀ⲡⲉⲕⲗⲁⲟⲥ.", "Ϫⲉ ⲭ̀ⲟⲩⲁⲃ ⲟⲩⲟϩ ⲭ̀ⲟⲩⲁⲃ: ⲭ̀ⲟⲩⲁⲃ Ⲡⲟ̅ⲥ̅ ⲥⲁⲃⲁⲱⲑ: ⲧ̀ⲫⲉ ⲛⲉⲙ ⲡ̀ⲕⲁϩⲓ ⲙⲉϩ ⲉ̀ⲃⲟⲗ: ϧⲉⲛ ⲡⲉⲕⲱ̀ⲟⲩ ⲛⲉⲙ ⲡⲉⲕⲧⲁⲓⲟ.", "Ⲁⲩϣⲁⲛϫⲟⲥ ⲙ̀ⲡⲓ ⲁ̅ⲗ̅: ϣⲁⲣⲉ ⲛⲁⲛ ⲛⲓⲫⲏⲟⲩⲓ̀ ⲟⲩⲟϩ ⲙ̀ⲙⲱⲟⲩ: ϫⲉ ⲁ̀ⲅⲓⲟⲥ ⲁ̀ⲙⲏⲛ ⲁ̅ⲗ̅: ⲡⲓⲱ̀ⲟⲩ ⲫⲁ ⲡⲉⲛⲚⲟⲩϯ ⲡⲉ.", "Ⲁⲣⲓⲡ̀ⲣⲉⲥⲃⲉⲩⲓⲛ ⲉ̀ϩ̀ⲣⲏⲓ ⲉ̀ϫⲱⲛ: ⲛⲓⲥ̀ⲧⲣⲁⲧⲓⲁ ⲛ̀ⲁ̀ⲅⲅⲉⲗⲓⲕⲟⲛ: ⲛⲉⲙ ⲛⲓⲧⲁⲅⲙⲁ ⲛ̀ⲉ̀ⲡⲟⲩⲣⲁⲛⲓⲟⲛ: ⲛ̀ⲧⲉϥ ⲭⲁ ⲛⲉⲛⲛⲟⲃⲓ ⲛⲁⲛ ⲉⲃⲟⲗ."], "eng": ["Seven Archangels / praising as they stand / before the Pantocrator / serving the hidden Mystery", "Michael is the first / Gabriel is the second / Rafael is the third / a symbol of the Trinity. ", "Suriel, Sedakiel / Sarathiel, and Ananiel / the great and holy luminaries / entreating Him for the creation.", "The Cherubim and the Seraphim / the thrones, dominions and powers, / the four incorporeal creatures, / carrying the throne of God. ", "The twenty-four presbyters / in the church of the first-born / praising Him without ceasing / proclaming and saying,", "\"Holy God / heal the sick / Holy Mighty / repose those asleep in the Lord\"", "Holy Immortal / bless Your inheritance / may Your mercy and Your peace / be a fortress to Your people.", "Holy, Holy / Holy, O Lord of Hosts / heaven and earth are full of / Your glory and honor", "And when they say \"Alleluia\" / the heavenly respond saying, / \"Holy. Amen Alleluia. / Glory be to our God.\"", "Intercede on our behalf / O angelic armies / and heavenly orders / that He may forgive us our sins."], "ara": [" .سبعة رؤساء ملائكة وقوف يسبحون أمام الضابط الكل يخدمون السر الخفى", " .ميخائيل هو الأول. غبريال هو الثانى. رافائيل هو الثالث. كمثال الثالوث ", " .سوريال سداكيال سراتيال وآنانيال هؤلاء المنيرون العظماء الأطهار يطلبون منه عن الخليقة ", " .الشاروبيم والسارافيم الكراسي والأرباب والقوات الأربعة الحيوانات الغير المتجسدين الحاملون مركبة الله ", " .الأربعة وعشرين قسيساً في كنيسة الأبكار يسبحونه بلا فتور صارخين قائلين ", " .قدوس الله. المرضي إشفهم قدوس القوى. الراقدين يارب نيحهم ", " .قدوس الذى لا يموت بارك ميراثك. 
ولتكن رحمتك وسلامك حصناً لشعبك ", " .قدوس قدوس قدوس رب الصاباؤوت. السماء والأرض مملوءتان من مجدك وكرامتك ", " .إذا ما قالوا هلليلويا يتبعهم السمائييون قائلين قدوس أمين هلليلويا. المجد هو لإلهنا ", " .إشفعوا فينا أيها العساكر الملائكية والطغمات السمائية ليغفر لنا خطايانا "]}}]} examples/worldFlights/data/places/Tirgu_Mures^Romania.json [["596", "alitalia", "bucharest", "romania", "tirgu mures", "romania", "0"], ["596", "alitalia", "tirgu mures", "romania", "bucharest", "romania", "0"], ["3389", "mal\u00c3\u00a9v", "budapest", "hungary", "tirgu mures", "romania", "0"], ["3389", "mal\u00c3\u00a9v", "tirgu mures", "romania", "budapest", "hungary", "0"], ["5179", "tarom", "bucharest", "romania", "tirgu mures", "romania", "0"], ["5179", "tarom", "tirgu mures", "romania", "bucharest", "romania", "0"], ["5461", "wizz air", "budapest", "hungary", "tirgu mures", "romania", "0"], ["5461", "wizz air", "london", "united kingdom", "tirgu mures", "romania", "0"], ["5461", "wizz air", "tirgu mures", "romania", "budapest", "hungary", "0"], ["5461", "wizz air", "tirgu mures", "romania", "london", "united kingdom", "0"]]1-10 { "appID": "com.miniapp.helloworld", "appName": "Helloworld", "versionName": "1.0.0", "versionCode": 1, "minPlatformVersion": 100, "pages": [ "pages/index/index" ], "window": { "fullScreen":true, "backgroundColor": "#ffffff" } } 1-10 { "publisher_name": "Society of Motion and Television Engineers", "journals": 1, "scihub": 322, "crossref_open_access": 0, "crossref_active": 2680, "crossref": 2680, "coverage": 0.12014925, "publisher_slug": "society-of-motion-and-television-engineers", "access_logs": { "downloads": 0.03155, "visitors": 0.02839, "countries": 0.02524, "days": 0.02839, "months": 0.01577, "n_articles_requested": 3, "n_articles": 317 } }{ "name": "ractive", "description": "Next-generation DOM manipulation", "version": "0.3.3", "homepage": "http://rich-harris.github.com/Ractive/", "main": "build/Ractive.js", "keywords": [ "template", "templating", "data binding", "binding", "declarative", "view model" ], "author": { "name": "" }, "licenses": [ { "type": "MIT" } ], "jam": { "main": "Ractive.js", "include": [ "Ractive.js", "build/Ractive.js", "build/Ractive.runtime.js", "build/Ractive.min.js", "build/Ractive.runtime.min.js", "build/Ractive-legacy.js", "build/Ractive-legacy.runtime.js", "build/Ractive-legacy.min.js", "build/Ractive-legacy.runtime.min.js", "README.md" ] }, "categories": [ "DOM", "Templating" ], "bugs": { "web": "https://github.com/Rich-Harris/Ractive/issues" }, "contributors": [ { "name": "", "web": "https://github.com/maxogden" }, { "name": "", "web": "https://github.com/ryanramage" }, { "name": "", "web": "https://github.com/1N50MN14" } ], "repositories": [ { "type": "git", "url": "https://github.com/Rich-Harris/Ractive.git" } ], "github": "https://github.com/Rich-Harris/Ractive", "devDependencies": { "grunt": "~0.4.0", "grunt-contrib-jshint": "~0.2.0", "grunt-contrib-clean": "~0.4.0", "grunt-contrib-qunit": "~0.2.0", "grunt-contrib-concat": "~0.1.3", "grunt-contrib-uglify": "~0.2.2", "grunt-contrib-copy": "~0.4.0", "grunt-contrib-watch": "~0.4.3" } } {"localities": ["Belle Point, LA", "Lions, LA", "Reserve, LA"], "state": "LA", "postal_code": "70084", "locality": "Reserve, LA", "lat": 30.075645, "region": {"fips": "22", "abbr": "LA", "name": "Louisiana"}, "city": "Reserve", "type": "STANDARD", "lng": -90.567481, "counties": [{"fips": "095", "name": "St. 
John the Baptist Parish"}]}data/uso-styles/12222.json { "id": 12222, "name": "Boortz.Com Right Column Omitted", "description": "The new format on Boortz.com makes the Nuze harder to read, so I modified it a bit. This will get rid of the right column all together.", "user": { "id": 16894, "name": "", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2008-11-19T14:03:57.000Z", "weekly_install_count": 0, "total_install_count": 131, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/12222_after.png?r=1475410022", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": null, "created": "2008-11-19T14:03:57.000Z", "category": "site", "raw_subcategory": "boortz", "subcategory": "boortz", "additional_info": null, "style_tags": [], "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n@-moz-document domain(\"boortz.com\") {\r\n #rightColumnCell { display:none !important;}\r\n\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/12222/boortz-com-right-column-omitted.user.js", "style_settings": [] }{ "theme-data": { "name": "Andy", "description": "Simpel en elegant thema voor websites of blogs. Navigatiebalk met social media en statische pagina's." } }{ "WorkItem": { "AffectedComponent": { "Name": "", "DisplayName": "" }, "ClosedComment": "", "ClosedDate": null, "CommentCount": 0, "Custom": null, "Description": "As I worked through the tutorial and tested each stage, I noticed that the web page didn't always display properly.\r\nThen I realized that the browser was probably showing an old, cached, page. Refreshing always solved the problem.\r\n\r\nPerhaps the examples should always generate the appropriate no-caching code in their headers. ", "LastUpdatedDate": "2011-04-04T12:24:46.773-07:00", "PlannedForRelease": "", "ReleaseVisibleToPublic": false, "Priority": { "Name": "Low", "Severity": 50, "Id": 1 }, "ProjectName": "mvcmusicstore", "ReportedDate": "2011-04-04T12:24:46.773-07:00", "Status": { "Name": "Proposed", "Id": 1 }, "ReasonClosed": { "Name": "Unassigned" }, "Summary": "Tutorial Suggestion", "Type": { "Name": "Issue", "Id": 3 }, "VoteCount": 1, "Id": 6635 }, "FileAttachments": [], "Comments": [] }cleiver/codeandtalk.comdata/people/steven-holden.json { "country": "London UK", "github": "holdenweb", "home": "http://holdenweb.com/", "name": "", "twitter": "holdenweb" }{ "text": "The Democrats have never responded to this statement by Reagan. \"In this present crisis, government is not the solution to our problem, government is the problem.\" Since then the Repubs have been running the table. 
Reagan may have been sincere, but his successors have been using this as an excuse to loot the treasury.", "created": "Thu, 29 Nov 2018 18:46:30 GMT", "type": "outline", "urlvideo": "https://www.youtube.com/watch?v=6ixNPplo-SU", "image": "http://scripting.com/images/2018/11/29/reagan.png" }0 { "quarry": { "index": 1, "isReplica": false, "mergePool": "8ZnBevopNzuptw3CWyxxwZPqbu2tFdZfdN6zbRhzCvdA", "primaryQuarries": [], "primaryToken": { "decimals": 6, "mint": "" }, "primaryTokenInfo": { "address": "", "chainId": 103, "decimals": 6, "name": "Token 4vNP", "symbol": "4vNPx" }, "quarry": "", "replicaMint": "", "replicaQuarries": [], "slug": "4vnpx", "stakedToken": { "decimals": 6, "mint": "" } }, "rewarder": { "authority": "", "mintWrapper": "", "rewardsToken": { "decimals": 6, "mint": "" }, "rewardsTokenInfo": { "address": "", "chainId": 103, "decimals": 6, "name": "Token 9HmW", "symbol": "9HmWV" }, "slug": "6McsFv6jDYTuuGV72XKcRvd3yVG9S6FCGP4GgdRcqxfy" }, "rewardsToken": { "address": "", "chainId": 103, "decimals": 6, "name": "Token 9HmW", "symbol": "9HmWV" }, "stakedToken": { "address": "", "chainId": 103, "decimals": 6, "name": "Token 4vNP", "symbol": "4vNPx" }, "underlyingTokens": [] }{"category":"display","family":"Griffy","files":{"regular":"http://fonts.gstatic.com/s/griffy/v11/FwZa7-ox2FQh9kfwSNSEwM2zpA.ttf"},"lastModified":"2021-03-19","subsets":["latin","latin-ext"],"variants":["regular"],"version":"v11"} { "id": 28903, "citation_title": "Equilibrium Effects of Pay Transparency", "citation_author": [ "", "" ], "citation_publication_date": "2021-06-14", "issue_date": "2021-06-10", "revision_date": "2021-06-22", "topics": [ "Microeconomics", "Game Theory", "Economics of Information", "Labor Economics", "Labor Compensation", "Other", "Accounting, Marketing, and Personnel" ], "program": [ "Labor Studies" ], "projects": null, "working_groups": [ "Market Design", "Organizational Economics", "Personnel Economics" ], "abstract": "\n\nThe public discourse around pay transparency has focused on the direct effect: how workers seek to rectify newly-disclosed pay inequities through renegotiations. The question of how wage-setting and hiring practices of the firm respond in equilibrium has received less attention. To study these outcomes, we build a model of bargaining under incomplete information and test our predictions in the context of the U.S. private sector. Our model predicts that transparency reduces the individual bargaining power of workers, leading to lower average wages. A key insight is that employers credibly refuse to pay high wages to any one worker to avoid costly renegotiations with others under transparency. In situations where workers do not have individual bargaining power, such as under a collective bargaining agreement or in markets with posted wages, greater transparency has a muted impact on average wages. We test these predictions by evaluating the roll-out of U.S. state legislation protecting the right of workers to inquire about the salaries of their coworkers. Consistent with our prediction, the laws lead wages to decline by approximately 2% overall, but declines are progressively smaller in occupations with higher unionization rates. Our model provides a unified framework to analyze a wide range of transparency policies, and reconciles effects of transparency mandates documented in a variety of countries and contexts.\n\n", "acknowledgement": "\nWe are extremely grateful to Aviv Nevo and three anonymous referees. 
One referee in particular shifted the direction of this paper, and we are extremely appreciative of their contribution. We are very grateful for guidance from , , , , , , , , and . We also thank , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , and and seminar attendees at Stanford, UCLA Anderson, NYU Stern, HBS, Dartmouth, Brown, USC, UCSD, UCSB, Penn State, Michigan SI, AEA, SIEPR, EC\u201919, SOLE, and NBER SI (Digitization and Personnel) for helpful comments and suggestions. We are indebted to Chloe Lee, and especially - Elliott and for excellent research assistance. This research was supported by the Center for Comparative Studies in Race and Ethnicity and the B.F. Haley and E.S. Shaw Fellowship through SIEPR. The views expressed herein are those of the authors and do not necessarily reflect the views of the National Bureau of Economic Research.\n\n\n" }0 { "outputBankFolderPath": "D:/[DATA]/[RED-ASSETS]/Art-Private/red-triplane-assets/assets/com.red-triplane.assets.r3.local", "targetTank": "tank-0" }Author Data/cnr rao/Papers/paper 2622.json {"citations": null, "paper_link": "https://scholar.google.com/citations?view_op=view_citation&hl=en&user=Zs9227oAAAAJ&cstart=2559&pagesize=100&citation_for_view=Zs9227oAAAAJ:dYRx7efp7U0C", "authors": [""], "title": "Mixed Valence in Chemistry", "publication": "Theoretical and Experimental Aspects of Valence Fluctuations and Heavy ..., 1987"}{ "excerpt": "“Injustice anywhere is a threat to justice everywhere. We are caught in an inescapable network of mutuality, tied in a single garment of destiny. Whatever affects one directly, affects all indirectly.” \nThe post Blum Center for Developing Economies Statement of Solidarity for Racial, Social, and Economic Justice appeared first on Blum Center.", "feedKey": "blum", "templateKey": "rss-post", "author": "", "date": "2020-06-04", "url": "http://blumcenter.berkeley.edu/?p=15800", "curated": true, "title": "Blum Center for Developing Economies Statement of Solidarity for Racial, Social, and Economic Justice", "id": "", "image": "/img/blumcenter-socialjustice-news.jpeg" } { "kind": "Method", "name": "Range.createContextualFragment", "href": "https://developer.mozilla.org/en-US/docs/Web/API/Range/createContextualFragment", "description": "The Range.createContextualFragment() method returns a DocumentFragment by invoking the HTML fragment parsing algorithm or the XML fragment parsing algorithm with the start of the range (the parent of the selected node) as the context node. The HTML fragment parsing algorithm is used if the range belongs to a Document whose HTMLness bit is set. In the HTML case, if the context node would be html, for historical reasons the fragment parsing algorithm is invoked with body as the context instead." } { "Duration": 7921, "ReturnValue": null, "ResultText": "ERROR! Object: SqlSpClient.Execute | Message: Invalid object name 'notable'.", "DbRows": null }0 {"data":{"ID":"base2-6","Name":"Mr. 
Mime","Supertype":"Pokémon","Subtypes":["Basic"],"HP":"40","Types":["Psychic"],"EvolvesFrom":"Mime Jr.","Rules":null,"Attacks":[{"Name":"Meditate","Cost":["Psychic","Colorless"],"ConvertedEnergyCost":2,"Damage":"10+","Text":"Does 10 damage plus 10 more damage for each damage counter on the Defending Pokémon."}],"Weaknesses":[{"Type":"Psychic","Value":"×2"}],"RetreatCost":["Colorless"],"ConvertedRetreatCost":1,"Set":{"ID":"base2","Name":"Jungle","Series":"Base","PrintedTotal":64,"Total":64,"Legalities":{"unlimited":"Legal"},"PtcgoCode":"JU","ReleaseDate":"1999/06/16","UpdatedAt":"2020/08/14 09:35:00","Images":{"logo":"https://images.pokemontcg.io/base2/logo.png","symbol":"https://images.pokemontcg.io/base2/symbol.png"}},"Number":"6","Artist":"","Rarity":"Rare Holo","NationalPokedexNumbers":[122],"Legalities":{"unlimited":"Legal"},"RegulationMark":"","Images":{"large":"https://images.pokemontcg.io/base2/6_hires.png","small":"https://images.pokemontcg.io/base2/6.png"},"CardMarket":{"URL":"https://prices.pokemontcg.io/cardmarket/base2-6","UpdatedAt":"2022/03/14","Prices":{"AverageSellPrice":20.85,"LowPrice":0.95,"TrendPrice":38.8,"ReverseHoloTrend":11.05,"Avg1":8.5,"Avg7":55.34,"Avg30":21.66,"ReverseHoloAvg1":10,"ReverseHoloAvg7":11.78,"ReverseHoloAvg30":10.44}}}} {"email-settings":"Postavke emaila","address":"Email adresa","address-help":"Sljedeća email adresa je adresa koju će primatelj vidjeti u \"Od\" i \"Odgovori na\" poljima.","from":"Od imena","from-help":"Ime prikazano u dolaznom emailu.","smtp-transport":"SMTP Transport","smtp-transport.enabled":"Use an external email server to send emails","smtp-transport-help":"You can select from a list of well-known services or enter a custom one.","smtp-transport.service":"Select a service","smtp-transport.service-custom":"Custom Service","smtp-transport.service-help":"Select a service name above in order to use the known information about it. Alternatively, select 'Custom Service' and enter the details below.","smtp-transport.gmail-warning1":"There have been reports of the Gmail service not working on accounts with heightened security. In those scenarios, you will have to configure your GMail account to allow less secure apps.","smtp-transport.gmail-warning2":"For more information about this workaround, please consult this NodeMailer article on the issue. An alternative would be to utilise a third-party emailer plugin such as SendGrid, Mailgun, etc. Browse available plugins here.","smtp-transport.host":"SMTP Host","smtp-transport.port":"SMTP Port","smtp-transport.security":"Connection security","smtp-transport.security-encrypted":"Encrypted","smtp-transport.security-starttls":"StartTLS","smtp-transport.security-none":"None","smtp-transport.username":"Username","smtp-transport.username-help":"For the Gmail service, enter the full email address here, especially if you are using a Google Apps managed domain.","smtp-transport.password":"Password","template":"Uredi predložak emaila","template.select":"Odaberi predložak emaila","template.revert":"Povrati na original ","testing":"Testiranje emaila","testing.select":"Odaberi email predložak ","testing.send":"Pošalji testni email","testing.send-help":"Ovaj test mail će biti poslan svim trenutačno prijavljenim korisnicima na njihovu email adresu.","subscriptions":"Email pretplate","subscriptions.disable":"Onemogući obavijesti emailom za pretplatnika ","subscriptions.hour":"Pregled Sati.","subscriptions.hour-help":"Unesite broj koji pretstavlja vrijeme kada će se poslati pregled mailom (npr. 
0 za ponoć, 17 za 5 popodne). Imajte na umu da to vrijeme predstavlja vrijeme servera te ne mora predstavljati vrijeme na Vašem sistemu. Vrijeme servera je:
Sljedeći pregled će biti poslan ."}XPRIZE/glexp-usage-standardizationteam-CCI/tablet-usage-data/2019-03-01/14/REMOTE/NjExNjAwMjcwNi0xLS9hbmRyb2lkX2Fzc2V0L3d3dy9zY2hvb2wvSGlzYWJhdGkvSGF0dWElMjA0L0xlc3NvbiUyMDMvaW5kZXguaHRtbC1hbmFseXRpY3MtOTQ2OTI2ODY4MzAz.json1-10 {"946926868332":{"type":"bo","bookName":"Lesson%203","subjectName":"Hatua%204","fName":"users/MS0vYW5kcm9pZF9hc3NldC93d3cvc2Nob29sL0hpc2FiYXRpL0hhdHVhJTIwNC9MZXNzb24lMjAzL2luZGV4Lmh0bWwtYW5hbHl0aWNzLTk0NjkyNjg2ODMwMw==.json"},"946926868352":{"type":"bo","bookName":"Lesson%203","subjectName":"Hatua%204","fName":"users/MS0vYW5kcm9pZF9hc3NldC93d3cvc2Nob29sL0hpc2FiYXRpL0hhdHVhJTIwNC9MZXNzb24lMjAzL2luZGV4Lmh0bWwtYW5hbHl0aWNzLTk0NjkyNjg2ODMwMw==.json"},"946926868406":{"type":"xl"},"946926868628":{"type":"xp","pageCount":9},"946926869081":{"type":"fl"},"946926869114":{"type":"st","linkname":"Page"}}{ "type": "service_account", "project_id": "saf-3b-nbrzh6nu9c6vaamt4inu3gr", "private_key_id": "", "private_key": "-----", "client_email": "", "client_id": "112599532132541671135", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://oauth2.googleapis.com/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/mfc-gx4s0f6o66gfobb5s11mj4r9jl%40saf-3b-nbrzh6nu9c6va3gr.iam.gserviceaccount.com" } Cutting-and-Packing/2D/Datasets/NGCUTFS/json/445.json1-10 {"Name":"ngcutfs3_25","Objects":[{"Length":100,"Height":100,"Stock":null,"Cost":10000}],"Items":[{"Length":53,"Height":55,"Demand":0,"DemandMax":4,"Value":8745},{"Length":34,"Height":36,"Demand":0,"DemandMax":4,"Value":2448},{"Length":5,"Height":95,"Demand":0,"DemandMax":4,"Value":475},{"Length":93,"Height":83,"Demand":0,"DemandMax":4,"Value":23157},{"Length":27,"Height":19,"Demand":0,"DemandMax":4,"Value":513},{"Length":86,"Height":47,"Demand":0,"DemandMax":4,"Value":4042},{"Length":17,"Height":29,"Demand":0,"DemandMax":4,"Value":986},{"Length":25,"Height":10,"Demand":0,"DemandMax":4,"Value":750},{"Length":35,"Height":2,"Demand":0,"DemandMax":4,"Value":140},{"Length":82,"Height":93,"Demand":0,"DemandMax":4,"Value":15252},{"Length":41,"Height":16,"Demand":0,"DemandMax":4,"Value":1312},{"Length":24,"Height":4,"Demand":0,"DemandMax":4,"Value":96},{"Length":2,"Height":35,"Demand":0,"DemandMax":4,"Value":210},{"Length":91,"Height":32,"Demand":0,"DemandMax":4,"Value":8736},{"Length":45,"Height":6,"Demand":0,"DemandMax":4,"Value":540},{"Length":18,"Height":98,"Demand":0,"DemandMax":4,"Value":3528},{"Length":38,"Height":8,"Demand":0,"DemandMax":4,"Value":608},{"Length":44,"Height":3,"Demand":0,"DemandMax":4,"Value":132},{"Length":45,"Height":25,"Demand":0,"DemandMax":4,"Value":3375},{"Length":24,"Height":36,"Demand":0,"DemandMax":4,"Value":2592},{"Length":30,"Height":15,"Demand":0,"DemandMax":4,"Value":450},{"Length":27,"Height":26,"Demand":0,"DemandMax":4,"Value":702},{"Length":40,"Height":22,"Demand":0,"DemandMax":4,"Value":2640},{"Length":48,"Height":95,"Demand":0,"DemandMax":4,"Value":9120},{"Length":39,"Height":17,"Demand":0,"DemandMax":4,"Value":1326},{"Length":6,"Height":29,"Demand":0,"DemandMax":4,"Value":522},{"Length":54,"Height":51,"Demand":0,"DemandMax":4,"Value":8262},{"Length":38,"Height":12,"Demand":0,"DemandMax":4,"Value":912},{"Length":39,"Height":39,"Demand":0,"DemandMax":4,"Value":1521},{"Length":10,"Height":44,"Demand":0,"DemandMax":4,"Value":880},{"Length":36,"Height":8,"Demand":0,"DemandMax":4,"Value":288},{"Length":49,"Height":39,"Demand":0,"DemandMax":4,"V
alue":5733},{"Length":10,"Height":94,"Demand":0,"DemandMax":4,"Value":2820},{"Length":47,"Height":10,"Demand":0,"DemandMax":4,"Value":940},{"Length":15,"Height":88,"Demand":0,"DemandMax":4,"Value":3960},{"Length":20,"Height":10,"Demand":0,"DemandMax":4,"Value":200},{"Length":31,"Height":19,"Demand":0,"DemandMax":4,"Value":1767},{"Length":51,"Height":86,"Demand":0,"DemandMax":4,"Value":4386},{"Length":49,"Height":34,"Demand":0,"DemandMax":4,"Value":3332},{"Length":14,"Height":29,"Demand":0,"DemandMax":4,"Value":406}]}{"news_outlet": "taz", "provenance": "https://taz.de/Holocaust-Gedenken/!5660585/", "query_keywords": ["ausl\u00e4nder", "fl\u00fcchtl"], "creation_date": "28.01.2020", "last_modified": "28.01.2020", "crawl_date": "23.11.2020", "author_person": [""], "author_organization": [], "news_keywords": ["Auschwitz-Birkenau", "Holocaust", "Rechtsextremismus", "Europa", "Politik", "taz", "tageszeitung "], "content": {"title": "Holocaust-Gedenken: Es braucht mehr als Floskeln", "description": "Das Versprechen \u201eNie wieder\u201c ist 75 Jahre nach der Befreiung von Auschwitz br\u00fcchig geworden. Menschenrechte sind auch in Europa bedroht.", "body": {"": ["Urspr\u00fcnglich war das \u201eNie wieder\u201c eine Selbstverpflichtung: \u201eNie wieder werden wir gleichg\u00fcltig zusehen, wenn Minderheiten ausgegrenzt, beleidigt und ihrer Menschenrechte beraubt werden.\u201c Das \u201eNie wieder\u201c sollte weitere V\u00f6lkermorde nach dem Holocaust verhindern. Eine bessere Welt sollte entstehen: Die Vereinten Nationen (UNO) sollten den Weltfrieden bewahren, die entwickelten Demokratien wollten Fl\u00fcchtlinge nicht mehr vor ihren Grenzen abweisen, so wie sie es vor dem Krieg gegen\u00fcber den europ\u00e4ischen Juden getan hatten.", "Doch die Bilanz des \u201eNie wieder\u201c f\u00e4llt nach 75 Jahren niederschmetternd aus. V\u00f6lkermorde gab und gibt es auch nach Auschwitz. Trotz des Versprechens schauen die meisten Menschen weg, wollen das Leid der Verfolgten nicht sehen. Egal ob es Syrer sind, Kongolesen, Rohingya in Myanmar oder Uiguren in der Volksrepublik China. Schlimmer noch: Fast \u00fcberall ziehen Politiker neue Mauern und Z\u00e4une hoch, um die Fl\u00fcchtlinge von den eigenen Grenzen fernzuhalten. \u201eWir k\u00f6nnen schlie\u00dflich nicht alle retten\u201c, hei\u00dft es dann.", "Die Vereinten Nationen waren unf\u00e4hig, ein weltumspannendes System der Friedenssicherung auszubilden und den Schutz der Menschenrechte \u00fcberall zu garantieren. Aber auch die Europ\u00e4ische Union, die nach dem Zweiten Weltkrieg gegr\u00fcndet wurde, um den Frieden zu bewahren, bekommt immer mehr Risse. Fast \u00fcberall in den Mitgliedstaaten kommen Nationalismus, Rassismus und Demokratieverachtung wieder an die Oberfl\u00e4che. \u201eDie Wahrheit \u00fcber den Holocaust darf nicht sterben\u201c, sagte Polens Pr\u00e4sident auf der Gedenkfeier zur Befreiung des nazideutschen Konzentrations- und Vernichtungslagers Auschwitz-Birkenau. Doch zur Wahrheit von heute geh\u00f6rt auch: Die Demokratie und ihre Werte m\u00fcssen immer wieder neu verteidigt werden \u2013 damit sich Verbrechen aus Hass nicht wiederholen.", "In Deutschland haben Hetze und allgemeine Verrohung der Umgangsformen so zugenommen, dass sich Juden immer \u00f6fter fragen, ob sie hier noch sicher leben k\u00f6nnen. Lokalpolitiker geben ihr politisches Engagement auf, weil der Staat sie nicht vor Morddrohungen und Attent\u00e4tern \u2013 wie in Kassel \u2013 sch\u00fctzen kann. 
Die Erosion des Rechtsstaats ist mit H\u00e4nden zu greifen. In Polen und Ungarn zerst\u00f6ren Politiker sogar ganz bewusst die Grundlagen der demokratischen Grundordnung und hetzen offen gegen einzelne Gruppen wie Richter, Ausl\u00e4nder oder Homosexuelle. Wenn in diesen Tagen das \u201eNie wieder\u201c erneut in aller Munde ist, sollte es mehr sein als nur eine Erinnerungsfloskel. Wir sollten uns erneut dar\u00fcber klar werden, was es f\u00fcr jeden von bedeutet und was es uns abverlangt."]}}, "recommendations": []}{ "parser": "babel-eslint", "extends": "airbnb-base", "globals": { "describe": false, "before": false, "beforeEach": false, "after": false, "afterEach": false, "it": false } } jasonmb626/texashousewitnessrelations/witnessTestimony/JSON/80R/C5652007041108301.json [{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  750","bill_cd":"SB750","position":"For","rbnt":false,"self":false,"fullWitnessName":"McLain, Tuck  District Attorney- 278th District  (Grimes County District Attorney/ OIG),  Navasota, TX","given_name":"Tuck  District Attorney- 278th District","sur_name":"McLain","organization":"Grimes County District Attorney/ OIG),  Navasota, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  750","bill_cd":"SB750","position":"For","rbnt":false,"self":false,"fullWitnessName":".  President  (Association of Certified Fraud Examiners),  Austin, TX","given_name":".  President","sur_name":"Ratley","organization":"Association of Certified Fraud Examiners),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  750","bill_cd":"SB750","position":"On","rbnt":false,"self":false,"fullWitnessName":"  Inspector General  (HHSC),  Austin, TX","given_name":"Brian  Inspector General","sur_name":"Flood","organization":"HHSC),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  1449","bill_cd":"SB1449","position":"For","rbnt":false,"self":false,"fullWitnessName":"  Director of Government Relations  (Texas Public Employees Association),  Austin, TX","given_name":"Andrew  Director of Government Relations","sur_name":"Homer","organization":"Texas Public Employees Association),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  1449","bill_cd":"SB1449","position":"On","rbnt":false,"self":false,"fullWitnessName":"  Director of Operations  (Board of Nurse Examiners),  Austin, TX","given_name":"Mark  Director of Operations","sur_name":"Majek","organization":"Board of Nurse Examiners),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  1449","bill_cd":"SB1449","position":"On","rbnt":false,"self":false,"fullWitnessName":"  Director  (State Council on Competitive Government),  Austin, TX","given_name":"James  Director","sur_name":"Scogin","organization":"State Council on Competitive Government),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  1449","bill_cd":"SB1449","position":"On","rbnt":true,"self":false,"fullWitnessName":" 
 Chair  (Mid-Size Agency Coordinating Committee),  Austin, TX","given_name":"Stephanie  Chair","sur_name":"Newberg","organization":"Mid-Size Agency Coordinating Committee),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  1475","bill_cd":"SB1475","position":"On","rbnt":false,"self":false,"fullWitnessName":"., .  Executive Director  (Texas Department of Licensing and Regulation),  Austin, TX","given_name":"Jr., .  Executive Director","sur_name":"Kuntz","organization":"Texas Department of Licensing and Regulation),  Austin, TX"},{"meeting_cd":"C5652007041108301","committee":"Government Organization","dttm":"April 11, 2007 - 08:30 AM","session":"80R","bill_name":"SB  1761","bill_cd":"SB1761","position":"For","rbnt":false,"self":false,"fullWitnessName":"  Physician Assistant  (Texas Academy of Physician Assistants),  Austin, TX","given_name":"Tim  Physician Assistant","sur_name":"King","organization":"Texas Academy of Physician Assistants),  Austin, TX"}]{"word":"obstreperous","definition":"Attended by, or making, a loud and tumultuous noise; clamorous; noisy; vociferous. \"The obstreperous city.\" Wordsworth. \"Obstreperous approbation.\" Addison. Beating the air with their obstreperous beaks. . -- Ob*strep\"er*ous*ly, adv. -- Ob*strep\"er*ous*ness, n."}{ "name": "", "description": "Dù là ngày không gió, loại thực vật này cũng sẽ đung đưa theo tiếng sấm. Trông có vẻ giống kết cấu của cánh hoa, nhưng thật ra là lá vươn dài ra để bảo vệ những đóa hoa yếu ớt.", "sortorder": 1671, "category": "EXCHANGE", "materialtype": "Đặc Sản Khu Vực Inazuma", "source": [ "Thu Thập Bên Ngoài", "Đề xuất: Thu thập ở Tatarasuna" ] }{ "id": 28031, "citation_title": "Economic Benefits of COVID-19 Screening Tests", "citation_author": [ "", "", "", "" ], "citation_publication_date": "2020-11-02", "issue_date": "2020-10-29", "revision_date": "2020-11-05", "topics": [ "Macroeconomics", "Fiscal Policy", "Health, Education, and Welfare", "Health" ], "program": [ "Economic Fluctuations and Growth", "Health Care", "Health Economics" ], "projects": null, "working_groups": null, "abstract": "\n\nWe assess the economic value of screening testing programs as a policy response to the ongoing COVID-19 pandemic. We find that the fiscal, macroeconomic, and health benefits of rapid SARS-CoV-2 screening testing programs far exceed their costs, with the ratio of economic benefits to costs typically in the range of 4-15 (depending on program details), not counting the monetized value of lives saved. Unless the screening test is highly specific, however, the signal value of the screening test alone is low, leading to concerns about adherence. Confirmatory testing increases the net economic benefits of screening tests by reducing the number of healthy workers in quarantine and by increasing adherence to quarantine measures. The analysis is undertaken using a behavioral SIR model for the United States with 5 age groups, 66 economic sectors, screening and diagnostic testing, and partial adherence to instructions to quarantine or to isolate.\n\n", "acknowledgement": "\nWe thank , , , and seminar participants at Johns Hopkins University, the Federal Reserve Bank of Boston, and the Harvard Kennedy School for helpful discussions and/or comments. Droste and Stock acknowledge research support under NSF RAPID Grant SES-2032493. 
The views expressed herein are those of the authors and do not necessarily reflect the views of the National Bureau of Economic Research.\n\n\n" }package.json { "name": "sheet-unlocker", "version": "0.0.1", "description": "Removes the password from Excel spreadsheets", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "git+https://github.com/diogenes1oliveira/sheet-unlocker.git" }, "keywords": [ "excel", "unlock", "password", "xlsx" ], "author": " <>", "license": "MIT", "bugs": { "url": "https://github.com/diogenes1oliveira/sheet-unlocker/issues" }, "homepage": "https://github.com/diogenes1oliveira/sheet-unlocker#readme" } [ { "title": "React Next 123", "content": "`Best-selling author explains how to get the most from React. He begins by describing the React architecture and the benefits it offers and then shows you how to use React and its associated tools and libraries in your projects, starting from the nuts and bolts and building up to the most advanced and sophisticated features, going in-depth to give you the knowledge you need.`", "authorId": "1", "imageUrl": "https://images-na.ssl-images-amazon.com/images/I/411Forn86vL.jpg", "keywords": [ "react", "javascript", "react16" ], "id": "95458188-67b3-4ac0-892d-d0155af06d8c" }, { "title": "Pro React 16", "content": "`Best-selling author explains how to get the most from React. He begins by describing the React architecture and the benefits it offers and then shows you how to use React and its associated tools and libraries in your projects, starting from the nuts and bolts and building up to the most advanced and sophisticated features, going in-depth to give you the knowledge you need.`", "authorId": "1", "imageUrl": "https://images-na.ssl-images-amazon.com/images/I/411Forn86vL.jpg", "keywords": [ "react", "javascript", "react16" ], "id": "53cffa57-fdda-4204-96cd-df3420650865" }, { "title": "Pro React 18", "content": "`Best-selling author explains how to get the most from React. 
He begins by describing the React architecture and the benefits it offers and then shows you how to use React and its associated tools and libraries in your projects, starting from the nuts and bolts and building up to the most advanced and sophisticated features, going in-depth to give you the knowledge you need.`", "authorId": "1", "imageUrl": "https://images-na.ssl-images-amazon.com/images/I/411Forn86vL.jpg", "keywords": [ "react", "javascript", "react16" ], "id": "95458188-67b3-4ac0-892d-d0155af06d8c" } ]16/Kleine Anfrage/16-9822.json { "vorgangId": "9822", "VORGANG": { "WAHLPERIODE": "16", "VORGANGSTYP": "Kleine Anfrage", "TITEL": "Auswirkungen der Europäischen Nahverkehrsverordnung über öffentliche Personenverkehrsdienste auf Schiene und Straße auf die Änderung des Personenbeförderungsgesetzes und die bisherige Genehmigungspraxis von Linienverkehrsgenehmigungen", "INITIATIVE": "Fraktion BÜNDNIS 90/DIE GRÜNEN", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": [ { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "16/6372", "DRS_TYP": "Kleine Anfrage", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/16/063/1606372.pdf" }, { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "16/6544", "DRS_TYP": "Antwort", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/16/065/1606544.pdf" } ], "EU_DOK_NR": "", "SACHGEBIET": "Verkehr", "SCHLAGWORT": [ "EU-Recht", "Genehmigung", { "_fundstelle": "true", "__cdata": "Öffentlicher Personennahverkehr" }, "Personenbeförderungsgesetz", "Rechtsangleichung in der EU", "Regulierung", "Schienenpersonennahverkehr", "Straßenverkehr", "Verordnung der EU", "Wettbewerb" ], "ABSTRAKT": " Effizienz und Attraktivität im ÖPNV durch Wettbewerb, Sicherstellung des vorgesehenen regulierten Wettbewerbes, erforderliche Anpassungen des nationalen Rechtes, darunter Personenbeförderungsgesetz, Genehmigungen von Linienverkehren nach Personenbeförderungsgesetz, Transparenz und Nichtdiskriminierung bei Vergabe von Dienstleistungsaufträgen " }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BT", "URHEBER": "Kleine Anfrage, Urheber : Fraktion BÜNDNIS 90/DIE GRÜNEN ", "FUNDSTELLE": "14.09.2007 - BT-Drucksache 16/6372", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/16/063/1606372.pdf", "PERSOENLICHER_URHEBER": [ { "VORNAME": "Hans-Josef", "NACHNAME": "Fell", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" }, { "VORNAME": "Bettina", "NACHNAME": "Herlitzius", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" }, { "PERSON_TITEL": "Dr.", "VORNAME": "Anton", "NACHNAME": "Hofreiter", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" }, { "VORNAME": "Bärbel", "NACHNAME": "Höhn", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" }, { "VORNAME": "Sylvia", "NACHNAME": "Kotting-Uhl", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" }, { "VORNAME": "Undine", "NACHNAME": "Kurth", "WAHLKREISZUSATZ": "Quedlinburg", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" }, { "VORNAME": "Nicole", "NACHNAME": "Maisch", "FUNKTION": "MdB", "FRAKTION": "BÜNDNIS 90/DIE GRÜNEN", "AKTIVITAETSART": "Kleine Anfrage" } ] }, { "ZUORDNUNG": "BT", "URHEBER": "Antwort, Urheber : Bundesregierung, Bundesministerium für Verkehr, Bau und Stadtentwicklung (federführend)", "FUNDSTELLE": "01.10.2007 - BT-Drucksache 16/6544", 
"FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/16/065/1606544.pdf" } ] } } {"derivation": "a primitive root (perhaps rather identical with H578 (\u05d0\u05b8\u05e0\u05b8\u05d4) through the idea of contraction in anguish);", "pron": "aw-naw'", "outline": "
  1. to meet, encounter, approach, be opportune
    1. (Piel) to allow to meet, cause to meet
    2. (Pual) to be sent, be allowed to meet
    3. (Hithpael) to seek occasion (quarrel), cause oneself to meet
", "kjv_def": "befall, deliver, happen, seek a quarrel.", "lemma": "\u05d0\u05b8\u05e0\u05b8\u05d4", "frequency": 6, "strongs_def": "to approach; hence, to meet in various senses", "xlit": "\u02bc\u00e2n\u00e2h"}{"title":"Elven Warrior (15C14)","text":"Set:\tThe Hunters\nKind:\tFree People\nCulture:\tElven\nTwilight:\t2\nCard Type:\tCompanion • Elf\nStrength:\t5\nVitality:\t3\nResistance:\t6\nGame Text:\tWhile this companion is bearing a possession, he is strength +2. Maneuver: Exert this companion to place an [Elven] card from your discard pile on the bottom of your draw deck.\nRarity:\tC","scanUrl":"https://lotrtcgwiki.com/wiki/_media/cards:lotr15014.jpg"}{ "DB_HOST": "yb-tserver-1.yb-tservers.default.svc.cluster.local", "APP_HOST": "localhost", "APP_PORT": "3001" } { "_id": "e5e27493", "abbreviation": "", "name": "Lisianthus necrosis virus", "lower_name": "lisianthus necrosis virus" }{"ui.panel.page-authorize.initializing":"Alustetaan","ui.panel.page-authorize.authorizing_client":"Olet antamassa pääsyn {clientId} ioBroker -ympäristöösi.","ui.panel.page-authorize.logging_in_with":"Kirjaudutaan sisään **{authProviderName}**.","ui.panel.page-authorize.pick_auth_provider":"Tai kirjaudu sisään joillakin seuraavista","ui.panel.page-authorize.abort_intro":"Kirjautuminen on keskeytetty","ui.panel.page-authorize.form.working":"Ole hyvä ja odota","ui.panel.page-authorize.form.unknown_error":"Jotain meni pieleen","ui.panel.page-authorize.form.next":"Seuraava","ui.panel.page-authorize.form.start_over":"Aloita alusta","ui.panel.page-authorize.form.error":"Virhe: {error}","ui.panel.page-authorize.form.providers.command_line.step.init.data.username":"Käyttäjätunnus","ui.panel.page-authorize.form.providers.command_line.step.init.data.password":"","ui.panel.page-authorize.form.providers.command_line.step.mfa.data.code":"Kaksivaiheinen tunnistuskoodi","ui.panel.page-authorize.form.providers.command_line.step.mfa.description":"Avaa **{mfa_module_name}** laitteessasi nähdäksesi kaksivaiheisen tunnistautumisen koodisi ja vahvistaaksesi identiteettisi:","ui.panel.page-authorize.form.providers.command_line.error.invalid_auth":"Virheellinen käyttäjätunnus tai salasana","ui.panel.page-authorize.form.providers.command_line.error.invalid_code":"Virheellinen tunnistuskoodi","ui.panel.page-authorize.form.providers.command_line.abort.login_expired":"Istunto päättyi, ole hyvä ja kirjaudu uudelleen.","ui.panel.page-authorize.form.providers.homeassistant.step.init.data.username":"Käyttäjätunnus","ui.panel.page-authorize.form.providers.homeassistant.step.init.data.password":"","ui.panel.page-authorize.form.providers.homeassistant.step.mfa.data.code":"Kaksivaiheinen tunnistuskoodi","ui.panel.page-authorize.form.providers.homeassistant.step.mfa.description":"Avaa **{mfa_module_name}** laitteessasi nähdäksesi kaksivaiheisen tunnistautumisen koodisi ja vahvistaaksesi identiteettisi:","ui.panel.page-authorize.form.providers.homeassistant.error.invalid_auth":"Virheellinen käyttäjätunnus tai salasana","ui.panel.page-authorize.form.providers.homeassistant.error.invalid_code":"Väärä tunnistuskoodi","ui.panel.page-authorize.form.providers.homeassistant.abort.login_expired":"Istunto päättyi, ole hyvä ja kirjaudu uudelleen.","ui.panel.page-authorize.form.providers.legacy_api_password.step.init.data.password":"","ui.panel.page-authorize.form.providers.legacy_api_password.step.init.description":"Ole hyvä ja syötä API-salasanasi 
http-asetuksissa:","ui.panel.page-authorize.form.providers.legacy_api_password.step.mfa.data.code":"Kaksivaiheinen tunnistuskoodi","ui.panel.page-authorize.form.providers.legacy_api_password.step.mfa.description":"Avaa **{mfa_module_name}** laitteessasi nähdäksesi kaksivaiheisen tunnistautumisen koodisi ja vahvistaaksesi identiteettisi:","ui.panel.page-authorize.form.providers.legacy_api_password.error.invalid_auth":"Virheellinen API-salasana","ui.panel.page-authorize.form.providers.legacy_api_password.error.invalid_code":"Virheellinen tunnistautumiskoodi","ui.panel.page-authorize.form.providers.legacy_api_password.abort.no_api_password_set":"API-salasanaa ei ole asetettu.","ui.panel.page-authorize.form.providers.legacy_api_password.abort.login_expired":"Istunto vanhentunut, ole hyvä ja kirjaudu uudelleen.","ui.panel.page-authorize.form.providers.trusted_networks.step.init.data.user":"Käyttäjä","ui.panel.page-authorize.form.providers.trusted_networks.step.init.description":"Valitse käyttäjä, jolla haluat kirjautua:","ui.panel.page-authorize.form.providers.trusted_networks.abort.not_allowed":"Tietokone ei ole sallittu.","ui.panel.page-authorize.form.providers.trusted_networks.abort.not_whitelisted":"Tietokonettasi ei ole sallittu."}{"title": "Scalable action recognition with a subspace forest.", "fields": ["contextual image classification", "subspace topology", "grassmannian", "model selection", "linear subspace"], "abstract": "We present a novel structure, called a Subspace Forest, designed to provide an efficient approximate nearest neighbor query of subspaces represented as points on Grassmann manifolds. We apply this structure to action recognition by representing actions as subspaces spanning a sequence of thumbnail image tiles extracted from a tracked entity. The Subspace Forest lifts the concept of randomized decision forests from classifying vectors to classifying subspaces, and employs a splitting method that respects the underlying manifold geometry. The Subspace Forest is an inherently parallel structure and is highly scalable due to O(log N) recognition time complexity. Our experimental results demonstrate state-of-the-art classification accuracies on the well-known KTH Actions and UCF Sports benchmarks, and a competitive score on Cambridge Gestures. In addition to being both highly accurate and scalable, the Subspace Forest is built without supervision and requires no extensive validation stage for model selection. 
Conceptually, the Subspace Forest could be used anywhere set-to-set feature matching is desired.", "citation": "Citations (60)", "year": "2012", "departments": ["Colorado State University", "Colorado State University"], "conf": "cvpr", "authors": [".....http://dblp.org/pers/hd/o/O=Hara:Stephen", ".....http://dblp.org/pers/hd/d/Draper:Bruce_A="], "pages": 8}liuyi0501/IwaraCollector {"title":"[HS] Marie Rose Dance (Test Version)","author":"WorldOfLyRah","description":"Lookingundefinedforundefinedfeedback.","thumb":"//i.iwara.tv/sites/default/files/styles/thumbnail/public/videos/thumbnails/1005244/thumbnail-1005244_0005.jpg?itok=Ex9RcPVX","download":"https://www.iwara.tv/api/video/e4a4buozxzum40ejb","origin":"https://www.iwara.tv/videos/e4a4buozxzum40ejb"}[{"team_id": 47, "team": "Ball State", "id": "2318", "name": "", "year": "Senior", "hometown": "Belleville, Ill.", "high_school": "Belleville East", "previous_school": null, "height": "6'2\"", "position": "Forward", "jersey": "00", "url": "/sports/womens-basketball/roster/suzanne-grossnickle/2318", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2324", "name": "", "year": "Freshman", "hometown": "Rushville, Ind.", "high_school": "Rushville Consolidated", "previous_school": null, "height": "5'10\"", "position": "Guard", "jersey": "3", "url": "/sports/womens-basketball/roster/shelbie-justice/2324", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2320", "name": "", "year": "Sophomore", "hometown": "Indianapolis, Ind.", "high_school": "Ben Davis", "previous_school": null, "height": "5'8\"", "position": "Guard", "jersey": "05", "url": "/sports/womens-basketball/roster/jordan-huber/2320", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2321", "name": "", "year": "Junior", "hometown": "Hilliard, Ohio", "high_school": "Hilliard Darby", "previous_school": null, "height": "5'8\"", "position": "Guard", "jersey": "10", "url": "/sports/womens-basketball/roster/shanee-jackson/2321", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2332", "name": "", "year": "Sophomore", "hometown": "Thorntown, Ind.", "high_school": "Western Boone", "previous_school": null, "height": "5'4\"", "position": "Guard", "jersey": "11", "url": "/sports/womens-basketball/roster/brandy-woody/2332", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2326", "name": "", "year": "Junior", "hometown": "Huber Heights, Ohio", "high_school": "Wayne", "previous_school": null, "height": "5'9\"", "position": "Guard/Forward", "jersey": "21", "url": "/sports/womens-basketball/roster/mercedes-miller/2326", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2314", "name": "", "year": "Freshman", "hometown": "St. Louis, Mo.", "high_school": "Incarnate Word Academy", "previous_school": null, "height": "5'10\"", "position": "Guard", "jersey": "23", "url": "/sports/womens-basketball/roster/brittany-carter/2314", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2331", "name": "", "year": "Freshman", "hometown": "Cincinnati, Ohio", "high_school": "Princeton Senior", "previous_school": null, "height": "6'0\"", "position": "Forward", "jersey": "24", "url": "/sports/womens-basketball/roster/neschelle-williams/2331", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2329", "name": "", "year": "Freshman", "hometown": "Cincinnati, Ohio", "high_school": "Mt. 
Healthy", "previous_school": null, "height": "6'2\"", "position": "Forward", "jersey": "30", "url": "/sports/womens-basketball/roster/jonessa-moore/2329", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2315", "name": "", "year": "Senior", "hometown": "Terre Haute, Ind.", "high_school": "South Vigo", "previous_school": null, "height": "5'4\"", "position": "Guard", "jersey": "31", "url": "/sports/womens-basketball/roster/megan-craft/2315", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2319", "name": "", "year": "Sophomore", "hometown": "St. Louis, Mo.", "high_school": "Incarnate Word Academy", "previous_school": null, "height": "6'1\"", "position": "Forward", "jersey": "32", "url": "/sports/womens-basketball/roster/jazmin-hitchens/2319", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2330", "name": "", "year": "Sophomore", "hometown": "Peoria, Ill.", "high_school": "Richwoods", "previous_school": null, "height": "6'0\"", "position": "Forward", "jersey": "33", "url": "/sports/womens-basketball/roster/katie-murphy/2330", "season": "2011-12"}, {"team_id": 47, "team": "Ball State", "id": "2316", "name": "", "year": "Senior", "hometown": "Huntington, Ind.", "high_school": "Huntington North", "previous_school": null, "height": "6'3\"", "position": "Center", "jersey": "40", "url": "/sports/womens-basketball/roster/amber-crago/2316", "season": "2011-12"}]{"PREVALENCE_BY_GENDER_AGE_YEAR":{"TRELLIS_NAME":[],"SERIES_NAME":[],"X_CALENDAR_YEAR":[],"Y_PREVALENCE_1000PP":[]},"PREVALENCE_BY_MONTH":{"X_CALENDAR_MONTH":[],"Y_PREVALENCE_1000PP":[]},"PROCEDURE_FREQUENCY_DISTRIBUTION":{"Y_NUM_PERSONS":0,"X_COUNT":1},"PROCEDURES_BY_TYPE":{"CONCEPT_NAME":"EHR order list entry","COUNT_VALUE":13},"AGE_AT_FIRST_OCCURRENCE":{"CATEGORY":"MALE","MIN_VALUE":27,"P10_VALUE":27,"P25_VALUE":56,"MEDIAN_VALUE":56,"P75_VALUE":76,"P90_VALUE":79,"MAX_VALUE":79}} node_modules/.cache/babel-loader/39e10d55cef72cb0e450f4006733959e.json 
{"remainingRequest":"/Users/mac/WebstormProjects/learn-vue/vuemall/node_modules/babel-loader/lib/index.js!/Users/mac/WebstormProjects/learn-vue/vuemall/src/router/index.js","dependencies":[{"path":"/Users/mac/WebstormProjects/learn-vue/vuemall/src/router/index.js","mtime":1586078110126},{"path":"/Users/mac/WebstormProjects/learn-vue/vuemall/node_modules/cache-loader/dist/cjs.js","mtime":499162500000},{"path":"/Users/mac/WebstormProjects/learn-vue/vuemall/node_modules/babel-loader/lib/index.js","mtime":499162500000}],"contextDependencies":[],"result":[{"type":"Buffer","data":"base64:"},{"version":3,"sources":["/Users/mac/WebstormProjects/learn-vue/vuemall/src/router/index.js"],"names":["Vue","VueRouter","Home","Category","Cart","Profiles","use","routes","path","redirect","name","component","router","mode"],"mappings":";AAAA,OAAOA,GAAP,MAAgB,KAAhB;AACA,OAAOC,SAAP,MAAsB,YAAtB;;AAGA,IAAMC,IAAI,GAAG,SAAPA,IAAO;AAAA,SAAM,OAAO,iBAAP,CAAN;AAAA,CAAb;;AACA,IAAMC,QAAQ,GAAG,SAAXA,QAAW;AAAA,SAAM,OAAO,yBAAP,CAAN;AAAA,CAAjB;;AACA,IAAMC,IAAI,GAAG,SAAPA,IAAO;AAAA,SAAM,OAAO,iBAAP,CAAN;AAAA,CAAb;;AACA,IAAMC,QAAQ,GAAG,SAAXA,QAAW;AAAA,SAAM,OAAO,yBAAP,CAAN;AAAA,CAAjB;;AAEAL,GAAG,CAACM,GAAJ,CAAQL,SAAR;AAEE,IAAMM,MAAM,GAAG,CACb;AACEC,EAAAA,IAAI,EAAC,EADP;AAEEC,EAAAA,QAAQ,EAAC;AAFX,CADa,EAKf;AACED,EAAAA,IAAI,EAAE,OADR;AAEEE,EAAAA,IAAI,EAAE,MAFR;AAGEC,EAAAA,SAAS,EAAET;AAHb,CALe,EAUf;AACEM,EAAAA,IAAI,EAAE,WADR;AAEEE,EAAAA,IAAI,EAAE,UAFR;AAGEC,EAAAA,SAAS,EAAER;AAHb,CAVe,EAeb;AACEK,EAAAA,IAAI,EAAE,OADR;AAEEE,EAAAA,IAAI,EAAE,MAFR;AAGEC,EAAAA,SAAS,EAAEP;AAHb,CAfa,EAoBb;AACEI,EAAAA,IAAI,EAAE,WADR;AAEEE,EAAAA,IAAI,EAAE,UAFR;AAGEC,EAAAA,SAAS,EAAEN;AAHb,CApBa,CAAf;AA2BF,IAAMO,MAAM,GAAG,IAAIX,SAAJ,CAAc;AAC3BY,EAAAA,IAAI,EAAE,SADqB;AAE3BN,EAAAA,MAAM,EAANA;AAF2B,CAAd,CAAf;AAKA,eAAeK,MAAf","sourcesContent":["import Vue from 'vue'\nimport VueRouter from 'vue-router'\n\n\nconst Home = () => import('views/home/Home')\nconst Category = () => import('views/category/Category')\nconst Cart = () => import('views/cart/Cart')\nconst Profiles = () => import('views/profiles/Profiles')\n\nVue.use(VueRouter)\n\n const routes = [\n {\n path:'',\n redirect:'/home'\n },\n {\n path: '/home',\n name: 'Home',\n component: Home\n },\n {\n path: '/category',\n name: 'Category',\n component: Category\n },\n {\n path: '/cart',\n name: 'Cart',\n component: Cart\n },\n {\n path: '/profiles',\n name: 'Profiles',\n component: Profiles\n }\n]\n\nconst router = new VueRouter({\n mode: 'history',\n routes\n})\n\nexport default router\n"]}]}100-1000 { "train_batch_size": 16, "gradient_accumulation_steps": 1, "scheduler": { "type": "WarmupDecayLR", "params": { "total_num_steps": 300, "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 30 } }, "fp16": { "enabled": true, "initial_scale_power": 32, "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1 }, "zero_optimization": { "stage": 3, "allgather_bucket_size": 5e8, "reduce_bucket_size": 5e8, "offload_param": { "device": "cpu", "pin_memory": true }, "offload_optimizer": { "device": "cpu", "pin_memory": true } }, "activation_checkpointing": { "partition_activations": true, "cpu_checkpointing": true, "contiguous_memory_optimization": true, "number_checkpoints": 4 }, "zero_allow_untested_optimizer": true, "wall_clock_breakdown": false, "steps_per_print": 9999999999 }{"title":"ワカメ化龍驤改二でドーナツホール","author":"takesiman","description":"この子も難儀な髪の毛してますね。ツインテールは重力設定で逆立つというお約束事でもあるのでしょうか?
一応前回の動画で気になった箇所を微調整してみました。
この子も重力強くしすぎるとスカートが破綻するのですが、最後のシーンだけ耐久度テスト兼ねてサービスしてみましたw。","thumb":"//i.iwara.tv/sites/default/files/styles/thumbnail/public/videos/thumbnails/625476/thumbnail-625476_0001.jpg?itok=neZKpOvt","download":"https://www.iwara.tv/api/video/ekk65sb08rikbvrnk","origin":"https://www.iwara.tv/videos/ekk65sb08rikbvrnk"}{"name":"chevron_left","subject":7,"date":"1412010-024328","paths":{"Pen":{"strokes":[{"x":-169,"y":-861,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":-214,"y":-893,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":-251,"y":-913,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":-277,"y":-925,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":-289,"y":-919,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":-280,"y":-901,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-254,"y":-866,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-213,"y":-823,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-157,"y":-769,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-84,"y":-710,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":2,"y":-645,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":97,"y":-579,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":200,"y":-511,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":305,"y":-445,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":415,"y":-380,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":527,"y":-316,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":636,"y":-252,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":739,"y":-187,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":836,"y":-121,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":924,"y":-55,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":1004,"y":7,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":1071,"y":65,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":1124,"y":119,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":1160,"y":167,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":1178,"y":211,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":1182,"y":249,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":1168,"y":287,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":1140,"y":323,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":1093,"y":363,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":1032,"y":407,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":959,"y":458,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":878,"y":514,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":788,"y":577,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0},{"x
":692,"y":645,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":596,"y":712,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0},{"x":502,"y":780,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":35,"stroke_id":0},{"x":415,"y":840,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":36,"stroke_id":0},{"x":345,"y":896,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":37,"stroke_id":0},{"x":283,"y":938,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":38,"stroke_id":0},{"x":248,"y":973,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":39,"stroke_id":0},{"x":231,"y":989,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":40,"stroke_id":0},{"x":236,"y":991,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":41,"stroke_id":0},{"x":258,"y":980,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":42,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Stylistic ST5022 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}1-10 { "author": "Flywheel <>", "cite": "", "command": "poetry run python run.py", "config": { "check_bed_moving": { "description": "Check for duplicate slice positions (ImagePositionPatient)", "type": "boolean", "default": true }, "check_embedded_localizer": { "description": "Check for existance of embedded localizer", "type": "boolean", "default": true }, "check_instance_number_uniqueness": { "description": "Check for uniqueness of InstanceNumber", "type": "boolean", "default": true }, "check_series_consistency": { "description": "Check for inconsistent SeriesNumber", "type": "boolean", "default": true }, "check_slice_consistency": { "description": "Check for inconsistent slice locations", "type": "boolean", "default": true }, "tag": { "default": "dicom-qc", "description": "The tag to be added on input file upon run completion.", "type": "string" }, "debug": { "description": "Include debug output", "type": "boolean", "default": false } }, "custom": { "flywheel": { "suite": "Curation" }, "gear-builder": { "category": "qa", "image": "flywheel/dicom-qc:0.3.0" } }, "description": "Validate dicom archive on a set of hardcoded and user-specified rules", "environment": { "PATH": "/opt/poetry/bin:/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "PIP_NO_CACHE_DIR": "0", "POETRY_HOME": "/opt/poetry", "POETRY_VERSION": "1.1.11", "POETRY_VIRTUALENVS_CREATE": "false", "FLYWHEEL": "/flywheel/v0" }, "inputs": { "dicom": { "base": "file", "description": "Dicom Archive", "optional": false, "type": { "enum": [ "dicom" ] } }, "validation-schema": { "base": "file", "description": "A JSON template to validate file.info.header", "optional": false, "type": { "enum": [ "source code" ] } } }, "label": "Dicom QC", "license": "MIT", "maintainer": "Flywheel <>", "name": "dicom-qc", "source": "https://gitlab.com/flywheel-io/flywheel-apps/dicom-qc", "url": "https://gitlab.com/flywheel-io/flywheel-apps/dicom-qc/-/blob/master/README.md", "version": "0.3.0" } kckern/IsaiahExplorer { "id": 15328, "source": "parry", "verse_id": 18311, "verse_count": 1, "reference": "34:7", "title": "", "html": " wild oxen\/bullocks.<\/i> Known for their great strength, oxen and bullocks represent mighty people that will be destroyed.<\/p>

mighty ones.<\/i> This term may refer to the politically and socially important people of the world.<\/p>

soaked with blood.<\/i> The destruction will be so great that the land will be soaked with the blood of the wicked.<\/p> ", "audit": null }.vscode/settings.json { "files.associations": { "README": "markdown", ".x*": "shellscript", ".alias": "shellscript", "dot-*": "shellscript", "profile": "shellscript", "xkblib.h": "c" } }{ "name": "hapi-kalamata", "version": "0.1.3", "description": "Extensible REST API for Hapi.js + Bookshelf.js", "homepage": "https://github.com/zachelrath/hapi-kalamata", "bugs": "https://github.com/zachelrath/hapi-kalamata/issues", "author": ", ", "repository": { "type": "git", "url": "git://github.com/zachelrath/hapi-kalamata.git" }, "keywords": [ "bookshelf", "knex", "node", "hapi", "REST", "API" ], "peerDependencies": { "hapi": "^13.2.2", "bookshelf": "^0.7.7" }, "devDependencies": { "grunt": "0.4.5", "load-grunt-tasks": "0.6.0", "grunt-contrib-watch": "0.6.1", "grunt-jasmine-node": "git://github.com/fiznool/grunt-jasmine-node.git#c773421b608ce944454cb540a6e66575d2df09c6" }, "dependencies": { "body-parser": "^1.9.0", "bluebird": "^2.3.6" }, "scripts": { "test": "grunt" }, "main": "src/index.js" } public/data/weapons/fensalir.json { "name": "Fensalir", "might": 16, "range": 1, "sp": 400, "description": "Inflicts Atk-4 on foes within 2 spaces through their next actions at the start of each turn.", "type": "Lance", "link": "https://kagerochart.com/hero/weapons/fensalir", "color": "Blue", "magical": false, "threaten": { "atk": -4 }, "char_unique": true, "refinable":{ "type":"melee", "Special":"Fensalir", "remove":{ "threaten":true }, "nullify_enemy_bonuses": { "all":true }, "description": "Neutralizes foe’s bonuses (from skills like Fortify, Rally, etc.) during combat." }, "id": "fensalir" }homeassistant/components/isy994/translations/no.json { "config": { "abort": { "already_configured": "Enheten er allerede konfigurert" }, "error": { "cannot_connect": "Tilkobling mislyktes", "invalid_auth": "Ugyldig godkjenning", "invalid_host": "Vertsoppf\u00f8ringen var ikke i fullstendig URL-format, for eksempel http://192.168.10.100:80", "unknown": "Uventet feil" }, "flow_title": "Universelle enheter ISY994 {name} ({host})", "step": { "user": { "data": { "host": "URL", "password": "", "tls": "TLS-versjonen av ISY-kontrolleren.", "username": "Brukernavn" }, "description": "Vertsoppf\u00f8ringen m\u00e5 v\u00e6re i fullstendig URL-format, for eksempel http://192.168.10.100:80", "title": "Koble til ISY994" } } }, "options": { "step": { "init": { "data": { "ignore_string": "Ignorer streng", "restore_light_state": "Gjenopprett lysstyrke", "sensor_string": "Node sensor streng", "variable_sensor_string": "Variabel sensorstreng" }, "description": "Angi alternativene for ISY-integrering: \n \u2022 Nodesensorstreng: Alle enheter eller mapper som inneholder NodeSensor String i navnet, behandles som en sensor eller bin\u00e6r sensor. \n \u2022 Ignorer streng: Alle enheter med 'Ignorer streng' i navnet ignoreres. \n \u2022 Variabel sensorstreng: Alle variabler som inneholder \"Variabel sensorstreng\" vil bli lagt til som en sensor. 
\n \u2022 Gjenopprett lyslysstyrke: Hvis den er aktivert, gjenopprettes den forrige lysstyrken n\u00e5r du sl\u00e5r p\u00e5 et lys i stedet for enhetens innebygde p\u00e5-niv\u00e5.", "title": "ISY994 Alternativer" } } }, "title": "Universelle enheter ISY994" }src/main/resources/static/mas_json/2017_ijcai_3937399293377058783.json {"title": "Tensor Decomposition with Missing Indices.", "fields": ["computer science", "tensor", "discrete mathematics"], "abstract": null, "citation": "Citations (1)", "year": "2017", "departments": ["National Institute of Advanced Industrial Science and Technology"], "conf": "ijcai", "authors": [".....http://dblp.org/pers/hd/y/Yamaguchi:Yuto", ".....http://dblp.org/pers/hd/h/Hayashi:Kohei"], "pages": 7}bundie1990/new-websitesri/qartjs/1.0.2.json {"qart.js":"sha256-innJjWq6yvZk8RjZli9dJm2nRDMLH4gI+qaFv88jcNY=","qart.min.js":"sha256-gT/DZ}0 { "files": { "main.css": "/static/css/main.ad5ef9c8.chunk.css", "main.js": "/static/js/main.5c7e0de7.chunk.js", "main.js.map": "/static/js/main.5c7e0de7.chunk.js.map", "runtime-main.js": "/static/js/runtime-main.d42790b2.js", "runtime-main.js.map": "/static/js/runtime-main.d42790b2.js.map", "static/js/2.2dd9d609.chunk.js": "/static/js/2.2dd9d609.chunk.js", "static/js/2.2dd9d609.chunk.js.map": "/static/js/2.2dd9d609.chunk.js.map", "static/js/3.c78365ec.chunk.js": "/static/js/3.c78365ec.chunk.js", "static/js/3.c78365ec.chunk.js.map": "/static/js/3.c78365ec.chunk.js.map", "index.html": "/index.html", "static/css/main.ad5ef9c8.chunk.css.map": "/static/css/main.ad5ef9c8.chunk.css.map", "static/js/2.2dd9d609.chunk.js.LICENSE.txt": "/static/js/2.2dd9d609.chunk.js.LICENSE.txt" }, "entrypoints": [ "static/js/runtime-main.d42790b2.js", "static/js/2.2dd9d609.chunk.js", "static/css/main.ad5ef9c8.chunk.css", "static/js/main.5c7e0de7.chunk.js" ] }{ "name": "hcs4-assessed-exercise", "homepage": "", "authors": [ "alexmtmorgan <>", "brownjd6 <>", "rorybain " ], "description": "HCS4 Group Exercise", "main": "index.html", "private": true, "ignore": [ "**/.*", "node_modules", "bower_components", "test", "tests" ], "dependencies": { "angular-bootstrap": null, "angular-route": null, "angular-ui-router": null } } Data/Places/data/dhubri/education/school/602.json {"html_attributions": [], "results": [{"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 26.2094056, "lng": 90.0184321}, "viewport": {"northeast": {"lat": 26.21065713029151, "lng": 90.01962865000002}, "southwest": {"lat": 26.20795916970851, "lng": 90.01484245}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/school-71.png", "icon_background_color": "#7B9EB0", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/school_pinlet", "name": "Dholagaon 1 No L.P.School", "photos": [{"height": 2448, "html_attributions": ["Mohesh Teli"], "photo_reference": "A", "width": 3264}], "place_id": "ChIJseD00-p3WDcRGtNj4Z3ho98", "reference": "ChIJseD00-p3WDcRGtNj4Z3ho98", "scope": "GOOGLE", "types": ["school", "point_of_interest", "establishment"], "vicinity": "6259+Q97, Kazigaon Part VI"}], "status": "OK"}mhansonp/geodegeode-assembly/src/acceptanceTest/resources/security.json { "_comments" : ["This file to be consumed by test class LogsAreFullyRedactedAcceptanceTest", "Each user will have the same 'admin' roll / permissions.", "Each username and password will be distinct for identification but contain 'abcdefg',", "making each line immediately identifiable but still easily parsable for an offending log.", "", "Most of these username-password 
combinations will not actually be consumed, but are listed.", "here for completion.", "Take care to not accidentally use 'password' in any username." ], "roles": [ { "name": "superuser", "operationsAllowed": [ "CLUSTER:READ", "CLUSTER:MANAGE", "CLUSTER:WRITE", "DATA:READ", "DATA:MANAGE", "DATA:WRITE" ] } ], "users": [ { "name": "propertyFileUser", "password": "", "roles": ["superuser"] }, { "name": "securityPropertyFileUser", "password": "", "roles": ["superuser"] }, { "name": "viaStartMemberOptions", "password": "", "roles": ["superuser"] }, { "name": "user-jd", "password": "", "roles": ["superuser"] } ] } {"resourceType":"ValueSet","id":"identity-assuranceLevel","meta":{"lastUpdated":"2017-04-19T07:44:43.294+10:00","profile":["http://hl7.org/fhir/StructureDefinition/shareablevalueset"]},"extension":[{"url":"http://hl7.org/fhir/StructureDefinition/structuredefinition-ballot-status","valueString":"Informative"},{"url":"http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm","valueInteger":2},{"url":"http://hl7.org/fhir/StructureDefinition/structuredefinition-wg","valueCode":"pa"}],"url":"http://hl7.org/fhir/ValueSet/identity-assuranceLevel","identifier":[{"system":"urn:ietf:rfc:3986","value":"urn:oid:2.16.840.1.113883.4.642.3.640"}],"version":"3.0.2","name":"IdentityAssuranceLevel","status":"draft","experimental":false,"date":"2017-04-19T07:44:43+10:00","publisher":"HL7 (FHIR Project)","contact":[{"telecom":[{"system":"url","value":"http://hl7.org/fhir"},{"system":"email","value":""}]}],"description":"The level of confidence that this link represents the same actual person, based on NIST Authentication Levels.","immutable":true,"compose":{"include":[{"system":"http://hl7.org/fhir/identity-assuranceLevel"}]},"expansion":{"identifier":"urn:uuid:b99e410e-4e02-44c3-b192-c4feb318dc23","timestamp":"2017-04-17T08:38:54+10:00","total":4,"parameter":[{"name":"version","valueUri":"http://hl7.org/fhir/identity-assuranceLevel|3.0.2"}],"contains":[{"system":"http://hl7.org/fhir/identity-assuranceLevel","code":"level1","display":"Level 1"},{"system":"http://hl7.org/fhir/identity-assuranceLevel","code":"level2","display":"Level 2"},{"system":"http://hl7.org/fhir/identity-assuranceLevel","code":"level3","display":"Level 3"},{"system":"http://hl7.org/fhir/identity-assuranceLevel","code":"level4","display":"Level 4"}]}}hpde/hpde.io { "Spase": { "xmlns": "http://www.spase-group.org/data/schema", "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance", "xsi:schemaLocation": "http://www.spase-group.org/data/schema http://www.spase-group.org/data/schema/spase-2_3_0.xsd", "Version": "2.3.0", "DisplayData": { "ResourceID": "spase://GBO/DisplayData/ICESTAR/South.Pole/Dayplot/Overview/PT10S", "ResourceHeader": { "ResourceName": "ICESTAR South Pole Overview Dayplots", "AlternateName": "South Pole ICESTAR.Dayplot.Overview Dayplots", "ReleaseDate": "2019-05-05T12:34:56Z", "Description": "South Pole ICESTAR.Dayplot.Overview Dayplots", "Acknowledgement": "Please acknowledge , , , , and whenever ICESTAR generated dayplots of this format are used in publication.", "Contact": [ { "PersonID": "spase://SMWG/Person/Louis.J.Lanzerotti", "Role": "PrincipalInvestigator" }, { "PersonID": "spase://SMWG/Person/Roger.L.Arnoldy", "Role": "PrincipalInvestigator" }, { "PersonID": "spase://SMWG/Person/Theodore.J.Rosenberg", "Role": "PrincipalInvestigator" }, { "PersonID": "spase://SMWG/Person/Allan.T.Weatherwax", "Role": "PrincipalInvestigator" }, { "PersonID": "spase://SMWG/Person/Umran.S.Inan", "Role": 
"PrincipalInvestigator" }, { "PersonID": "spase://SMWG/Person/Lee.Frost.Bargatze", "Role": "MetadataContact" } ], "Association": { "AssociationID": "spase://SMWG/Observatory/ICESTAR/South.Pole", "AssociationType": "ObservedBy" }, "PriorID": "spase://VMO/DisplayData/ICESTAR/South.Pole/ICESTAR.Dayplot.Overview/PT10S" }, "AccessInformation": { "RepositoryID": "spase://SMWG/Repository/ICESTAR", "Availability": "Online", "AccessRights": "Open", "AccessURL": { "Name": "ICESTAR Data Set URL Address", "URL": "http://vmo.igpp.ucla.edu/data1/ICESTAR/", "Description": "URL for ICESTAR data hosted on the UCLA VMO Data Repository" }, "Format": "GIF", "Encoding": "None", "DataExtent": { "Quantity": "40000", "Units": "bytes" }, "Acknowledgement": "Please acknowledge , , Institute for Physical Science and Technology, University of Maryland, College Park, MD 20742-2431 for providing access to South Pole data resources." }, "ProcessingLevel": "Calibrated", "InstrumentID": [ "spase://SMWG/Instrument/ICESTAR/South.Pole/Magnetometer", "spase://SMWG/Instrument/ICESTAR/South.Pole/SearchCoil", "spase://SMWG/Instrument/ICESTAR/South.Pole/Riometer", "spase://SMWG/Instrument/ICESTAR/South.Pole/Photometer", "spase://SMWG/Instrument/ICESTAR/South.Pole/Antenna" ], "MeasurementType": [ "MagneticField", "MagneticField", "Waves.Passive", "Irradiance", "Waves.Passive" ], "TemporalDescription": { "TimeSpan": { "StartDate": "1986-01-01T00:00:00.000", "StopDate": "2007-06-21T23:59:50.000", "Note": "Some data gaps may be present. If a gap is present at the beginning or end of the file, the time span start and end dates could be inaccurate as they are assigned assuming an absence of such gaps." }, "Cadence": "PT10S" }, "DisplayCadence": "P1D", "ObservedRegion": "Earth.Surface" } } }package.json { "name": "vue-cli-plugin-docs", "version": "1.0.2", "description": "vue-cli plugin to generate readme files based on .vue file docs tags.", "main": "index.js", "keywords": [ "vue", "cli", "documentation" ], "author": "", "license": "MIT", "repository": { "type": "git", "url": "git+https://github.com/ThatWionGuy/vue-cli-plugin-docs.git" }, "homepage": "https://github.com/ThatWionGuy/vue-cli-plugin-docs", "dependencies": { "@vue/cli-shared-utils": "^4.2.3" } } { "authors": [ { "author": "" }, { "author": "" }, { "author": "" }, { "author": "" }, { "author": "" }, { "author": "" } ], "doi": "10.3889/oamjms.2019.487", "publication_date": "2019-07-19", "id": "EN112068", "url": "https://pubmed.ncbi.nlm.nih.gov/31316668", "source": "Open access Macedonian journal of medical sciences", "source_url": "", "licence": "CC BY-NC", "language": "en", "type": "pubmed", "description": "", "text": "A 50-years-old male, came with the main complaint of double vision 1 month after nasal polyp surgery. He also complained his right eye turned outward. The visual acuity on the right eye was 6/7.5 with his head turn to the left. On the examination, the Hirschberg test was XT 45°, and the Krimsky test > 95 ∆BI. Duction and version test on the right eye were -4 adduction. There was no shifting on the cover-uncover test. Ishihara test was within normal limit, and there was suppression on the right eye in WFDT. On force generation test, we found limited adduction on the right eye and no restriction in force duction test. Head MRI showed atrophy of medial recti on the right eye, 2.2 mm in size. The patient underwent vertical muscle transposition procedure surgery, and it was found atrophy of medial recti without any rupture. 
Two months after surgery, the double vision was decreased, the result of the Hirschberg test was XT 30° and Krimsky test 65°∆BI." }1-10 {"status":200,"data":{"totalSubs":46,"subsInEachSource":{"feedly":23,"inoreader":17,"feedsPub":6},"failedSources":{}},"lastModified":1610932948929}{"id": 25924, "name": "", "qualified_name": " (Master Combat Achievements)", "examine": "I wonder what happens if I rub it...", "members": true, "release_date": "2021-07-21", "quest": false, "weight": 0.1, "value": 1, "tradeable": false, "stackable": false, "noteable": false, "equipable": false, "tradeable_ge": false, "icon": " "wiki_url": "https://oldschool.runescape.wiki/w/Antique_lamp_(Master_Combat_Achievements)"}// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: // https://github.com/microsoft/vscode-dev-containers/tree/v0.205.2/containers/ubuntu { "name": "Ubuntu", "build": { "dockerfile": "Dockerfile", // Update 'VARIANT' to pick an Ubuntu version: hirsute, focal, bionic // Use hirsute or bionic on local arm64/Apple Silicon. "args": { "VARIANT": "dapr-containerapps" } }, // Set *default* container specific settings.json values on container create. "settings": {}, "containerEnv": { "BIND_LOCALHOST_DOCKER": "true" }, "runArgs": [ // Enable ptrace-based debugging for go "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined", //"--privileged" ], // Add the IDs of extensions you want installed when the container is created. "extensions": [ "ms-azuretools.vscode-bicep", "ms-azuretools.vscode-dapr", "ms-azuretools.vscode-docker" ], "mounts": [ // Mount docker-in-docker library volume "type=volume,source=dind-var-lib-docker,target=/var/lib/docker", // Bind mount docker socket under an alias to support docker-from-docker "type=bind,source=/var/run/docker.sock,target=/var/run/docker-host.sock", // Uncomment to clone local .kube/config into devcontainer // "type=bind,source=${env:HOME}${env:USERPROFILE}/.kube,target=/home/dapr/.kube-localhost", // Uncomment to additionally clone minikube certs into devcontainer for use with .kube/config // "type=bind,source=${env:HOME}${env:USERPROFILE}/.minikube,target=/home/dapr/.minikube-localhost" ], // Use 'forwardPorts' to make a list of ports inside the container available locally. // "forwardPorts": [], // Use 'postCreateCommand' to run commands after the container is created. // "postStartCommand": "sudo chmod 777 /var/run/docker.sock", // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
"remoteUser": "vscode", "features": { "azure-cli": "latest" } }json_schema_corpus/pp_16753.json { "title": "JSON schema for the ASP.NET global configuration files", "$schema": "http://json-schema.org/draft-04/schema#", "type": "object", "additionalProperties": true, "required": [ "sources" ], "properties": { "sources": { "type": "array", "description": "A list of source folders relative to this file.", "items": { "type": "string" } } } }{ "id": 21848, "name": "Snow imp costume head", "incomplete": false, "members": false, "tradeable": false, "tradeable_on_ge": false, "stackable": false, "stacked": null, "noted": false, "noteable": false, "linked_id_item": 21847, "linked_id_noted": null, "linked_id_placeholder": null, "placeholder": true, "equipable": false, "equipable_by_player": false, "equipable_weapon": false, "cost": 1, "lowalch": null, "highalch": null, "weight": 0.226, "buy_limit": null, "quest_item": false, "release_date": "2017-12-11", "duplicate": true, "examine": "A poorly made fake snow imp head.", "icon": " "wiki_name": "Snow imp costume head", "wiki_url": "https://oldschool.runescape.wiki/w/Snow_imp_costume_head", "wiki_exchange": null, "equipment": null, "weapon": null }1-10 { "type": "API Path Parameters", "definition": { "text": "API Path Parameters are embedded as part of an URL path. They allow you to specify the unique path to your endpoint.", "updated": 1622771530317 }, "paragraphs": [ { "style": "Title", "text": "How to identify API Path Parameters", "updated": 1622772536805 }, { "style": "Text", "text": "The API you are trying to map should have it's API Path Parameters documented. If not, check any examples they may provide for how to request the data and see if there are inner folders below the main folder of the API Enpoint. Each one of these folders are an API Path Parameter that needs to be defined.", "updated": 1622772590217 }, { "style": "Success", "text": "As with everything else in Superalgos, node children are considered clockwise. This is important because it will affect the order in which Superalgos builds the URL request you will send to the API.", "updated": 1622773291926 }, { "style": "Text", "text": "" }, { "style": "Include", "text": "Superalgos->Topic->Data Mining - Fetching Data From APIs->Main Workflow", "updated": 1621951167309 } ] }{"meta":{"build_time":"2021-06-01T07:04:25.950Z","license":"CC-BY-4.0","version":"2.0-beta"},"data":{"date":"2020-06-26","state":"IL","meta":{"data_quality_grade":"A","updated":"2020-06-26T04:00:00Z","tests":{"total_source":"totalTestsViral"}},"cases":{"total":141344,"confirmed":140291,"probable":1053},"tests":{"pcr":{"total":1490952,"pending":null,"encounters":{"total":null},"specimens":{"total":1490952,"positive":null,"negative":null},"people":{"total":null,"positive":141344,"negative":null}},"antibody":{"encounters":{"total":null,"positive":null,"negative":null},"people":{"total":null,"positive":null,"negative":null}},"antigen":{"encounters":{"total":null,"positive":null,"negative":null},"people":{"total":null,"positive":null,"negative":null}}},"outcomes":{"recovered":null,"hospitalized":{"total":null,"currently":1516,"in_icu":{"total":null,"currently":400},"on_ventilator":{"total":null,"currently":225}},"death":{"total":7048,"confirmed":6847,"probable":201}}}} { "name": "jquerylinkit", "version": "0.2.0", "description": "LinkIt is a simple jquery plugin to attach elements. 
Made it to test jquery plugin creations", "main": "jquery.linkIt.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "https://github.com/diegoalmesp/jquery.linkit.git" }, "keywords": [ "jquery", "plugin", "linkit" ], "author": "", "license": "MIT", "bugs": { "url": "https://github.com/diegoalmesp/jquery.linkit/issues" }, "homepage": "https://github.com/diegoalmesp/jquery.linkit" } jamoum/JavaScript_Challenges_Solutions10-100 { "author_id": "Matt", "author_url": "https://edabit.com/user/BkPgkDQGHm66X4Qai", "challenge_id": "8QTBwLzAdaM8wkrXu", "code": "function balanced(word) {\n \n}", "difficulty": "Very Hard", "instructions": "

We can assign a value to each character in a word, based on their position in the alphabet (a = 1, b = 2, ... , z = 26). A balanced word is one where the sum of values on the left-hand side of the word equals the sum of values on the right-hand side. For odd length words, the middle character (balance point) is ignored.

Write a function that returns true if the word is balanced, and false if it's not.

Examples

balanced(\"zips\") \u279e true\n// \"zips\" = \"zi|ps\" = 26+9|16+19 = 35|35 = true\n\nbalanced(\"brake\") \u279e false\n// \"brake\" = \"br|ke\" = 2+18|11+5 = 20|16 = false

Notes

  • All words will be lowercase, and have a minimum of 2 characters.
  • Palindromic words will always be balanced.
", "source_url": "https://edabit.com/challenge/8QTBwLzAdaM8wkrXu", "tags": [ "strings", "validation" ], "tests": "Test.assertEquals(balanced('at'), false)\nTest.assertEquals(balanced('forgetful'), false)\nTest.assertEquals(balanced('vegetation'), true)\nTest.assertEquals(balanced('disillusioned'), false)\nTest.assertEquals(balanced('abstract'), true)\nTest.assertEquals(balanced('clever'), false)\nTest.assertEquals(balanced('conditionalities'), true)\nTest.assertEquals(balanced('seasoning'), true)\nTest.assertEquals(balanced('uptight'), false)\nTest.assertEquals(balanced('ticket'), false)\nTest.assertEquals(balanced('calculate'), false)\nTest.assertEquals(balanced('measure'), false)\nTest.assertEquals(balanced('join'), false)\nTest.assertEquals(balanced('anesthesiologies'), true)\nTest.assertEquals(balanced('command'), false)\nTest.assertEquals(balanced('graphite'), true)\nTest.assertEquals(balanced('quadratically'), true)\nTest.assertEquals(balanced('right'), false)\nTest.assertEquals(balanced('fossil'), true)\nTest.assertEquals(balanced('sparkling'), false)\nTest.assertEquals(balanced('dolphin'), true)\nTest.assertEquals(balanced('baseball'), true)\nTest.assertEquals(balanced('immense'), false)\nTest.assertEquals(balanced('pattern'), true)\nTest.assertEquals(balanced('hand'), false)\nTest.assertEquals(balanced('radar'), true)\nTest.assertEquals(balanced('oven'), false)\nTest.assertEquals(balanced('immutability'), true)\nTest.assertEquals(balanced('kayak'), true)\nTest.assertEquals(balanced('bartender'), true)\nTest.assertEquals(balanced('weight'), false)\nTest.assertEquals(balanced('lightbulbs'), true)\nTest.assertEquals(balanced('tail'), true)", "title": "Balanced Words" }{"data":{"placeholderImage":{"childImageSharp":{"fluid":{"base64":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAfCAYAAADnTu3OAAAACXBIWXMAACxLAAAsSwGlPZapAAAGt0lEQVRIx22Wa2wU1xXHxxukIr7waKpIqAFVKjGoIYQ0ryIoInXchBIE+dSqgryIEqI+pHxIK1VKlS8pEB4B6qYPxZiHbGwckwCGGuJX1qzXa7OLnbWX9XofXnufszvv3XnduSf33rUXHLjWaGZn5vzmf8/9n3PNdXV1cfl8nisWizWqqi4CgBru3vHEmppEanZtQZb3S4pyVVGU1+htSRRd3IMGz/M13793qqX5h2damneEY9GjpVIpYJXLNswNXddjmqr+gL6nlkoPZHIdfq9LUJXHeUn8s6yqF1MzM3lNEOcZMK2I0JmO2R9N+qzPkxP01rM0zrEs19jY2EJYMBjcJQpCWC4KaB6g2RYMC1nnRGzUen20Gz032IGfGfwCb/C02+/EfVAsaR/Q2KHczEP3qZMk6Yxp2+AJj9unwwH7r2GvXe+77KwfaMNr3C241n0eNty8gJ8kx8897ejF4A3oy0z3s2DlGDfoG1oIVGYzbzWkQrB5+BJa72nH69yt8BQ5P1mBUBhs9LSze/RYc7MVPosELJgV1tL4eDTmogtbHTAa++k/pwL6c0Nfwksjnbh+5AreQq5JMNT5LsPLw53wxM02euCnPV9A7cB5+y8JP4iJmbdpfDKffYhk6R4gx7m+mZ7y1ZOpEEWIKls30ApbvF/CG7e74ZWRTvgVAdcPX2Eqf9x31q6f6AZ/MtZK40/Phu/P40Rk8n9vxrzwmPu8TVTgzUThiwSwzXeJHZu8F2Ez+cArt67hf0T96EBqHBpj3waJskU0PhqNcnV1dXeBwlho96H4bfjJN81sMZ73XmTT3j5yFd4O9sF/kuPgl3kwHIQJBGeNMr6cmQaUzm2h8YVCwTU1NXXPtK/dWt4WG599ZvgreNXf5XwUGcFf5eIQVIWqF4GgLMsGSdPApBdkpNLpj2m8LMt3px2ZK7XeWLj1ZGEKiqZuC5aBJcsEvlwCuVyGgiSDICvAE7OLsoIxBgbUVO3/TBCAy+v1VoBJVWL025N33u1VeJBMAxVVFfOCBKKkQL4ggECAkqKSsjNgTpwzV4Z8ZHLyERqfy+Vqrl+/znGh4DhT2N/Ts9bUDQshREAyJhBMahUM0wTbrpSy5VCOgw8Eirh7tsQqayaT2zE3bRezT7/bPT97lyAIw/QlWdWQViqD7WAwHVyBYQdSugaKbcBEUcdZzWRfkSTxJA1GyK7kcc/evTQH7Ec6nT5CXzIQtnnTxPGSBoJpgY0ccIhyqh7ZiKkEzC5AN1HgvT3bmX3i8VhFWiqVYr2NdI6X6Usmwo6gG9iwbEwBFGZRGHaqa47pumMEhpjEqby8nsaLkuTiLMviAoEAy2NjY+PDpMlmWIiD6R82HQKCKoi4QIOwoYBx+zOsnd+EoO8NKPLZP7FGExuoTLutrY2b79Sk4XbQQBHpNm+XyLQVyJB8fi1G4cPpPvj9nXb4zeRliF7chUufcDacWQly5MYlZp/uOhe3evVqBiUryeiJePyPFDiWV9HXERG3+AXc5i/CW3cuwfaJFtg13gxPTbRC19BRsA5xjvavJaD4jpMygIer5k4mk1w4HGYKfYMDP5PVEjrlK8CR/hw+0JfBDf1FOBgegs3BRthBgC+EWqEh1IHRfx/F4lHOQVd3A2/DS1Xgvn37OI/HM/9zka5K/u6ECX+/nkbH3FlocBfhXHgKtgZ
Pwa+DzfCLsc/hd5FOyLVsxfKJJTZ4PwCpkDlWBS5fvpzbtm1b1T6qkD+RUAE+7s7YJ2/m8ZH+PJy+lYXfhi7AL0eb4PVQB3w64wMhEwCzEGH20TRteMO61ZWdcOnSpeycyWbZjUQktEMxMXzSl3UO9WbwCXcGX/tWgF4+CqNyGgyy5xBvUe/gglTCkfg0mIaBEonptXc7DikbsmGxPHa0X3hEVlQ+MFuGiZTqSJqOSSUw28iCDHpZxwVRhGw+j8ulEi4IglWpGukPCxrt2bNnq/YRi8Urc76j7sZZVcaaXoZUNkfqXHIMoqhM9muT1DptP3SQxW2owmpra9nZmrNPNjnzvkIMPcinkG2YmOzXDtn0bcs0Hcep7rg0d4g02DuhUKjp8OHDjy9QSB5wsViM5XHQ692ok16ITMsmaqoAWs9kakVSrjdGR0c/PHfu3AvLli370QP/g9hLGkVvb2/VPmSR/BRCylEnFTQeiUT+3dPTs2f//v2PkeeL742lqVqw+9GxYsWK+c2f5fH48eOb3G7335qamrbSx98DsE49D9q5c+f9CletWsXNf+W+r91VwQAHDx7kVq5cyblcLubjxYsXs3X4Dqna94e+5CA6AAAAAElFTkSuQmCC","aspectRatio":0.6549828178694158,"src":"/static/2977433f18ad1345d63288f8a5747fd8/ddf78/phone1.png","srcSet":"/static/2977433f18ad1345d63288f8a5747fd8/a8610/phone1.png 50w,\n/static/2977433f18ad1345d63288f8a5747fd8/06786/phone1.png 100w,\n/static/2977433f18ad1345d63288f8a5747fd8/ddf78/phone1.png 200w,\n/static/2977433f18ad1345d63288f8a5747fd8/a37fa/phone1.png 300w,\n/static/2977433f18ad1345d63288f8a5747fd8/2a20f/phone1.png 400w,\n/static/2977433f18ad1345d63288f8a5747fd8/707ed/phone1.png 600w,\n/static/2977433f18ad1345d63288f8a5747fd8/d4161/phone1.png 953w","sizes":"(max-width: 200px) 100vw, 200px"}}}}}AppInsightsDemos/InvestmentManagerBasic { "ConnectionStrings": { "InvestmentDatabase": "Server=localhost\\SQLEXPRESS;Database=StockData;Trusted_Connection=True;MultipleActiveResultSets=true;Application Name=Investment Manager", "MiniProfilerDatabase": "Server=localhost\\SQLEXPRESS;Database=Miniprofiler;Trusted_Connection=True;Application Name=Investment Manager MiniProfiler" }, "StockIndexServiceUrl": "http://localhost:52505", "Logging": { "LogLevel": { "Default": "Warning" } }, "AllowedHosts": "*", "ApplicationInsights": { "InstrumentationKey": "" } 
}{"packages":{"wpackagist-plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon":{"1.0":{"name":"wpackagist-plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon","version":"1.0","version_normalized":"1.0.0.0","uid":156540,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon.1.0.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","reference":"tags\/1.0"},"homepage":"https:\/\/wordpress.org\/plugins\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.1":{"name":"wpackagist-plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon","version":"1.1","version_normalized":"1.1.0.0","uid":156541,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon.1.1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","reference":"tags\/1.1"},"homepage":"https:\/\/wordpress.org\/plugins\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"2.0b":{"name":"wpackagist-plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon","version":"2.0b","version_normalized":"2.0.0.0-beta","uid":156542,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon.2.0b.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","reference":"tags\/2.0b"},"homepage":"https:\/\/wordpress.org\/plugins\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"2.1":{"name":"wpackagist-plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon","version":"2.1","version_normalized":"2.1.0.0","uid":156543,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon.2.1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","reference":"tags\/2.1"},"homepage":"https:\/\/wordpress.org\/plugins\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"dev-trunk":{"name":"wpackagist-plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon","version":"dev-trunk","version_normalized":"9999999-dev","uid":156544,"time":"2007-08-31 10:20:35","dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/google-sitemap-generator-ultimate-tag-warrior-tags-addon.zip?timestamp=1188555635"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","reference":"trunk"},"homepage":"https:\/\/wordpress.org\/plugins\/google-sitemap-generator-ultimate-tag-warrior-tags-addon\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"}}}}{ "name": "artycles", "version": "0.3.5", "description": "Media API automation by generating different resolutions and (optionally) storing in the cloud", "author": "makesites", "contributors": [ { "name": "", "email": "" } ], "homepage": "http://makesites.org/projects/artycles", "keywords": [ "node", 
"content", "create", "aws" ], "repository": { "type": "git", "url": "https://github.com/makesites/artycles" }, "bugs": { "url": "https://github.com/makesites/artycles/issues" }, "license": "Apache-2.0", "licenses": [ { "type": "Apache License, Version 2.0", "url": "http://makesites.org/licenses/APACHE-2.0" } ], "main": "./index.js", "engines": { "node": ">=10.x.x" }, "dependencies": { "async": "2.6.2", "fluent-ffmpeg": "2.1.2", "grunt": "1.0.4", "grunt-responsive-images": "1.10.1", "grunt-responsive-videos": "0.1.3", "image-size": "0.7.4", "s3": "4.4.0" }, "devDependencies": { "request": "2.88.0" } } {"id": 3557, "title": "Ticket #3557: Allow delegated proxies to adjust their allowable clock skew", "description": "
\nallow the administrator to specify an allowable amount of clock skew for delegated proxies.\n\n

In the condor_config, GSI_DELEGATION_CLOCK_SKEW_ALLOWABLE controls the number of seconds allowed. If unset, or equal to zero, the default is used (which as of 2013-03-27 is five minutes).

", "remarks": "
\n
", "derived_tickets": "", "attachments": "", "check_ins": "\n\n\n\n\n\n\n\n\n\n\n\n\n
2013-May-28 13:15\n\u00a0 \nCheck-in [35892]: add defn of knob GSI_DELEGATION_CLOCK_SKEW_ALLOWABLE ===GT=== #3557 (By )
2013-May-08 15:28\n\u00a0 \nCheck-in [35668]: edit 7.9.6 version history item, and move it to section about new knobs ===GT=== #3557 (By )
2013-May-08 11:26\n\u00a0 \nCheck-in [35654]: add version history for #3557 (By )
2013-Mar-27 19:34\n\u00a0 \nCheck-in [35287]: add the ability to adjust clock skew to GSI delegations. #3557 (By )
", "type": "enhance", "last_change": "2013-Jun-17 14:56", "status": "resolved", "created": "2013-Mar-27 19:34", "fixed_version": "2013-Mar-27 19:34", "broken_version": "", "priority": "3", "subsystem": "Security", "assigned_to": "zmiller", "derived_from": "", "creator": "zmiller", "rust": "", "customer_group": "cms", "visibility": "public", "notify": "", "due_date": "20130327"}{ "name": "no_wifi", "url": "/emojis/6106-no_wifi/download", "filename": "no_wifi.gif", "base64": " }kba/ocrd_tesserocr { "git_url": "https://github.com/OCR-D/ocrd_tesserocr", "dockerhub": "ocrd/tesserocr", "tools": [ { "tags": ["Layout analysis"], "description": "Segment page into regions with tesseract", "executable": "ocrd-tesserocr-segment-line", "step": "layout/segmentation/line" }, { "tags": ["Layout analysis"], "description": "Segment regions into lines with tesseract", "executable": "ocrd-tesserocr-segment-region", "step": "layout/segmentation/region" }, { "tags": ["Text recognition and optimization"], "description": "Recognize text in lines with tesseract", "executable": "ocrd-tesserocr-recognize", "step": "recognition/text-recognition", "parameter": { "textequiv_level": { "type": "string", "enum": [ "page", "region", "line", "word", "glyph" ], "default": "line" } } } ] } 0 {"em":[{"location":{"latitude":51.46,"longitude":5.95}},{"location":{"latitude":52.35,"longitude":5.82}},{"location":{"latitude":53.33,"longitude":6.52}},{"location":{"latitude":52.72,"longitude":4.67}},{"location":{"latitude":52.13,"longitude":4.39}},{"location":{"latitude":53.27,"longitude":6.73}}],"bites":[{"location":{"latitude":51.74,"longitude":3.77}},{"location":{"latitude":52.26,"longitude":7}},{"location":{"latitude":52.84,"longitude":6.85}},{"location":{"latitude":52.06,"longitude":5.17}},{"location":{"latitude":52.42,"longitude":4.71}},{"location":{"latitude":52.42,"longitude":4.88}},{"location":{"latitude":52.2,"longitude":4.51}},{"location":{"latitude":52.89,"longitude":4.75}},{"location":{"latitude":52.09,"longitude":5.73}},{"location":{"latitude":52.39,"longitude":5.93}},{"location":{"latitude":52.29,"longitude":6.18}},{"location":{"latitude":52.36,"longitude":5.68}},{"location":{"latitude":53.09,"longitude":6.7}},{"location":{"latitude":52.65,"longitude":4.72}},{"location":{"latitude":52.21,"longitude":5.75}},{"location":{"latitude":52.39,"longitude":4.95}},{"location":{"latitude":53.34,"longitude":6.43}},{"location":{"latitude":52.27,"longitude":6.78}},{"location":{"latitude":53.41,"longitude":5.35}},{"location":{"latitude":52.15,"longitude":5.2}}],"other":[{"location":{"latitude":52.15,"longitude":7.56}}],"fever":[{"location":{"latitude":52.1,"longitude":5.17}}]}{ "user": "cortadoverde", "repos": 1, "login": "cortadoverde", "id": 4176494, "avatar_url": "https://avatars1.githubusercontent.com/u/4176494?v=3", "url": "https://api.github.com/users/cortadoverde", "html_url": "https://github.com/cortadoverde", "followers_url": "https://api.github.com/users/cortadoverde/followers", "following_url": "https://api.github.com/users/cortadoverde/following{/other_user}", "gists_url": "https://api.github.com/users/cortadoverde/gists{/gist_id}", "starred_url": "https://api.github.com/users/cortadoverde/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cortadoverde/subscriptions", "organizations_url": "https://api.github.com/users/cortadoverde/orgs", "repos_url": "https://api.github.com/users/cortadoverde/repos", "events_url": "https://api.github.com/users/cortadoverde/events{/privacy}", "received_events_url": 
"https://api.github.com/users/cortadoverde/received_events", "type": "User", "site_admin": false, "name": "", "company": null, "blog": null, "location": "Villa Maria, Cordoba, Argentina", "email": null, "hireable": true, "bio": "Dev", "public_repos": 17, "public_gists": 18, "followers": 1, "following": 2, "created_at": "2013-04-17T01:32:44Z", "updated_at": "2017-02-07T04:52:31Z" }node_modules/.cache/babel-loader/2c15a133e33e3cb536007c69c7b71ff2.json {"ast":null,"code":"import { jsxDEV as _jsxDEV } from \"react/jsx-dev-runtime\";\nimport { Fragment as _Fragment } from \"react/jsx-dev-runtime\";\nvar _jsxFileName = \"D:\\\\React_Project\\\\algorithm_app\\\\src\\\\Components\\\\pages\\\\Navbar.js\";\nimport react from 'react';\nimport { Link } from 'react-router-dom';\nimport { FaBars, FaTimes } from 'react-icons/fa';\nimport { MdFingerprint } from 'react-icons/md';\n\nfunction Navbar() {\n return /*#__PURE__*/_jsxDEV(_Fragment, {\n children: /*#__PURE__*/_jsxDEV(\"div\", {\n className: \"navbar\",\n children: /*#__PURE__*/_jsxDEV(\"div\", {\n className: \"navbar-container container\",\n children: [/*#__PURE__*/_jsxDEV(Link, {\n to: \"/\",\n classname: \"navbar-logo\",\n children: [/*#__PURE__*/_jsxDEV(MdFingerprint, {\n className: \"navbar-icon\"\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 12,\n columnNumber: 25\n }, this), \"LAVA\"]\n }, void 0, true, {\n fileName: _jsxFileName,\n lineNumber: 11,\n columnNumber: 21\n }, this), /*#__PURE__*/_jsxDEV(\"div\", {\n className: \"menu-icon\",\n children: click ? /*#__PURE__*/_jsxDEV(FaTimes, {}, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 16,\n columnNumber: 30\n }, this) : /*#__PURE__*/_jsxDEV(FaBars, {}, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 16,\n columnNumber: 43\n }, this)\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 15,\n columnNumber: 21\n }, this)]\n }, void 0, true, {\n fileName: _jsxFileName,\n lineNumber: 10,\n columnNumber: 17\n }, this)\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 9,\n columnNumber: 13\n }, this)\n }, void 0, false);\n}\n\n_c = Navbar;\nexport default Navbar;\n\nvar _c;\n\n$RefreshReg$(_c, \"Navbar\");","map":{"version":3,"sources":["D:/React_Project/algorithm_app/src/Components/pages/Navbar.js"],"names":["react","Link","FaBars","FaTimes","MdFingerprint","Navbar","click"],"mappings":";;;AAAA,OAAOA,KAAP,MAAkB,OAAlB;AACA,SAASC,IAAT,QAAqB,kBAArB;AACA,SAASC,MAAT,EAAiBC,OAAjB,QAAgC,gBAAhC;AACA,SAASC,aAAT,QAA8B,gBAA9B;;AAEA,SAASC,MAAT,GAAiB;AACb,sBACI;AAAA,2BACI;AAAK,MAAA,SAAS,EAAG,QAAjB;AAAA,6BACI;AAAK,QAAA,SAAS,EAAG,4BAAjB;AAAA,gCACI,QAAC,IAAD;AAAM,UAAA,EAAE,EAAG,GAAX;AAAe,UAAA,SAAS,EAAG,aAA3B;AAAA,kCACI,QAAC,aAAD;AAAe,YAAA,SAAS,EAAG;AAA3B;AAAA;AAAA;AAAA;AAAA,kBADJ;AAAA;AAAA;AAAA;AAAA;AAAA,gBADJ,eAKI;AAAK,UAAA,SAAS,EAAC,WAAf;AAAA,oBACCC,KAAK,gBAAG,QAAC,OAAD;AAAA;AAAA;AAAA;AAAA,kBAAH,gBAAgB,QAAC,MAAD;AAAA;AAAA;AAAA;AAAA;AADtB;AAAA;AAAA;AAAA;AAAA,gBALJ;AAAA;AAAA;AAAA;AAAA;AAAA;AADJ;AAAA;AAAA;AAAA;AAAA;AADJ,mBADJ;AAeH;;KAhBQD,M;AAkBT,eAAeA,MAAf","sourcesContent":["import react from 'react'\r\nimport { Link } from 'react-router-dom'\r\nimport { FaBars, FaTimes } from 'react-icons/fa'\r\nimport { MdFingerprint } from 'react-icons/md'\r\n\r\nfunction Navbar(){\r\n return(\r\n <>\r\n
\r\n
\r\n \r\n \r\n LAVA\r\n \r\n
\r\n {click ? : }\r\n
\r\n
\r\n
\r\n \r\n )\r\n}\r\n\r\nexport default Navbar;"]},"metadata":{},"sourceType":"module"}10-100 { "user": "Xaeonn", "repos": 1, "login": "Xaeonn", "id": 2415366, "avatar_url": "https://avatars0.githubusercontent.com/u/2415366?v=3", "url": "https://api.github.com/users/Xaeonn", "html_url": "https://github.com/Xaeonn", "followers_url": "https://api.github.com/users/Xaeonn/followers", "following_url": "https://api.github.com/users/Xaeonn/following{/other_user}", "gists_url": "https://api.github.com/users/Xaeonn/gists{/gist_id}", "starred_url": "https://api.github.com/users/Xaeonn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Xaeonn/subscriptions", "organizations_url": "https://api.github.com/users/Xaeonn/orgs", "repos_url": "https://api.github.com/users/Xaeonn/repos", "events_url": "https://api.github.com/users/Xaeonn/events{/privacy}", "received_events_url": "https://api.github.com/users/Xaeonn/received_events", "type": "User", "site_admin": false, "name": "", "company": null, "blog": "sam.boles.eu", "location": "Dublin, Ireland", "email": null, "hireable": null, "bio": null, "public_repos": 12, "public_gists": 1, "followers": 5, "following": 5, "created_at": "2012-09-24T22:08:42Z", "updated_at": "2017-02-22T09:38:36Z" }{ "name": "dawn-breaker-js13k-2019", "version": "1.0.0", "main": "index.js", "repository": "https://github.com/SouthpawGoblin/dawn-breaker-js13k-2019.git", "author": "Yinzhe-Qi <>", "license": "MIT", "private": true, "scripts": { "serve": "webpack-dev-server --open --config webpack.dev.js", "build": "webpack --config webpack.prod.js" }, "dependencies": { "kontra": "^6.2.3" }, "devDependencies": { "clean-webpack-plugin": "^3.0.0", "html-webpack-plugin": "^3.2.0", "webpack": "^4.39.2", "webpack-cli": "^3.3.6", "webpack-dev-server": "^3.8.0", "webpack-merge": "^4.2.1" } } {"number":"52.214-26","title":"Audit and Records—Sealed Bidding.","children":[{"text":"As prescribed in 14.201-7(a)(1), insert the following clause:","type":"paragraph","level":null},{"text":"Audit and Records—Sealed Bidding (Oct 2010)","type":"paragraph","level":null},{"text":"(a) As used in this clause, “records” includes books, documents, accounting procedures and practices, and other data, regardless of type and regardless of whether such items are in written form, in the form of computer data, or in any other form.","type":"paragraph","level":null},{"text":"(b) Certified cost or pricing data. If the Contractor has been required to submit certified cost or pricing data in connection with the pricing of any modification to this contract, the Contracting Officer, or an authorized representative of the Contracting Officer, in order to evaluate the accuracy, completeness, and currency of the certified cost or pricing data, shall have the right to examine and audit all of the Contractor’s records, including computations and projections, related to—","type":"paragraph","level":null},{"text":"(1) The proposal for the modification;","type":"outline","level":1},{"text":"(2) The discussions conducted on the proposal(s), including those related to negotiating;","type":"outline","level":1},{"text":"(3) Pricing of the modification; or","type":"outline","level":1},{"text":"(4) Performance of the modification.","type":"outline","level":1},{"text":"(c) Comptroller General. 
In the case of pricing any modification, the Comptroller General of the United States, or an authorized representative, shall have the same rights as specified in paragraph (b) of this clause and also the right to interview any current employee regarding such transactions.","type":"paragraph","level":null},{"text":"(d) Availability. The Contractor shall make available at its office at all reasonable times the materials described in paragraph (b) of this clause, for examination, audit, or reproduction, until 3 years after final payment under this contract, or for any other period specified in Subpart 4.7 of the Federal Acquisition Regulation (FAR). FAR Subpart 4.7, Contractor Records Retention, in effect on the date of this contract, is incorporated by reference in its entirety and made a part of this contract.","type":"paragraph","level":null},{"text":"(1) If this contract is completely or partially terminated, the records relating to the work terminated shall be made available for 3 years after any resulting final termination settlement.","type":"outline","level":1},{"text":"(2) Records pertaining to appeals under the Disputes clause or to litigation or the settlement of claims arising under or relating to the performance of this contract shall be made available until disposition of such appeals, litigation, or claims.","type":"outline","level":1},{"text":"(e) The Contractor shall insert a clause containing all the provisions of this clause, including this paragraph (e), in all subcontracts expected to exceed the threshold in FAR 15.403-4(a)(1) for submission of certified cost or pricing data.","type":"paragraph","level":null},{"text":"(End of clause)","type":"paragraph","level":null},{"text":"Alternate I (Mar 2009). As prescribed in 14.201-7(a)(2), substitute the following paragraphs (c) and (e) for paragraphs (c) and (e) of the basic clause:","type":"paragraph","level":null},{"text":"(c) The Comptroller General of the United States, an appropriate Inspector General appointed under section 3 or 8G of the Inspector General Act of 1978 (5 U.S.C. App.), or an authorized representative of either of the foregoing officials, shall have access to and the right to—","type":"paragraph","level":null},{"text":"(1) Examine any of the Contractor’s or any subcontractors' records that pertain to, and involve transactions relating to, this contract or a subcontract hereunder; and","type":"paragraph","level":null},{"text":"(2) Interview any officer or employee regarding such transactions.","type":"paragraph","level":null},{"text":"(e)(1) Except as provided in paragraph (e)(2), the Contractor shall insert a clause containing the provisions of this clause, including this paragraph (e), in all subcontracts.","type":"paragraph","level":null},{"text":"(2) The authority of the Inspector General under paragraph (c)(2) of this clause does not flow down to subcontracts.","type":"paragraph","level":null}]} data/nber/3251.json { "id": 3251, "citation_title": "Unemployment and the Demand for Unions", "citation_author": [ "", "", "", "" ], "citation_publication_date": "1990-02-01", "issue_date": "1990-02-01", "revision_date": "None", "topics": null, "program": [ "Labor Studies" ], "projects": null, "working_groups": null, "abstract": "\n\nWhy do people join open-shop unions when they would receive union wage rates even if they were not members? Why are unionization rates so low in the south-east of England? 
To address these questions, which we treat as interrelated, the paper considers the idea that unions offer insurance against victimization and arbitrary dismissal. Consistent with our theoretical approach, we find that union density is greatest, ceteris paribus, within establishments in areas of high unemployment.\n\n", "acknowledgement": "\n" }{"derivation": "from G1 (\u1f00\u03c0\u03ac\u03c4\u03c9\u03c1) (as a negative particle) and G3962 (\u1f00\u03c0\u03ac\u03c4\u03c9\u03c1);", "kjv_def": "without father", "lemma": "\u1f00\u03c0\u03ac\u03c4\u03c9\u03c1", "frequency": 1, "strongs_def": " fatherless, i.e. of unrecorded paternity", "outline": "
  1. whose father is not recorded in the genealogies
"}{"title": "Efficient transaction processing in SAP HANA database: the end of a column store myth.", "fields": ["scalability", "external data representation", "data management", "sap hana", "transaction processing"], "abstract": "The SAP HANA database is the core of SAP's new data management platform. The overall goal of the SAP HANA database is to provide a generic but powerful system for different query scenarios, both transactional and analytical, on the same data representation within a highly scalable execution environment. Within this paper, we highlight the main features that differentiate the SAP HANA database from classical relational database engines. Therefore, we outline the general architecture and design criteria of the SAP HANA in a first step. In a second step, we challenge the common belief that column store data structures are only superior in analytical workloads and not well suited for transactional workloads. We outline the concept of record life cycle management to use different storage formats for the different stages of a record. We not only discuss the general concept but also dive into some of the details of how to efficiently propagate records through their life cycle and moving database entries from write-optimized to read-optimized storage formats. In summary, the paper aims at illustrating how the SAP HANA database is able to efficiently work in analytical as well as transactional workload environments.", "citation": "Citations (168)", "year": "2012", "departments": ["SAP, Palo Alto, CA, USA", "SAP, Walldorf, Germany", "SAP, Walldorf, Germany", "SAP, Seoul, South Korea", "SAP, Walldorf, Germany"], "conf": "sigmod", "authors": [".....http://dblp.org/pers/hd/s/Sikka:Vishal", "\u00e4rber.....http://dblp.org/pers/hd/f/F=auml=rber:Franz", ".....http://dblp.org/pers/hd/l/Lehner:Wolfgang", ".....http://dblp.org/pers/hd/c/Cha:Sang_Kyun", ".....http://dblp.org/pers/hd/p/Peh:Thomas", "\u00f6vd.....http://dblp.org/pers/hd/b/Bornh=ouml=vd:Christof"], "pages": 12}0 { "author": { "id": "t2_j8fsx", "name": "TheFlyingMinotaur" }, "date": { "day": 1535155200, "full": 1535158529, "month": 1533081600, "week": 1534636800 }, "id": "t3_9a2v5f", "misc": { "postHint": "image" }, "picture": { "filesize": 117976, "fullUrl": "https://preview.redd.it/9vydww0a05i11.png?auto=webp&s=10463d03af6f0ea21a5199ca3ca26cb92e8d3c8e", "hash": "8c676f6c78", "height": 505, "lqip": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAANCAIAAAAv2XlzAAAACXBIWXMAAAsTAAALEwEAmpwYAAABe0lEQVQokW2S3a6qMBCFff/XMVGCMRqz04bSYkkBKQoVR08MEIw/FyBQd7bseOI5fpczs7KmXTN4fKLrurZtP7YG/4/u9/vdbpckSV3Xj8dDa/2v4FUKggAhxBgjhJimeT6f+27Pm0NVVVpr/kQpxRjDGDuOs16vT6fTm0PbtlEUOY6TZZllWV9PMMZSSkqplHK1WgFAnue/gvv9bhgGQohzTil1XVcIgTEejUaWZTHGfN/3PI9S2jTN70pKKdM0EUIA8OdJmqbL5XI6nQ6HQ8MwFotFkiT9e34EWZYJIeI43m63cRxzzhljs9ksDEPXdcfjMULI8zxCSFEUA611VVVSSoyx53kA4Pu+Usp1XUIIPFFKAcB8Pr/dbj8OZVmGYVgUhRCCEBIEwWazAQAhxGQyCYLAsqwkSWzbbprmbw6Hw0FKWZalbduU0jzP0zTlnB+Px+v12v/7W9L9LQBAFEVlWQLA5XKp6/pD0i+01l3X9RNt23Zd96q/BN8qkTUSnNuFMAAAAABJRU5ErkJggg==", "thumbnailUrl": "https://b.thumbs.redditmedia.com/Z-v-OWh0Kbc-AveiQmix91MdWHrrojppw5IwwNTiXfU.jpg", "url": "https://preview.redd.it/9vydww0a05i11.png?width=640&crop=smart&auto=webp&s=cbd9468038a38bb21bd0f73a1a2359d094b192c3", "width": 640 }, "score": { "comments": 7, "downs": 0, "isCurated": false, "ratio": 1, "ups": 100, "value": 100 }, "subreddit": { "id": "t5_3isai", "name": "dndmaps" }, "tags": ["City"], "title": "Isometric Port Town", "url": 
"https://www.reddit.com/r/dndmaps/comments/9a2v5f/isometric_port_town/" } public/json/words/greek-205.json {"pos":"n","translits":{"akrothiniōn":{"heb.7.4|11":["the","best spoils,","the"]}},"meanings":{"good":1},"meaningsCount":1,"occurences":1}// VDMS Config File // This is the run-time config file // Sets database paths and other parameters { // Network "port": 55555, // Database paths "pmgd_path": "simpleAdd_db", "more-info": "github.com/IntelLabs/vdms" } emperor06/EmuTarkov-Server { "Name": "", "EntryPoints": "MallSE,MallNW", "Chance": 66, "MinTime": 0, "MaxTime": 0, "PlayersCount": 4, "ExfiltrationTime": 60, "PassageRequirement": "TransferItem", "ExfiltrationType": "SharedTimer", "Id": "5449016a4bdc2d6f028b456f", "Count": 3000, "RequirementTip": "EXFIL_Item" }.vscode/launch.json { // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ { "type": "node", "name": "jest-tests", "request": "launch", "args": ["--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-selectors", "request": "launch", "args": ["selectors", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-manifest", "request": "launch", "args": ["manifest", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-clip-selector", "request": "launch", "args": ["clip-selector", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-html", "request": "launch", "args": ["html.rollup", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-async-iife", "request": "launch", "args": ["async-iife", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-complete", "request": "launch", "args": ["complete", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" }, { "type": "node", "name": "test-validation", "request": "launch", "args": ["validation", "--runInBand"], "cwd": "${workspaceFolder}", "console": "internalConsole", "internalConsoleOptions": "openOnSessionStart", "program": "${workspaceFolder}/node_modules/jest/bin/jest" } ] } 1-10 "Good afternoon, young man. Need help with a good cause. As you have long guessed, I collect food, medicine, and other necessities in order to evacuate the people. I don't want to deceive you, this is true. And now they have enough reserves for the road. 
And most importantly, through my channels, I’ve managed to negotiate on the other side for these people to be met let through. Can you imagine? But only the civilian population, no men under 60. Therefore, the first wave will mostly be children with mothers and a few elderly people. Almost everything is ready, except for one little detail - we need transportation. We have several vehicles, just enough to carry everyone from the first wave, but they are not running. They have been idle for too long, as I was told, and need batteries and spark plugs. Even fuel is not necessary. Four batteries and 8 plugs. Can you get that?"{ "directions": [ "Place black beans into a large container and cover with several inches of cool water; let stand 8 hours to overnight. Drain beans and rinse with fresh water.", "Heat oil in a large Dutch oven or stockpot over medium heat; cook and stir yellow onion, paprika, and cumin seeds until fragrant, about 3 minutes. Add red bell pepper, green bell pepper, garlic, oregano, and bay leaves; cook and stir until onion is translucent and bell peppers are tender, 6 to 8 minutes.", "Mix vegetable stock, black beans, tempeh, rum, and jalapeno pepper into onion mixture. Increase heat to medium-high, bring to a simmer, reduce heat to medium-low, and cook until beans are tender, about 45 minutes.", "Pour water into bean mixture; season with 1 1/2 teaspoons salt. Cook, stirring occasionally, until beans begin to fall apart and soup thickens, 30 to 40 minutes; season with salt and pepper. Remove and discard bay leaves from soup.", "Ladle soup into bowls and garnish with hard-boiled eggs and red onion." ], "image": "https://images.media-allrecipes.com/userphotos/560x315/3887871.jpg", "ingredients": [ "1 pound dried black beans", "1/4 cup vegetable oil", "1 large yellow onion, finely chopped", "1 tablespoon smoked paprika", "2 teaspoons cumin seeds", "1 red bell pepper, diced", "1 green bell pepper, diced", "8 cloves garlic, minced", "2 teaspoons dried Mexican oregano, crushed between your fingers", "2 bay leaves", "6 cups vegetable stock", "1 cup diced smoked tempeh bacon (such as Lightlife\u00ae Organic Smoky Tempeh Strips\u00ae)", "2 tablespoons dark rum (optional)", "1 jalape\u00f1o pepper, seeded and chopped", "2 cups water", "1 1/2 teaspoons salt", "salt and freshly ground black pepper to taste", "1/2 cup sliced hard-boiled eggs (optional)", "1/2 cup finely chopped red onion" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Smokey Vegetarian Cuban Black Bean Soup", "url": "http://allrecipes.com/recipe/237345/smokey-vegetarian-cuban-black-bean-so/" }{ "name": "bootstrap-example", "version": "1.0.0", "lockfileVersion": 1, "requires": true, "dependencies": { "bootstrap": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-4.0.0.tgz", "integrity": " }, "bootstrap-social": { "version": "4.8.0", "resolved": "https://registry.npmjs.org/bootstrap-social/-/bootstrap-social-4.8.0.tgz", "integrity": "sha1-ZtRj3JZtbbQH37mTNR1YxTSqLHo=", "requires": { "bootstrap": "3.3.7", "font-awesome": "4.3.0" }, "dependencies": { "bootstrap": { "version": "3.3.7", "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-3.3.7.tgz", "integrity": "sha1-WjiTlFSfIzMIdaOxUGVldPip63E=" }, "font-awesome": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/font-awesome/-/font-awesome-4.3.0.tgz", "integrity": "sha1-RO63kM35hmQnhvM/znhHZPGEHEA=" } } }, "datatables": { "version": "1.10.13", "resolved": 
"https://registry.npmjs.org/datatables/-/datatables-1.10.13.tgz", "integrity": "sha1-m7Lexvfc8CBJoA5PDn0/4AnDk0Y=", "requires": { "jquery": "3.3.1" } }, "datatables-responsive": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/datatables-responsive/-/datatables-responsive-1.0.7.tgz", "integrity": "sha1-2Q5FeB3CF5+dIA37fwjv9JIJb84=", "requires": { "datatables": "1.10.13", "jquery": "3.3.1" } }, "datatables.net-plugins": { "version": "1.10.15", "resolved": "https://registry.npmjs.org/datatables.net-plugins/-/datatables.net-plugins-1.10.15.tgz", "integrity": "sha1-kG3uv/dQEKtMJ1+AWLS2DSyPMVc=" }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" }, "flot": { "version": "0.8.0-alpha", "resolved": "https://registry.npmjs.org/flot/-/flot-0.8.0-alpha.tgz", "integrity": "sha1-nLvHFHwQpH0lSduQvSmH7BunhLo=" }, "font-awesome": { "version": "4.6.3", "resolved": "https://registry.npmjs.org/font-awesome/-/font-awesome-4.6.3.tgz", "integrity": "sha1-hpM2UVQO4Ah0xmQBf1DyFy9lMaI=" }, "holderjs": { "version": "2.9.4", "resolved": "https://registry.npmjs.org/holderjs/-/holderjs-2.9.4.tgz", "integrity": "sha1-oWXbwlMuv/l1SRMSJZ0MUgwFmJg=", "requires": { "shaven": "0.8.1" } }, "jquery": { "version": "3.3.1", "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.3.1.tgz", "integrity": " }, "jquery.flot.tooltip": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/jquery.flot.tooltip/-/jquery.flot.tooltip-0.9.0.tgz", "integrity": "sha1-rha/lLJsLtmrTbFnu6Ut/bYVwd8=" }, "metismenu": { "version": "2.7.4", "resolved": "https://registry.npmjs.org/metismenu/-/metismenu-2.7.4.tgz", "integrity": " }, "morris.js06": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/morris.js06/-/morris.js06-0.6.1.tgz", "integrity": "sha1- }, "popper.js": { "version": "1.12.9", "resolved": "https://registry.npmjs.org/popper.js/-/popper.js-1.12.9.tgz", "integrity": "sha1-DfvC3/lsRRuzMu3Pz6r1ZtMx1bM=" }, "shaven": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/shaven/-/shaven-0.8.1.tgz", "integrity": "sha1-D0LU+zLEBINJ/57pwpb17/styq8=", "requires": { "escape-html": "1.0.3" } }, "tether": { "version": "1.4.3", "resolved": "https://registry.npmjs.org/tether/-/tether-1.4.3.tgz", "integrity": "sha512-YCfE/ } } } { "id": "d1688-0", "text": "OFFICERS\nEXECUTIVE COMMITTEE\nPRESIDENT! , WILL\nVice-President: , WOI\nIOWA STATE COLLEGE\nAMES, IOWA\nSecretary-Treasurer\, KUSD\nUNIVERSITY OF SOUTH DAKOTA\nVERMILLION. SOUTH DAKOTA\nOffice of Executive Secretary\nT. M. BEAIRD, WNAD\nUNIVERSITY OF OKLAHOMA\nof\nCollege and Univ'e/^ity\nBr-\nFebruary 7, 1933\nSPECIAL BULLETIN\nFIRST ZONE: , WCAC\nCONNECTICUT AGRICULTURAL COLLEGE\nStorrs, Connecticut\nSECOND ZONE: , WJBU\nBUCKNELL UNIVERSITY\nLEWISBURQ, PENNSYLVANIA\nTHIRD Zone: GARLAND POWELL, WRUF\nUNIVERSITY OF FLORIDA\nGainesville, Florida\nFOURTH Zone: H. G. INGHAM, KFKU\nLawrence, Kansas\nFifth zone: h. V. CARPENTER, KWSC\nSTATE C □ LLE □ E GF WASHINGTON\nPULLMAN, WASHINGTON\nAt Large: R« C. HIGGY, WEAQ\nOHIO STATE UNIVERSITY\nTO MEMBERS\nof\nTHE ASSOCIATION OF COLLEGE AND UNIVERSITY BROADCASTING STATIONS\n(and educational officials cooperating with the Association)\nThe Associated Press reported from Washington on the afternoon of Feb¬\nruary 3 that President Hoover last Friday named Professor \nof Nebraska Wesleyan University to be a member of the Federal Radio Com¬\nmission. 
The Association of College and University Broadcasting Stations\nhas been working on this project for several months. We have now completed\nthe first major step—the naming on the part of the President of Professor\ndensen to the Commission. It is needless to state that it will be very\ndifficult to get Senate action on confirming appointments at the present\ntime.\nOfficials of the Association urge your immediate action in behalf of Pro¬\nfessor Jensen. Kindly wire your Senators, even though they have already\nendorsed Professor Jensen, to confirm his appointment. Due to limited\ntime and change in the Administration in the course of the next few days\nit is necessary that this action be followed immediately.\nOfficers of the Association desire that all members of our group have\nwires on the Senators’desk* not later than Monday, February 13. It is\nimperative that w act now. The hard part of the battle has been won,\nbut there is still much to do. Act t*d, President.\n(Signature authorized by wire)\nBulletin released by ," }{"artist_id":"AR6OQVR11C8A42CA37","artist_latitude":62.19845,"artist_location":"SWEDEN","artist_longitude":17.55142,"artist_name":"Kamchatka","duration":381.70077,"num_songs":1,"song_id":"SOQBUWA12AB018B2EA","title":"Whipping Post","year":2009}tasks/config/admin/templates/bower.json0 { "name" : "Component", "description" : "Component distribution", "homepage" : "http://www.Multiselect-Pega.com", "author": { "name" : "", "web" : "https://github.com/ghoshArnab/multiselect" }, "ignore": [ "./index.js" ], "keywords": [ "pegaMultiselect", "ui", "css3", "framework" ], "license" : [ "http://Multiselect-Pega.mit-license.org/" ], "ignore": [ "docs", "node", "server", "spec", "src", "test" ] } { "project_name": "BlackLabs/play-morphia", "files": { "/pom.xml": 2 }, "pull_request": "https://github.com/BlackLabs/play-morphia/pull/133", "report": { "files_fixed": 1, "vulnerabilities_fixed": 2 } }packages/components/tsconfig.json0 { "extends": "../../tsconfig.json", "exclude": ["node_modules"], "include": ["src", "types/jsx.d.ts"], "compilerOptions": { "target": "ESNext", "moduleResolution": "node", "esModuleInterop": true, "module": "ESNext", "declarationDir": "./typings", "jsx": "react", "jsxFactory": "h" }, "references": [ { "path": "../utils" } ] } {"name": "Walidpur", "lsg_code": "270654", "district": "Mau", "state": "Uttar Pradesh", "no_of_wards": 0, "wards": []}0 { "_from": "react-native-maps@^0.22.0", "_id": "react-native-maps@0.22.0", "_inBundle": false, "_integrity": "sha512-vGlmHIhsKHzJLhGkCti+Nus3hTVU1ssDK/zUeXtFx5/f6Ax2lHKDgaM1j29YR+qRvckaUal5LunPpGOyYs7QuQ==", "_location": "/react-native-maps", "_phantomChildren": {}, "_requested": { "type": "range", "registry": true, "raw": "react-native-maps@^0.22.0", "name": "react-native-maps", "escapedName": "react-native-maps", "rawSpec": "^0.22.0", "saveSpec": null, "fetchSpec": "^0.22.0" }, "_requiredBy": [ "#USER", "/" ], "_resolved": "https://registry.npmjs.org/react-native-maps/-/react-native-maps-0.22.0.tgz", "_shasum": "0e675120fa6a8b2459ac819edd6be968d07568df", "_spec": "react-native-maps@^0.22.0", "_where": "/Users/reginaldsapp/MyApp", "author": { "name": "", "email": "" }, "bugs": { "url": "https://github.com/airbnb/react-native-maps/issues" }, "bundleDependencies": false, "dependencies": { "babel-plugin-module-resolver": "^2.3.0", "babel-preset-react-native": "1.9.0" }, "deprecated": false, "description": "React Native Mapview component for iOS + Android", "devDependencies": { "babel-eslint": "^6.1.2", 
"babel-preset-airbnb": "^1.1.1", "eslint": "^3.3.1", "eslint-config-airbnb": "^10.0.1", "eslint-plugin-import": "^1.14.0", "eslint-plugin-jsx-a11y": "^2.1.0", "eslint-plugin-prefer-object-spread": "^1.1.0", "eslint-plugin-react": "^6.1.2", "gitbook-cli": "^2.3.0", "lodash": "^4.17.2", "prop-types": "^15.5.10", "react": "^16.3.2", "react-native": "^0.54" }, "homepage": "https://github.com/airbnb/react-native-maps#readme", "keywords": [ "react", "react-native", "react-component", "map", "mapview", "google-maps", "mapkit" ], "main": "index.js", "name": "react-native-maps", "peerDependencies": { "react": ">= 16.0 || < 17.0", "react-native": ">= 0.51", "prop-types": "^15.0 || ^16.0" }, "repository": { "type": "git", "url": "git+https://github.com/airbnb/react-native-maps.git" }, "rnpm": { "android": { "sourceDir": "./lib/android" } }, "scripts": { "build": "npm run build:js && npm run build:android && npm run build:ios", "build:android": "./gradlew :react-native-maps:assembleDebug", "build:ios": "bundle install --path ./example/ios/bundles && bundle exec pod install --project-directory=./example/ios/", "build:js": "exit 0", "ci": "npm run lint", "lint": "eslint ./", "preversion": "./scripts/update-version.js", "run:android": "./gradlew installDebug && npm run start:android", "run:ios": "react-native run-ios --project-path ./example/ios", "run:packager": "./node_modules/react-native/scripts/packager.sh", "start": "node node_modules/react-native/local-cli/cli.js start", "start:android": "adb shell am start -n com.airbnb.android.react.maps.example/.MainActivity" }, "version": "0.22.0" } { "name": "wno", "version": "0.0.2", "description": "Wonderful Number Operator", "main": "index.js", "scripts": { "test": "node ./node_modules/istanbul/lib/cli.js cover ./node_modules/mocha/bin/_mocha -- --timeout 500000 --slow 30 --recursive test/ --bail" }, "repository": { "type": "git", "url": "https://github.com/willin/node-wno.git" }, "maintainers": [ { "name": "", "email": "" } ], "keywords": [ "Number", "Operator", "Wonderful", "Views", "Visitors" ], "author": "", "license": "MIT", "bugs": { "url": "https://github.com/willin/node-wno/issues" }, "homepage": "https://github.com/willin/node-wno", "devDependencies": { "coveralls": "^2.11.2", "eslint": "^0.17.1", "istanbul": "^0.3.8", "mocha": "^2.2.1", "should": "^5.2.0" } } { "usingComponents": { "van-field": "../../dist_diy/field/index", "van-cell-group": "../../dist_diy/cell-group/index", "van-button": "../../dist_diy/button/index", "van-image": "../../dist_diy/image/index" } } pxy0113/lhds {"remainingRequest":"F:\\work\\lhds\\node_modules\\thread-loader\\dist\\cjs.js!F:\\work\\lhds\\node_modules\\babel-loader\\lib\\index.js!F:\\work\\lhds\\node_modules\\eslint-loader\\index.js??ref--13-0!F:\\work\\lhds\\src\\plugins\\socket.js","dependencies":[{"path":"F:\\work\\lhds\\src\\plugins\\socket.js","mtime":1573807218727},{"path":"F:\\work\\lhds\\node_modules\\cache-loader\\dist\\cjs.js","mtime":499162500000},{"path":"F:\\work\\lhds\\node_modules\\thread-loader\\dist\\cjs.js","mtime":499162500000},{"path":"F:\\work\\lhds\\node_modules\\babel-loader\\lib\\index.js","mtime":499162500000},{"path":"F:\\work\\lhds\\node_modules\\eslint-loader\\index.js","mtime":499162500000}],"contextDependencies":[],"result":["var websock = null;\nvar state = -1;\nimport vuex from \"../store/index\";\nexport default {\n initWebSocket: function initWebSocket() {\n var _this = this;\n\n //初始化weosocket\n var token = sessionStorage.token;\n\n if (token) {\n var ds = 
window.encodeURIComponent(token); // const wsuri = `ws://192.168.2.105:8080/EasRobotWS/ws?token=${ds}`;\n\n var wsuri = \"ws://ws.lhds.vip:8080/EasRobotWS/ws?token=\".concat(ds); // const wsuri = `ws://m252t77964.wicp.vip:20211/EasRobotWS/ws?token=${ds}`\n\n websock = new WebSocket(wsuri);\n state = websock.readyState;\n\n websock.onmessage = function (e) {\n _this.websocketonmessage(e);\n };\n\n websock.onopen = function () {\n _this.websocketonopen();\n };\n\n websock.onerror = function () {\n console.log('失败了');\n };\n\n websock.onclose = function (e) {\n _this.websocketclose(e);\n };\n }\n },\n websocketonmessage: function websocketonmessage(e) {\n var result = JSON.parse(e.data);\n console.log('收到数据 = >', result);\n state = websock.readyState;\n vuex.state.collocationList = result;\n },\n websocketonopen: function websocketonopen() {\n console.log('打开websocket'); //this.websocketsend();\n\n state = websock.readyState;\n },\n websocketclose: function websocketclose(e) {\n //关闭\n console.log('断开连接', e);\n vuex.state.collocationList = [];\n state = -1;\n },\n websocketsend: function websocketsend(Data) {\n //数据发送\n console.log('发送数据 = >', Data);\n websock.send(Data);\n },\n close: function close() {\n if (state >= 0) {\n websock.close();\n state = websock.readyState; // state = -1;\n // console.log(state)\n }\n },\n lookState: function lookState() {\n //查询sock当前状态\n return state;\n }\n};",null]}{ "name": "JGExtension", "version": "1.0.0", "summary": "Swift JGExtension 모음", "description": "Swift JGExtension 모음, @IBDesignable 사용한다.", "homepage": "https://github.com/junggate/JGExtension", "license": "MIT", "authors": { "JungGate": "" }, "platforms": { "ios": "11.0" }, "source": { "git": "https://github.com/junggate/JGExtension.git", "branch": "master" }, "source_files": "JGExtension/JGExtension/**/*" } { "debug": true, "log.level": 3, "log.path": "%base_path%/storage/logs/app-development.log", "cache.dir": "%base_path%/storage/cache-development" }dreamer/protondb-data [ { "appId": "34270", "title": "SEGA Mega Drive & Genesis Classics", "timestamp": 1535156721, "rating": "Silver", "notes": "Games played in \"Fullscreen\" Mode seem to black screen. Just back out so the game displays within the virtual CRT TV.", "os": "Fedora 28 (Kernel 4.17.12)", "gpuDriver": "Mesa 18.0.5", "specs": "i5-3570 / Radeon RX 560", "protonVersion": "Default" }, { "appId": "34270", "title": "SEGA Mega Drive & Genesis Classics", "timestamp": 1535389166, "rating": "Bronze", "notes": "Steam controllers don't work; full screen doesn't work; game seems to get random inputs not done by user. 
", "os": "Pop!_OS 18.04", "gpuDriver": "3.1 Mesa 18.3.0-devel - padoka PPA", "specs": "AMD Athlon II X4 645/HD 6670", "protonVersion": "Default" }, { "appId": "34270", "title": "SEGA Mega Drive and Genesis Classics", "timestamp": 1536081637, "rating": "Platinum", "notes": "Works fine used USB xbone controller", "os": "Ubuntu 18.04.1 LTS", "gpuDriver": "NVIDIA 396.54", "specs": "i7 5930k / GeForce GTX 1060 6GB", "protonVersion": "Default" } ]{"vendor":"adoptopenjdk","filename":"OpenJDK8U-jdk_x64_linux_openj9_8u192b12_openj9-0.11.0.tar.gz","release_type":"ga","version":"8.0.192+12.openj9-0.11.0","java_version":"8u192-b12_openj9-0.11.0","jvm_impl":"openj9","os":"linux","architecture":"x86_64","file_type":"tar.gz","image_type":"jdk","features":[],"url":"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u192-b12_openj9-0.11.0/OpenJDK8U-jdk_x64_linux_openj9_8u192b12_openj9-0.11.0.tar.gz","md5":"57369ca42bd5c312f13889c5e8795df9","md5_file":"OpenJDK8U-jdk_x64_linux_openj9_8u192b12_openj9-0.11.0.tar.gz.md5","sha1":"e7def54266f02dd3d6ac8286c005f3f4a0ef4882","sha1_file":"OpenJDK8U-jdk_x64_linux_openj9_8u192b12_openj9-0.11.0.tar.gz.sha1","sha256":"fd7035c60ad3060fccf7ab67c15b722302a1863ebddd3141dc3dd98bd0b25a68","sha256_file":"OpenJDK8U-jdk_x64_linux_openj9_8u192b12_openj9-0.11.0.tar.gz.sha256","sha512":"fffcab491e5b33835371cd0e3ea5f0bb0843730bcaad83ac708c57e6b18c6517a79d45d75caf24ba0725e6269eae07fb663092db3d4743dba5ed53c3e3e74177","sha512_file":"OpenJDK8U-jdk_x64_linux_openj9_8u192b12_openj9-0.11.0.tar.gz.sha512","size":90243811} deploy-playbook/instance/input_CheckInstanceStateStopRule.json {"queueName":"InstanceStopQueue","sqsEndpoint":"https://sqs.ap-northeast-1.amazonaws.com","maxNumberOfMessages":10} sluyters/Gester0 {"name":"circle_right","subject":3,"date":"16122009-060106","paths":{"Pen":{"strokes":[{"x":157,"y":-288,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":136,"y":-321,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":113,"y":-335,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":81,"y":-347,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":33,"y":-358,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":-14,"y":-358,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-76,"y":-358,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-135,"y":-348,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-206,"y":-334,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-273,"y":-304,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-345,"y":-270,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-411,"y":-220,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-477,"y":-169,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-534,"y":-109,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-587,"y":-45,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-629,"y":22,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-660,"y":89,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-676,"y":156,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17
,"stroke_id":0},{"x":-676,"y":218,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":-660,"y":276,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":-628,"y":326,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":-583,"y":373,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":-521,"y":406,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":-444,"y":428,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":-355,"y":437,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":-254,"y":430,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":-149,"y":410,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":-39,"y":374,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":67,"y":320,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":172,"y":250,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":263,"y":169,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":338,"y":80,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":394,"y":-12,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0},{"x":427,"y":-101,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":433,"y":-185,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0},{"x":417,"y":-259,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":35,"stroke_id":0},{"x":374,"y":-318,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":36,"stroke_id":0},{"x":300,"y":-371,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":37,"stroke_id":0},{"x":215,"y":-402,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":38,"stroke_id":0},{"x":112,"y":-425,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":39,"stroke_id":0},{"x":1,"y":-440,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":40,"stroke_id":0},{"x":1,"y":-440,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":41,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Stylistic ST5022 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}{ "parent": "withoutaxmas:block/present_1", "textures": { "side": "withoutaxmas:block/present_red", "top": "withoutaxmas:block/present_red_top" } }{ "id": 204337, "name": "Blotreterus", "description": "blokkolt trollokat etetőikkel együtt rejtő userstyle", "user": { "id": 1182506, "name": "ankhiron", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2021-05-26T09:51:23.000Z", "weekly_install_count": 0, "total_install_count": 0, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/204337_additional_38106.png?r=1622022687", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": [ "https://userstyles.org/style_screenshots/204337_additional_38106.png?r=1622022687" ], "license": "publicdomain", "created": "2021-05-26T09:51:23.000Z", "category": "site", "raw_subcategory": "disqus", "subcategory": 
"disqus", "additional_info": null, "style_tags": [], "css": "@-moz-document domain(disqus.com) {\r\n\t.post.minimized {\r\n\t display:none;\r\n\t}\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/204337/blotreterus.user.js", "style_settings": [] }SegunWeb/Kingdebag { "title": "Over 80% of a residential building’s energy needs are from heating & cooling", "desc": "Energy efficient building envelopes are key to reducing energy consumption.", "bubles": [ { "perc": "5%", "text": "Cooking", "id": "b-one" }, { "perc": "13%", "text": "Water heating", "id": "b-two" }, { "perc": "2%", "text": "Lighting", "id": "b-three" }, { "perc": "12%", "text": "Electrical Appliances", "id": "b-four" }, { "perc": "68%", "text": "Electrical Appliances", "id": "b-five" } ], "buble_desc": [ {"desc": "Energy efficient building
envelopes are key to reducing energy consumption.", "id_desc": "d_one"}, {"desc": "Energy efficient building envelopes are key to reducing energy.", "id_desc": "d_two"}, {"desc": "Energy efficient building envelopes are key to consumption.", "id_desc": "d_three"}, {"desc": "Energy efficient building reducing energy consumption.", "id_desc": "d_four"}, {"desc": "Envelopes are key to reducing energy
consumption.", "id_desc": "d_five"} ] }0 { "python.pythonPath": "/home/vinicius/anaconda3/envs/augmentation/bin/python", "[python]": { "editor.insertSpaces": true, "editor.tabSize": 4 } } [{"channel":"CNNInternationalEurope.us","display_name":"CNN International Europe","site":"chaines-tv.orange.fr","lang":"fr","url":"https://iptv-org.github.io/epg/guides/fr/chaines-tv.orange.fr.epg.xml"},{"channel":"CNNInternationalEurope.us","display_name":"CNN International Europe","site":"chaines-tv.orange.fr","lang":"fr","url":"https://iptv-org.github.io/epg/guides/bh/chaines-tv.orange.fr.epg.xml"},{"channel":"MNetMovies2.za","display_name":"M-Net Movies 2","site":"dstv.com","lang":"en","url":"https://iptv-org.github.io/epg/guides/zw/dstv.com.epg.xml"}]{"id":"0084","tableId":"0084","tableType":"User","name":"Performed by","chapters":["CH_06"],"entries":[]}dashboard/config/locales/bubble_choice_description.da-DK.json100-1000 { "da-DK": { "data": { "bubble_choice_description": { "courseD_bee_nestedLoops8_2021": "Indsaml al nektar fra alle blomster, og lav honning i alle bikuber. Brug indlejrede løkker. " } } } }{"source_units": [" At breakfast, my cat Susie sits on the deck of the pool, outside the glass doors to the kitchen.", "Bang, bang.", "She hits the door with her paw.", "This means \u201cRyan, feed me\u201d in Susie\u2019s special language.", "I get up from my chair and feed her.", "I know this, because I know everything about her, or at least I think I do.", "For the life of me, I do not know where Susie goes at noon.", "At eleven-thirty Saturday morning, I notice Susie is not at home.", "Where did she go?", "Then I spot her trotting down the street towards town.", "I quickly follow.", "Susie has turned the corner and is walking down the block towards the traffic light.", "She passes the corner store and walks behind the small strip mall, and I start thinking that I know where she is going.", "Mr. Johnston\u2019s Fresh Fish Market is in a small, white building at the back of the strip mall.", "I see that Susie has joined several of her cat colleagues.", "Mr. Johnston comes out of the back door, carrying several black trash bags.", "He piles them into the dumpster, and then pulls out a small, clear plastic bag full of fish heads.", "He scatters these on the ground and watches the cats pounce on them.", "He spies me lurking by the corner of the strip mall.", "\u201cHi Ryan,\u201d he calls in his thick Brooklyn accent.", "\u201cSo this is where Susie comes at noon,\u201d I say, walking towards him.", "Mr. Johnston laughs.", "\u201cYep.", "All the cats come to visit me at noon.", "They used to tear up my garbage, so now I just put the fish heads on the ground for them.", "Is that your cat?\u201d Susie ignores me.", "The fish head is more interesting at the moment.", "\u201cYes, that\u2019s my Susie,\u201d I say.", "\u201cShe\u2019s here every day,\u201d says Mr. 
Johnston before walking back inside the store.", "I wait until Susie finishes her fish head, and we walk home together."], "response_units": ["One morning my cat Susie was lounging on the pool cover next to the kitchen window .", "I could tell that she was waiting to be fed her breakfast.", "I know this because I know everything about Susie.", "Well, almost everything.", "For the life of me I cannot tell where she goes every afternoon.", "One Saturday morning I noticed Susie was missing.", "I looked for her all over the house when I looked at the front window and saw her walking down the street towards town.", "She was heading past the street light, past the pharmacy.", "I followed her and had a good idea of where she was going.", "She when around an alley next to Johnson's Fish Market.", "I watched as Mr. Johnson's opened the door and through out a bag of trash.", "Then he opened a clear bag of fish heads and started handing them out to the cats standing around him.", "Mr. Johnson noticed me watching him and asked \"Are any of these cats yours?\"", "I nodded and pointed out to Susie who was not remotely interested in me as she was busy eating her fish head.", "Mr Johnson said \"They come every afternoon, they use to tear up my trash until I started giving them the fish heads\".", "With that he turned around went back inside.", "I waited for Susie to finish her fish head and then we walked home together."], "correspondences": [0, 5, 5, 5, 6, 7, 9, 11, 12, 12, 15, 24, 5, 12, 24, 12, 29], "source_spans": [[0, 101], [101, 112], [112, 143], [143, 198], [198, 234], [234, 309], [309, 368], [368, 433], [433, 450], [450, 504], [504, 521], [521, 605], [605, 724], [724, 817], [817, 875], [875, 950], [950, 1048], [1048, 1116], [1116, 1168], [1168, 1217], [1217, 1284], [1284, 1304], [1304, 1309], [1309, 1347], [1347, 1436], [1436, 1472], [1472, 1520], [1520, 1550], [1550, 1629], [1629, 1698]], "response_spans": [[0, 84], [84, 142], [142, 192], [192, 216], [216, 280], [280, 329], [329, 448], [448, 505], [505, 563], [563, 618], [618, 692], [692, 794], [794, 870], [870, 979], [979, 1096], [1096, 1140], [1140, 1216]]}0 { "name": "", "number": "16020923", "is_illegal": false, "text": "If this card is Normal or Special Summoned: You can add 1 \"A.I.\" Spell/Trap from your Deck to your hand. You can target 1 \"@Ignister\" monster you control; its Level becomes 4 until the end of this turn. 
You can only use each effect of \"Pikari @Ignister\" once per turn.", "type": "Monster", "is_monster": true, "is_spell": false, "is_trap": false, "species": "Cyberse", "attack": "1200", "defense": "600", "attribute": "LIGHT", "is_pendulum": false, "is_xyz": false, "is_synchro": false, "is_fusion": false, "is_link": false, "is_extra_deck": false, "monster_types": [ "Effect" ], "stars": "4" }{ "name": "generator-angulardart", "version": "0.0.1", "description": "AngularDart skeleton generator", "files": [ "app", "router" ], "keywords": ["yeoman-generator"], "dependencies": { "yeoman-generator": "^0.24.1" } } src/config/types.json { "NonceId": "u64", "ReqId": "U256", "Amount": "U256", "Direction": "u8", "L1Account": "U256", "L1TxHash": "U256", "AccountIndex": "u32", "PoolIndex": "u32", "TokenIndex": "u32", "PublicKey": "(U256, U256)", "Signature": "(U256, U256, U256)", "Ops": { "_enum": { "SetKey": "(AccountIndex, PublicKey)", "Deposit": "(Signature, AccountIndex, TokenIndex, Amount)", "Withdraw": "(Signature, AccountIndex, TokenIndex, Amount, L1Account, NonceId)", "Swap": "(Signature, AccountIndex, PoolIndex, Amount, Direction, NonceId)", "PoolSupply": "(Signature, AccountIndex, PoolIndex, Amount, Amount, NonceId)", "PoolRetrieve": "(Signature, AccountIndex, PoolIndex, Amount, Amount, NonceId)", "AddPool": "(PoolIndex, TokenIndex, TokenIndex)" } } } 10-100 {"date":20201015,"state":"DC","positive":16166,"probableCases":null,"negative":null,"pending":null,"totalTestResultsSource":"totalTestEncountersViral","totalTestResults":450614,"hospitalizedCurrently":88,"hospitalizedCumulative":null,"inIcuCurrently":22,"inIcuCumulative":null,"onVentilatorCurrently":10,"onVentilatorCumulative":null,"recovered":12681,"lastUpdateEt":"10/14/2020 00:00","dateModified":"2020-10-14T00:00:00Z","checkTimeEt":"10/13 20:00","death":638,"hospitalized":null,"hospitalizedDischarged":null,"dateChecked":"2020-10-14T00:00:00Z","totalTestsViral":null,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":null,"deathConfirmed":null,"deathProbable":null,"totalTestEncountersViral":450614,"totalTestsPeopleViral":235875,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"11","positiveIncrease":34,"negativeIncrease":0,"total":16166,"totalTestResultsIncrease":2352,"posNeg":16166,"dataQualityGrade":null,"deathIncrease":0,"hospitalizedIncrease":0,"hash":"63d22e37d526cde93a4da39e0ae1c13b1506b46c","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} JamesSmartCell/platformio-libmirror { "authors": { "name": "", "url": "https://os.mbed.com/teams/Ben-Simon-Inez-IDD/" }, "description": "Adafruit driver converted to mbed.", "examples": [ "https://os.mbed.com/teams/Ben-Simon-Inez-IDD/code/BNO055_Adafruit/" ], "frameworks": "mbed", "keywords": "Adafruit_BNO055", "name": "Adafruit_BNO055", "platforms": "*", "repository": { "type": "hg", "url": "https://os.mbed.com/teams/Ben-Simon-Inez-IDD/code/Adafruit_BNO055/" } }{"StockNinjaWeaponry": ["Ninja", "HighlyVisibleNinja", "RuleOfPerception", "TruthInTelevision", "ArtisticLicenseHistory", "ShroudedInMyth", "ImplausibleFencingPowers", "ImprobableUseOfAWeapon", "WeaponOfChoice", "KatanasAreJustBetter", "ReverseGrip", "BFS", "FlechetteStorm", "KnifeOutline", "FuumaShuriken", 
"SinisterScythe", "EpicFlail", "ImprobableUseOfAWeapon", "FightingWithChucks", "InterchangeableAsianCultures", "RuleOfCool", "WolverineClaws", "BladeBelowTheShoulder", "RealLife", "Caltrops", "BlindedByTheLight", "SmokeOut", "ThrowDownTheBomblet", "GrenadeTag", "GrenadeSpam", "TakingYouWithMe", "SaiGuy", "BladeOnAStick", "SimpleStaff", "PoisonedWeapons", "ChainPain", "DualTonfas", "HiddenWeapons", "NothingUpMySleeve", "ArsenalAttire", "FuumaShuriken", "ItMakesSenseInContext", "BladeBelowTheShoulder", "EquippableAlly", "SecondHourSuperpower", "HighlyVisibleNinja", "KnifeNut", "CreepyCrossdresser", "Fanboy", "PlayingWithFire", "HeroesPreferSwords", "SaiGuy", "FightingWithChucks", "SimpleStaff", "WarriorMonk", "OldMaster", "DualWielding", "KatanasAreJustBetter", "SaiGuy", "BladeOnAStick", "WeaponOfChoice", "RazorFloss", "PoisonedWeapons", "OverlyLongName", "EverythingsBetterWithNinjas", "FuumaShuriken", "HumongousMecha", "Caltrops", "TransformationTrinket", "SwissArmyWeapon", "SwissArmyWeapon", "TransformationTrinket", "Caltrops", "KillItWithFire", "PlayerCharacter", "FuumaShuriken", "RazorWire", "HighlyVisibleNinja", "Caltrops", "SmokeOut", "KiManipulation", "TrickBomb", "SwordAndGun", "DualWielding", "BladeOnAStick", "TeleportSpam", "BladeBelowTheShoulder", "BarbarianHero", "NinjaPirateZombieRobot", "NamedWeapons", "BladeOnAStick"]}{"categories":["Operating Systems","Programming","Scripting","Web Development"],"desc":"\n","details":{"authors":"","format":"pdf","isbn-10":"1484207203","isbn-13":"978-1484207208","pages":"373 pages","publication date":"December 28, 2015","publisher":"Apress","size":"11.73Mb"},"img":"http://192.168.127.128/covers/34/3473b4d5bad0c7a2433af0395264843e.jpg","link":"https://rapidhosting.info/files/fy7","title":"Windows 10 Development Recipes: A Problem-Solution Approach in HTML and JavaScript"}1-10 {"results":[{"uri":"http://dbpedia.org/resource/OpenCorporates","label":"OpenCorporates","description":"OpenCorporates is a website which shares data on corporate entities as open data under the share-alike attribution Open Database Licence. It was created by and , under the auspices of their company, Chrinon Ltd, and launched on 20 December 2010. It has the aims of creating a URL with such data for every corporate entity in the world, importing government data relating to companies and matching it to specific companies.","refCount":6,"classes":[{"uri":"http://dbpedia.org/ontology/Website","label":"website"},{"uri":"http://schema.org/CreativeWork","label":"creative work"},{"uri":"http://schema.org/WebPage","label":"web page"},{"uri":"http://www.w3.org/2002/07/owl#Thing","label":"owl#Thing"},{"uri":"http://dbpedia.org/ontology/Work","label":"work"}],"categories":[{"uri":"http://dbpedia.org/resource/Category:Websites","label":"Websites"},{"uri":"http://dbpedia.org/resource/Category:Internet_properties_established_in_2011","label":"Internet properties established in 2011"},{"uri":"http://dbpedia.org/resource/Category:Open_data","label":"Open data"}],"templates":[],"redirects":[]}]}InvalidZoneFileTests/FalseCond_4/PreprocessorOutputs/4.json { "Bind": { "Code": 1, "Output": [ "4.txt:2: using RFC1035 TTL semantics", "dns_master_load: 4.txt:3: uni.uni.com: CNAME and other data", "zone com/IN: loading from master file 4.txt failed: CNAME and other data", "zone com/IN: not loaded due to errors." ] }, "Nsd": { "Code": 1, "Output": [ "zone com. file 4.txt has 1 errors" ] }, "Knot": { "Code": 1, "Output": [ "[uni.uni.com.] 
more records exist at CNAME", "", "Error summary:", " 1\tmore records exist at CNAME", "Serious semantic error detected" ] }, "Powerdns": { "Code": 1, "Output": [ "[Error] CNAME uni.uni.com found, but other records with same label exist.", "Checked 5 records of 'com', 1 errors, 0 warnings." ] } }jokk-itu/laughing-umbrella { "AzureAd": { "Instance": "https://login.microsoftonline.com", "Domain": "joachimkelsen.onmicrosoft.com", "TenantId": "3ea8a579-a1b4-4af9-b63e-7fdc82963153", "ClientId": "895fe467-4fe0-4f4d-bd8e-ec38e486c5b0", "Audience": "api://895fe467-4fe0-4f4d-bd8e-ec38e486c5b0", "CallbackPath": "/signin-oidc", "Scopes": "Sports" }, "Logging": { "Overrides": { "Microsoft.Hosting.Lifetime": "Warning", "Serilog": "Information" }, "LogToConsole": true, "LogToUdp": true, "LogToSeq": true, "UdpPort": 2002, "UdpHost": "localhost" }, "AllowedHosts": "*" } [ { "Id": "134198", "ThreadId": "40454", "Html": "Hi,
\r\n
\r\nI needed to know whether a cell or row was valid before allowing the user to save the work. I couldn't find any property on the DataGrid which told me this but I did find two private methods called HasRowValidationError  / HasCellValidationError  so I changed these to be protected so I could access them from my inherited control.
\r\n
\r\nI realize this will have upgrade issues any advice on alternatives?
\r\n
\r\nThanks \r\n", "PostedDate": "2008-11-21T03:55:04.89-08:00", "UserRole": null, "MarkedAsAnswerDate": null }, { "Id": "137987", "ThreadId": "40454", "Html": "For cell validation you can set the ValidationStep to RawProposedValue or ConvertedValue which will allow you to validate before data is fully committed.  For row validation, unfortunately it will have to save to the data source for validation to take place.  \r\n", "PostedDate": "2008-12-05T05:57:06.9-08:00", "UserRole": null, "MarkedAsAnswerDate": null } ]{"UnbreakableWeapons": ["BreakableWeapons", "GameplayAndStorySegregation", "AcceptableBreaksFromReality", "NobodyPoops", "FirstPersonShooter", "FacklerScaleOfFPSRealism", "ComicBooks", "TabletopGames", "WesternAnimation", "RealLife", "BreakableWeapons", "MightyGlacier", "BreakableWeapons", "BreakableWeapons", "KineticWeaponsAreJustBetter", "IdleAnimation", "BFG", "GatlingGood", "MoreDakka", "BFS", "PVPBalanced", "RareRandomDrop", "BreakableWeapons", "PVPBalanced", "RareRandomDrop", "LeaningOnTheFourthWall", "Mooks", "Unobtainium", "GameBreaker", "GaidenGame", "InfinityPlusOneSword", "WeaponOfChoice", "TheChosenMany", "BreakableWeapons", "BreakableWeapons", "SkillScoresAndPerks", "SystemShock2", "LeaningOnTheFourthWall", "LeaningOnTheFourthWall", "ImprovisedWeapon", "ItemCrafting", "RagnarokProofing", "FridgeBrilliance", "ItemCrafting", "RagnarokProofing", "FridgeBrilliance", "BreakableWeapons", "ClimaxBoss", "FinalBoss", "InfinityPlusOneSword", "JackOfAllStats", "EliteTweak", "GameBreaker", "BigGood", "InfinityPlusOneSword", "JackOfAllStats", "EliteTweak", "GameBreaker", "BigGood", "TimeSkip", "KiManipulation", "ThunderboltIron", "AbsurdlySharpBlade", "RuleOfFun", "LoadsAndLoadsOfRules", "HotBlade", "WordOfGod", "MemeticMutation"]}profiles/parhomenko_irina_oleksandrivna.json {"2010":"","2016":"","AdditionalNote":"","Department":"Нововодолазький районний суд Харківської області","Link":"http://court.gov.ua/lustration/c698cb14fb89c34d796ac460b77a5d98.pdf","Link 2015":"","Name":"","Note":"","Position":"Суддя Нововодолазького районного суду Харківської області","Region":"Харківська область","Youtube":"","analytics":[{"b":30517,"i":209802,"y":2014},{"ff":55.6,"ffa":1,"i":170527,"y":2015},{"b":10000,"ff":55.6,"ffa":1,"fi":4228,"i":290461,"y":2016},{"ff":72.7,"ffa":2,"i":281152,"y":2017},{"b":24672,"fi":467459,"k":27.8,"ka":2,"y":2019},{"y":2020}],"declarationsLinks":[{"id":"vulyk_37_78","provider":"declarations.com.ua.opendata","url":"http://static.declarations.com.ua/declarations/chosen_ones/mega_batch/parkhomenko_iryna_oleksandrivna.pdf","year":2014},{"id":"nacp_b0423700-672f-476f-9ab4-65112074b352","provider":"declarations.com.ua.opendata","year":2015},{"id":"nacp_6595e93a-6e2c-4227-a1ee-ec5a48b76131","provider":"declarations.com.ua.opendata","year":2016},{"id":"nacp_4d59ead8-856e-489d-ba47-6fa28c813a90","provider":"declarations.com.ua.opendata","year":2017},{"id":"nacp_ee7457f3-78c5-40af-a59a-c614a3623197","provider":"declarations.com.ua.opendata","year":2019},{"id":"nacp_980ee9f8-2684-4048-b0f5-59a18e61aa3f","provider":"declarations.com.ua.opendata","year":2020}],"field8":"","field9":"","key":"parhomenko_irina_oleksandrivna","type":"judge","Декларація доброчесності судді подано у 2016 році (вперше)":"","Декларація родинних зв’язків судді подано у 2016 році":"","Декларації 2013":"","Декларації 2014":"","Декларації 2015":"","Декларації 2016":"","Клейма":"","Кількість дисциплінарних стягнень":"","Кількість скарг":"0","Кількість справ":"","Оскаржені":"","ПІБ2":"","Фото":"","Як 
живе":"","декларації 2015":"","судові рішення по справах Майдану":""}{"description": "Reflect Emmet HTML expansion in Sass/LESS", "version": "1.0.2", "sublime_text": ">=3000", "dependencies": [], "url": "https://github.com/eecolella/emmet-style-reflector", "platforms": ["*"]}dgapitts/pev20 { "name": "pev2", "version": "0.1.20", "description": "A VueJS component to show a graphical vizualization of a PostgreSQL execution plan.", "main": "index.html", "repository": ":dgapitts/pev2.git", "author": " <>", "license": "MIT", "private": true, "scripts": { "deploy": "aws s3 sync ./public s3://pev2.ebabel.eu --acl public-read --exclude '*.DS_Store*'", "invalidate": "aws cloudfront create-invalidation --distribution-id E3EGZABZ7I8T0L --paths \"/*\"" } } { "user": "jayazhang", "repos": 1, "login": "jayazhang", "id": 10206233, "avatar_url": "https://avatars1.githubusercontent.com/u/10206233?v=3", "url": "https://api.github.com/users/jayazhang", "html_url": "https://github.com/jayazhang", "followers_url": "https://api.github.com/users/jayazhang/followers", "following_url": "https://api.github.com/users/jayazhang/following{/other_user}", "gists_url": "https://api.github.com/users/jayazhang/gists{/gist_id}", "starred_url": "https://api.github.com/users/jayazhang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jayazhang/subscriptions", "organizations_url": "https://api.github.com/users/jayazhang/orgs", "repos_url": "https://api.github.com/users/jayazhang/repos", "events_url": "https://api.github.com/users/jayazhang/events{/privacy}", "received_events_url": "https://api.github.com/users/jayazhang/received_events", "type": "User", "site_admin": false, "name": "张家裕", "company": null, "blog": null, "location": null, "email": "", "hireable": null, "bio": "前端工程师", "public_repos": 13, "public_gists": 0, "followers": 0, "following": 0, "created_at": "2014-12-16T09:15:25Z", "updated_at": "2017-01-19T03:34:44Z" }{"geometry": {"type": "Point", "coordinates": [-79.64, 39.92]}, "type": "Feature", "id": "15456", "properties": {"other_cities": "Lemont Frnc, Lemont Frnce", "city": "Lemont Furnace", "state": "PA", "county": "Fayette County"}}1-10 { "parent": "onlysilver:block/ore", "textures": { "base": "blocks/stone", "overlay": "onlysilver:blocks/silver_ore" } } resources/tr/sifa-university.json {"name":"","alt_name":"iversitesi","country":"Turkey","state":null,"address":{"street":"Gazi Bulvari N° 72, Eta Iş Hani Kat: 1 D","city":"Izmir","province":null,"postal_code":"35210"},"contact":{"telephone":"+90(232) 441-41-72","website":"http:\/\/www.sifa.edu.tr\/","email":"","fax":"+90(232) 441-00-98"},"funding":"Private","languages":null,"academic_year":null,"accrediting_agency":"Council of Higher Education (YÖK)"} laokingshineUAV/VoTT1-10 {"ast":null,"code":"var $export = require('./_export');\n\nvar $task = require('./_task');\n\n$export($export.G + $export.B, {\n setImmediate: $task.set,\n clearImmediate: $task.clear\n});","map":null,"metadata":{},"sourceType":"script"}1000+ { "name": "ASecurity", "version": "1.0.0", "summary": "ASecurity is an advanced extension for Security framework.", "description": "ASecurity is an advanced extension for Security framework. 
Description.", "homepage": "https://github.com/ihormyroniuk/ASecurity", "license": "MIT", "authors": { "": "" }, "platforms": { "ios": "10.0" }, "source": { "git": "https://github.com/ihormyroniuk/ASecurity.git", "tag": "1.0.0" }, "source_files": "ASecurity/**/*.{swift}", "swift_versions": "4.2", "swift_version": "4.2" } 0 {"name":"chevron_right","subject":1008,"date":"10122009-052427","paths":{"Pen":{"strokes":[{"x":379,"y":-710,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":405,"y":-714,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":424,"y":-720,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":424,"y":-720,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":434,"y":-703,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":418,"y":-685,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":392,"y":-650,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":350,"y":-615,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":301,"y":-561,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":229,"y":-511,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":151,"y":-451,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":58,"y":-394,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-42,"y":-334,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-148,"y":-281,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-245,"y":-226,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-343,"y":-179,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-427,"y":-135,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-500,"y":-98,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":-554,"y":-61,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":-590,"y":-31,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":-604,"y":1,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":-594,"y":32,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":-561,"y":63,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":-508,"y":94,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":-431,"y":127,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":-336,"y":162,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":-221,"y":202,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":-95,"y":243,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":37,"y":286,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":170,"y":333,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":293,"y":378,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":418,"y":440,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":522,"y":499,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stro
ke_id":0},{"x":612,"y":554,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":684,"y":609,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0},{"x":746,"y":661,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":35,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}{ "id": 43, "logo_path": "/yB45Ha9e2nUP4ydYCNIOdl54xp5.png", "description": null, "name": "", "parent_company": null, "homepage": "http://www.foxsearchlight.com/", "headquarters": "Los Angeles, California, USA" }{ "recommendations": [ "streetsidesoftware.code-spell-checker", "editorconfig.editorconfig", "bierner.emojisense", "donjayamanne.githistory", "eamodio.gitlens", "xabikos.javascriptsnippets", "shd101wyy.markdown-preview-enhanced", "esbenp.prettier-vscode", "jasonnutter.search-node-modules", "dbaeumer.vscode-eslint", "vscode-icons-team.vscode-icons", "pflannery.vscode-versionlens", "visualstudioexptteam.vscodeintellicode", "jounqin.vscode-mdx" ] } { "name": "dummy-test", "version": "1.0.0", "description": "", "main": "destroy.js", "scripts": { "test": "mocha" }, "repository": { "type": "git", "url": "git+https://github.com/FenrirUnbound/dummy.git" }, "author": "", "license": "MIT", "bugs": { "url": "https://github.com/FenrirUnbound/dummy/issues" }, "homepage": "https://github.com/FenrirUnbound/dummy#readme", "private": true, "dependencies": { "async": "^2.0.0-rc.6" } } { "name": "snapcam", "description": "send webcam snaps to snapchatters", "main": "snapchat.js", "repository": { "type": "git", "url": "https://github.com/rosedigital/snapcam" }, "bugs": { "url": "https://github.com/rosedigital/snapcam/issues" }, "homepage": "https://github.com/rosedigital/snapcam", "dependencies": { "gm": "^1.16.0", "moment": "^2.8.3", "q": "^1.0.1", "snapchat": "^1.0.6" } } fofr/manage-courses-prototype {"id":"9e95a47b-86be-4df9-b9e3-1b58dd5709bf","name":"GLF Schools' Teacher Training (Southern)","code":"B","address":{"addressLine1":"c/o Merstham Park School","addressLine2":"Taynton Drive","town":"Merstham, REDHILL","county":"Surrey","postcode":"RH1 3PU"},"organisation":{"id":"21b5c491-0ed0-40bb-9374-c2af4b531ef8","code":"1HV","name":"GLF Schools’ Teacher Training – School Direct"}}{ "fbPageAccessToken": "", "fbAppId": "486336518218607", "fbPageId": "benjamin", "djangoToken": "Token ", "product_analytics_index": "staging_bot_product_analytics", "eng_analytics_index": "staging_bot_eng_analytics", "bot_host_name": "https://host.com", "stripeKey": "sk_test", "isActiveMode": true } Fulwin/atop-backend { "trigger": { "title": "Browse ..." 
}, "manager": { "title": "Media Manager", "newFolderBtnTitle": "New Folder", "uploadBtnTitle": "Upload", "backToParentBtnTitle": "Parent", "deleteBtnTitle": "Delete", "copyPathBtnTitle": "Copy Path", "filePreview": "File Preview", "rootFolderName": "Root", "rootFolderPrefix": "Upload" } } package.json { "name": "gulp-tasks-nimedev", "version": "1.4.3", "description": "common gulp tasks", "scripts": { "update": "rm -rf node_modules && npm install && npm outdated" }, "repository": { "type": "git", "url": "https://github.com/nimedev/gulp-tasks-nimedev" }, "keywords": [ "gulp", "gulp tasks" ], "author": { "name": "", "email": "" }, "license": "MIT", "bugs": { "url": "https://github.com/nimedev/gulp-tasks-nimedev/issues" }, "homepage": "https://github.com/nimedev/gulp-tasks-nimedev", "engines": { "node": ">=6.0.0 <7.0.0" }, "devDependencies": { "eslint": "3.2.2", "eslint-config-nimedev": "3.0.0" } }{ "id": 42220, "info": { "name": "Quora — Minimal and netbook-friendly", "description": "Slightly fixed version of the style 41612 with narrow viewport optimised for netbook screens (up to 1024px). Logo removed, max-width is 920px;", "additionalInfo": null, "format": "uso", "category": "quora", "createdAt": "2011-01-03T10:26:14.000Z", "updatedAt": "2011-01-03T10:35:25.000Z", "license": "NO-REDISTRIBUTION", "author": { "id": 4163, "name": "sheep" } }, "stats": { "installs": { "total": 291, "weekly": 0 } }, "screenshots": { "main": { "name": "42220-after.png", "archived": false } }, "discussions": { "stats": { "discussionsCount": 0, "commentsCount": 0 }, "data": [] }, "style": { "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n/* Built upon http://userstyles.org/styles/41612 */\r\n\r\n@-moz-document domain(\"quora.com\")\r\n{\r\n /* Find your friends on Quora */\r\n div.section.import_contacts_section,\r\n /* Trending topics and people */\r\n div.section.simple_more_button.people_suggestions\r\n {\r\n display: none;\r\n }\r\n\r\nbody, .header, .wrapper { max-width:920px !important; }\r\n\r\n.contents, .w8 { width: 850px !important; }\r\n.w6_5 { width: 650px !important;}\r\n\r\n#layout_header .contents { padding:0 10px 0 10px !important; width:95% !important;}\r\n.topic_page .main_col, .home_page .main_col { margin-left: 0px !important; }\r\n.question_page .main_col { margin-left: -110px !important; }\r\n\r\n.logo {display:none !important;}\r\n\r\n}" } }1-10 version https://git-lfs.github.com/spec/v1 oid sha256:22ea80d86bcfb0c77faacd443b97a5d85b7932b4a45084c77a839d77a7386ca2 size 28350 tsirolnik/AffiliTest-JSpackage.json { "name": "affilitest", "version": "1.0.0", "description": "AffiliTest's API implemented in JS for Node", "main": "lib/index.js", "repository": "https://github.com/tsirolnik/AffiliTest-JS.git", "author": "", "license": "MIT", "dependencies": { "request": "^2.87.0" } } { "name": "MKSensor", "version": "0.1.0", "summary": "A short description of MKSensor.", "description": "TODO: Add long description of the pod here.", "homepage": "https://github.com/moko-sensor/-iOS", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "Chengang": "" }, "source": { "git": "https://github.com/moko-sensor/-iOS.git", "tag": "0.1.0" }, "platforms": { "ios": "8.0" }, "subspecs": [ { "name": "MKSDKForDevice", "source_files": "MKSensor/Classes/MKSDKForDevice/**", "dependencies": { "CocoaAsyncSocket": [ ] } }, { "name": "MKSDKForMqttServer", "source_files": "MKSensor/Classes/MKSDKForMqttServer/**", "dependencies": { "MQTTClient": [ "0.14.0" ] } } ] } 
contratos/1.AD.EDCA_JSON_20181112_135858/1481480.json {"uri":"http://172.29.100.36:8080/OCDSWS/rest/ocid/ocds-07smqs-1481480","version":"1.1","extensions":["https://raw.githubusercontent.com/CompraNet/ocds_releasePublisher_extension/master/extension.json","https://raw.githubusercontent.com/CompraNet/ocds_schemeUrl_extension/master/extension.json","https://raw.githubusercontent.com/CompraNet/ocds_cycle_extension/master/extension.json"],"publishedDate":"2018-11-12T14:28:44Z","publicationPolicy":"https://compranetinfo.funcionpublica.gob.mx/descargas/politica-publicacion-EDCA-MX.pdf","releases":[{"publisher":{"uid":"27511","name":"SECRETARÍA DE LA FUNCIÓN PÚBLICA","uri":"http://www.gob.mx/sfp"},"cycle":2017,"ocid":"ocds-07smqs-1481480","id":"SFP-1481480-2018-11-12","date":"2017-09-11T07:12:24Z","tag":["tender","award"],"initiationType":"tender","parties":[{"name":"Servicio de Administración Tributaria","id":"SAT-284","roles":["buyer"]},{"name":"SAT-Administración de Operación de Recursos y Servicios 6 #006E00002","id":"SAT970701NN3-006E00002","identifier":{"id":"SAT970701NN3-006E00002","legalName":"SAT-Administración de Operación de Recursos y Servicios 6 #006E00002","scheme":"MX-RFC","uri":"https://portalsat.plataforma.sat.gob.mx/ConsultaRFC"},"address":{"streetAddress":"Avenida Hidalgo No. 77, Colonia Guerrero","locality":"Cuauhtémoc","region":"Ciudad de México","postalCode":"06300","countryName":"MX"},"contactPoint":{"name":"","email":"","telephone":"5802-0318"},"roles":["procuringEntity"]},{"name":"MARIA TERESA DE JESUS CHAVEZ INIESTRA","id":"4B27EFC18DE81DA444184656E61E2468","identifier":{"id":"4B27EFC18DE81DA444184656E61E2468","legalName":"MARIA TERESA DE JESUS CHAVEZ INIESTRA","scheme":"MX-RFC","uri":"https://portalsat.plataforma.sat.gob.mx/ConsultaRFC"},"address":{"streetAddress":"Luis de la Rosa 22 Presidentes Ejidales","locality":"Coyoacán","region":"MX-CMX","postalCode":"04470","countryName":"MÉXICO"},"contactPoint":{"name":" de ","email":"","telephone":"55-56565233"},"roles":["tenderer","supplier"]}],"buyer":{"name":"Servicio de Administración Tributaria","id":"SAT-284"},"tender":{"id":"1481480","title":"SERVICIO DE TRADUCCIÓN DE DOCUMENTOS DEL IDIOMA INGLÉS AL ESPAÑOL.","description":"SERVICIO DE TRADUCCIÓN DE DOCUMENTOS DEL IDIOMA INGLÉS AL ESPAÑOL, RELATIVO AL JUICIO DE NULIDAD 10582/14-17-13-6.","status":"complete","procuringEntity":{"name":"SAT-Administración de Operación de Recursos y Servicios 6 #006E00002","id":"SAT970701NN3-006E00002"},"items":[],"value":{"amount":0.0},"procurementMethod":"direct","procurementMethodRationale":"Art. 
42 párrafo primero","submissionMethod":["inPerson"],"tenderPeriod":{"startDate":"2017-09-11T07:12:24Z"},"enquiryPeriod":{"startDate":"2017-09-11T07:12:24Z"},"hasEnquiries":false,"awardPeriod":{"endDate":"2017-08-10T00:00:00Z"}},"language":"es","awards":[{"id":"1567433","title":"SERVICIO DE TRADUCCIÓN DE DOCUMENTOS DEL IDIOMA INGLÉS AL ESPAÑOL, RELATIVO AL JUICIO DE NULIDAD 10582/14-17-13-6.","description":"SERVICIO DE TRADUCCIÓN DE DOCUMENTOS DEL IDIOMA INGLÉS AL ESPAÑOL, RELATIVO AL JUICIO DE NULIDAD 10582/14-17-13-6.","status":"active","value":{"amount":29750.0,"currency":"MXN"},"suppliers":[{"name":"","id":"4B27EFC18DE81DA444184656E61E2468"}],"items":[{"id":"4933775","description":"Servicio de traducción de documentos del idioma inglés al español, relativo al juicio de nulidad 10582/14-17-13-6.","classification":{"scheme":"CUCOP: Clasificador Único de las Contrataciones Públicas","id":"33601001","description":"Servicios relacionados con traducciones","uri":"https://compranetinfo.funcionpublica.gob.mx/descargas/CUCOP.xlsx"},"quantity":1,"unit":{"name":"Servicio","value":{"amount":29750.0,"currency":"MXN"}}}],"contractPeriod":{"startDate":"2017-08-17T12:00:00Z","endDate":"2017-08-24T11:59:00Z"}}],"contracts":[{"id":1567433,"awardID":"1567433","title":"SERVICIO DE TRADUCCIÓN DE DOCUMENTOS DEL IDIOMA INGLÉS AL ESPAÑOL, RELATIVO AL JUICIO DE NULIDAD 10582/14-17-13-6.","description":"SERVICIO DE TRADUCCIÓN DE DOCUMENTOS DEL IDIOMA INGLÉS AL ESPAÑOL, RELATIVO AL JUICIO DE NULIDAD 10582/14-17-13-6.","status":"terminated","period":{"startDate":"2017-08-17T12:00:00Z","endDate":"2017-08-24T11:59:00Z"},"value":{"amount":29750.0,"currency":"MXN"},"items":[{"id":"4933775","description":"Servicio de traducción de documentos del idioma inglés al español, relativo al juicio de nulidad 10582/14-17-13-6.","classification":{"id":"33601001","description":"Servicios relacionados con traducciones"},"quantity":1,"unit":{"name":"Servicio","value":{"amount":29750.0,"currency":"MXN"}}}],"dateSigned":"2017-08-16T00:00:00Z"}]}],"publisher":{"uid":"27511","name":"SECRETARÍA DE LA FUNCIÓN PÚBLICA","uri":"http://www.gob.mx/sfp"}}{ "title": "A Year on the Sun", "credit": "NASA, Solar Dynamics Observatory", "explanation": "Our solar system's miasma of incandescent plasma, the Sun may look a little scary here. The picture is a composite of 25 images recorded in extreme ultraviolet light by the orbiting Solar Dynamics Observatory between April 16, 2012 and April 15, 2013. The particular wavelength of light, 171 angstroms, shows emission from highly ionized iron atoms in the solar corona at a characteristic temperatures of about 600,000 kelvins (about 1 million degrees F). Girdling both sides of the equator during approach to maximum in the 11-year solar cycle, the solar active regions are laced with bright loops and arcs along magnetic field lines. Of course, a more familiar visible light view would show the bright active regions as groups of dark sunspots. 
Three years of Solar Dynamics Observatory images are compressed into this short video.", "date": "2013-04-26", "hdurl": "https://apod.nasa.gov/apod/image/1304/743348main_SDOTimelapse_Sun_2k.jpg", "service_version": "v1", "media_type": "image", "url": "https://apod.nasa.gov/apod/image/1304/743348main_SDOTimelapse_Sun_900.jpg" }10-100 { "directions": [ "Cook bacon in a 10- to 12-inch heavy skillet over moderate heat, stirring occasionally, until lightly browned but not crisp, about 4 minutes.", "Add cauliflower and cook, stirring occasionally, until lightly browned, about 5 minutes.", "Add bell pepper, cream, raisins, capers, garlic, and bay leaf and cook over moderately low heat, covered, stirring occasionally, until cauliflower is tender, about 12 minutes. Add lemon juice and salt and pepper to taste. Discard bay leaf." ], "ingredients": [ "3 oz bacon (3 or 4 slices), cut crosswise into 1/4-inch strips", "2 lb cauliflower, cored and cut into 2- to 2 1/2-inch-wide florets", "1 red bell pepper, seeded and cut into 1/2-inch pieces", "3/4 cup heavy cream", "1/2 cup golden raisins", "1 tablespoon drained bottled capers", "1 teaspoon finely chopped garlic", "1 Turkish or 1/2 California bay leaf", "1 tablespoon fresh lemon juice" ], "language": "en-US", "source": "www.epicurious.com", "tags": [ "Fruit", "Pepper", "Side", "Quick & Easy", "Raisin", "Bacon", "Cauliflower", "Fall", "Capers", "Gourmet", "Wheat/Gluten-Free", "Peanut Free", "Tree Nut Free", "Soy Free", "No Sugar Added" ], "title": "Cauliflower with Bacon, Capers, Peppers, and Raisins", "url": "http://www.epicurious.com/recipes/food/views/cauliflower-with-bacon-capers-peppers-and-raisins-230476" } v1/states/nd/20200625.json {"date":20200625,"state":"ND","positive":3388,"probableCases":0,"negative":96051,"pending":null,"totalTestResultsSource":"totalTestEncountersViral","totalTestResults":161946,"hospitalizedCurrently":25,"hospitalizedCumulative":222,"inIcuCurrently":null,"inIcuCumulative":null,"onVentilatorCurrently":null,"onVentilatorCumulative":null,"recovered":3064,"lastUpdateEt":"6/25/2020 00:00","dateModified":"2020-06-25T00:00:00Z","checkTimeEt":"06/24 20:00","death":87,"hospitalized":222,"hospitalizedDischarged":null,"dateChecked":"2020-06-25T00:00:00Z","totalTestsViral":165399,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":3388,"deathConfirmed":null,"deathProbable":null,"totalTestEncountersViral":161946,"totalTestsPeopleViral":97103,"totalTestsAntibody":4677,"positiveTestsAntibody":147,"negativeTestsAntibody":4530,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"38","positiveIncrease":32,"negativeIncrease":724,"total":99439,"totalTestResultsIncrease":2894,"posNeg":99439,"dataQualityGrade":null,"deathIncrease":1,"hospitalizedIncrease":3,"hash":"c1f4943cad2dbcd6fab1068d39c7f8faf98b63b4","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} { "schema_version": "1.2.0", "id": "GHSA-55j6-m45q-vmr2", "modified": "2022-05-02T03:21:32Z", "published": "2022-05-02T03:21:32Z", "aliases": [ "CVE-2009-1092" ], "details": "Use-after-free vulnerability in the LIVEAUDIO.LiveAudioCtrl.1 ActiveX control in LIVEAU~1.OCX 7.0 for GeoVision DVR systems allows remote attackers to execute arbitrary code by calling the GetAudioPlayingTime method with certain arguments.", "severity": [ ], "affected": [ ], 
"references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2009-1092" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/49238" }, { "type": "WEB", "url": "https://www.exploit-db.com/exploits/8206" }, { "type": "WEB", "url": "http://retrogod.altervista.org/9sg_geovision_liveaudio_freedmem.html" }, { "type": "WEB", "url": "http://www.securityfocus.com/archive/1/501773/100/0/threaded" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/34115" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }{ "devDependencies": { "javascript-typescript-langserver": "^2.11.3", "sparql-language-server": "^4.0.0", "sql-language-server": "^0.12.0", "stardog-graphql-language-server": "^2.0.0", "turtle-language-server": "^3.0.0", "vscode-json-languageserver-bin": "^1.0.1", "yaml-language-server": "^0.15.0" } } elanthia-online/cartograph { "id": 13897, "title": [ "[Hearthstone, Pub]" ], "description": [ "Quieter than its neighbor to the west, the pub is filled with comfortable oak tables and chairs. Muted moonlight from a large leaded glass window on the north wall highlights the gleam of polished oak flooring. Deep green, plastered walls provide a backdrop for several murals depicting life in a country village. A large fireplace on the western wall promises warmth on the chilliest of winter nights.", "Quieter than its neighbor to the west, the pub is filled with comfortable oak tables and chairs. Muted light from a large leaded glass window on the north wall highlights the gleam of polished oak flooring. Deep green, plastered walls provide a backdrop for several murals depicting life in a country village. A large fireplace on the western wall promises warmth on the chilliest of winter nights." 
], "paths": [ "Obvious exits: west, out" ], "location": "Wehnimer's Landing", "wayto": { "14684": "west", "14688": "go door", "13500": "out" }, "timeto": { "14684": 0.2, "14688": 0.2, "13500": 0.2 }, "image": "wl-Hearthstone_Manor-1439662135.png", "image_coords": [ 424, 477, 491, 521 ] }node_modules/.cache/babel-loader/03d3cf9b3e1a04f439ad4d15b6378f85.json {"ast":null,"code":"var _interopRequireDefault = require(\"@babel/runtime/helpers/interopRequireDefault\");\n\nvar _interopRequireWildcard = require(\"@babel/runtime/helpers/interopRequireWildcard\");\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.default = void 0;\n\nvar _extends2 = _interopRequireDefault(require(\"@babel/runtime/helpers/extends\"));\n\nvar _classCallCheck2 = _interopRequireDefault(require(\"@babel/runtime/helpers/classCallCheck\"));\n\nvar _createClass2 = _interopRequireDefault(require(\"@babel/runtime/helpers/createClass\"));\n\nvar _possibleConstructorReturn2 = _interopRequireDefault(require(\"@babel/runtime/helpers/possibleConstructorReturn\"));\n\nvar _getPrototypeOf2 = _interopRequireDefault(require(\"@babel/runtime/helpers/getPrototypeOf\"));\n\nvar _inherits2 = _interopRequireDefault(require(\"@babel/runtime/helpers/inherits\"));\n\nvar _defineProperty2 = _interopRequireDefault(require(\"@babel/runtime/helpers/defineProperty\"));\n\nvar React = _interopRequireWildcard(require(\"react\"));\n\nvar _reactNative = require(\"react-native\");\n\nvar _jsxFileName = \"/Users/trensik/dev/react-native-paper/src/components/Dialog/DialogScrollArea.tsx\";\n\nvar DialogScrollArea = function (_React$Component) {\n (0, _inherits2.default)(DialogScrollArea, _React$Component);\n\n function DialogScrollArea() {\n (0, _classCallCheck2.default)(this, DialogScrollArea);\n return (0, _possibleConstructorReturn2.default)(this, (0, _getPrototypeOf2.default)(DialogScrollArea).apply(this, arguments));\n }\n\n (0, _createClass2.default)(DialogScrollArea, [{\n key: \"render\",\n value: function render() {\n return React.createElement(_reactNative.View, (0, _extends2.default)({}, this.props, {\n style: [styles.container, this.props.style],\n __source: {\n fileName: _jsxFileName,\n lineNumber: 52\n }\n }), this.props.children);\n }\n }]);\n return DialogScrollArea;\n}(React.Component);\n\n(0, _defineProperty2.default)(DialogScrollArea, \"displayName\", 'Dialog.ScrollArea');\n\nvar styles = _reactNative.StyleSheet.create({\n container: {\n borderColor: 'rgba(0, 0, 0, .12)',\n borderTopWidth: _reactNative.StyleSheet.hairlineWidth,\n borderBottomWidth: _reactNative.StyleSheet.hairlineWidth,\n paddingHorizontal: 24,\n flexGrow: 1,\n flexShrink: 1\n }\n});\n\nvar _default = DialogScrollArea;\nexports.default = _default;","map":null,"metadata":{},"sourceType":"script"}data/FakeNewsNet/code/fakenewsnet_dataset/gossipcop/real/gossipcop-895961/news content.json1-10 {"url": "https://omgcheckitout.com/serena-williams-shows-stunning-wedding-ring-cute-pic-baby-alexis/", "text": " shared a gorgeous photo of her baby Alexis which also shows her beautiful wedding ring!\n\nA new photo posted by the famous tennis player on Instagram shows 2-month-old , her daughter with , dressed in a leopard-print romper with a towel in the same print underneath her. The picture caption reads, \u201cDaddy knows how much I love leopard print.\u201d\n\nWhat she doesn\u2019t mention but we noticed immediately is a massive diamond ring! 
Serena and Alexis got married last week in a Beauty and the Beast-themed wedding in New Orleans, with a guest list that included , Beyonce, , , and more celebrities and friends.", "images": ["https://certify.alexametrics.com/atrk.gif?account=3BRhr1Fx9f207i", "https://omgcheckitout.com/wp-content/uploads/2017/08/shutterstock_8785473ej-696x870.jpg", "https://omgcheckitout.com/wp-content/uploads/2019/07/2-1-324x400.jpg", "https://omgcheckitout.com/wp-content/uploads/2019/07/5886253az-1-100x70.jpg", "https://omgcheckitout.com/wp-content/uploads/2016/12/logo_transparent_background_small.png", "https://omgcheckitout.com/wp-content/uploads/2018/02/shutterstock_9306496y-100x70.jpg", "https://omgcheckitout.com/wp-content/uploads/2017/12/shutterstock_8633262js-100x70.jpg", "https://pixel.quantserve.com/pixel/p-Lt5C7hC-NKF-K.gif", "https://omgcheckitout.com/wp-content/uploads/2019/07/912GwIyXrL-100x70.jpg", "https://omgcheckitout.com/wp-content/uploads/2017/08/shutterstock_8785473ej.jpg"], "top_img": "https://omgcheckitout.com/wp-content/uploads/2017/08/shutterstock_8785473ej.jpg", "keywords": [], "authors": [], "canonical_link": "https://omgcheckitout.com/serena-williams-shows-stunning-wedding-ring-cute-pic-baby-alexis/", "title": " Shows Stunning Wedding Ring in Cute Pic of Baby Alexis", "meta_data": {"viewport": "width=device-width, initial-scale=1.0", "og": {"image": {"identifier": "https://omgcheckitout.com/wp-content/uploads/2017/08/shutterstock_8785473ej-819x1024.jpg", "secure_url": "https://omgcheckitout.com/wp-content/uploads/2017/08/shutterstock_8785473ej-819x1024.jpg", "width": 819, "height": 1024}, "locale": "en_US", "type": "article", "title": " Shows Stunning Wedding Ring in Cute Pic of Baby Alexis", "description": " shared a gorgeous photo of her baby Alexis which also shows her beautiful wedding ring! A new photo posted by the famous tennis player on", "url": "https://omgcheckitout.com/serena-williams-shows-stunning-wedding-ring-cute-pic-baby-alexis/", "site_name": "OMG Check It Out !", "updated_time": "2017-11-25T07:19:37+00:00"}, "description": " shared a gorgeous photo of her baby Alexis which also shows her beautiful wedding ring! A new photo posted by the famous tennis player on", "article": {"publisher": "https://facebook.com/omgcheckitout", "tag": "Wedding Ring", "section": "Celebs", "published_time": "2017-11-24T07:19:28+00:00", "modified_time": "2017-11-25T07:19:37+00:00"}, "twitter": {"card": "summary", "description": " shared a gorgeous photo of her baby Alexis which also shows her beautiful wedding ring! A new photo posted by the famous tennis player on", "title": " Shows Stunning Wedding Ring in Cute Pic of Baby Alexis", "site": "@OMG_CheckItOut", "image": "https://omgcheckitout.com/wp-content/uploads/2017/08/shutterstock_8785473ej.jpg", "creator": "@OMG_CheckItOut"}, "theme-color": "#bc2025", "generator": "Powered by Visual Composer - drag and drop page builder for WordPress."}, "movies": [], "publish_date": 1511507968.0, "source": "https://omgcheckitout.com", "summary": ""}1-10 {"activities_easter": [], "activities_epiphany": [], "activities_michaelmas": [], "code": "GEOG3651", "title": "Visual Geographies: Representing Africa"}{ "compilerOptions": { /* Visit https://aka.ms/tsconfig.json to read more about this file */ /* Language and Environment */ "target": "ES2021", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. 
*/ "lib": ["ES2021"], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ /* Modules */ "module": "commonjs", /* Specify what module code is generated. */ // "rootDir": "./", /* Specify the root folder within your source files. */ "moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */ "resolveJsonModule": true, /* Enable importing .json files */ /* JavaScript Support */ // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */ // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */ /* Emit */ "sourceMap": true, /* Create source map files for emitted JavaScript files. */ "outDir": "dist", /* Specify an output folder for all emitted files. */ "removeComments": true, /* Disable emitting comments. */ // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ /* Interop Constraints */ "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. */ "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */ /* Type Checking */ "strict": true, /* Enable all strict type-checking options. */ // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ // "useUnknownInCatchVariables": true, /* Type catch clause variables as 'unknown' instead of 'any'. */ "noUnusedLocals": true, /* Enable error reporting when a local variables aren't read. */ "noUnusedParameters": true, /* Raise an error when a function parameter isn't read */ "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ /* Completeness */ "skipLibCheck": true /* Skip type checking all .d.ts files. 
*/ } } {"deprecated": "false", "authority-code": "CDE", "identifier": "CDE", "about": "http://publications.europa.eu/resource/authority/language/CDE", "prefLabel": {"@lang": "cde", "#text": "chenchu"}, "start.use": "1950-05-09"}shhuan/algorithmspy/topcoder/ProblemCrawler/TopCoderProblemCrawler/data/matches/d808deed82a253e03000a3120662dd65.json { "problems":[ { "avgPts":186.15, "constraints":[], "correctRatio":89.14, "definition":[], "div":0, "examples":[], "level":1, "link":"http://community.topcoder.com/stat?c=problem_statement&pm=11381&rd=14545", "name":"SRMCodingPhase", "notes":[], "submission":783 }, { "avgPts":264.64, "constraints":[], "correctRatio":77.62, "definition":[], "div":0, "examples":[], "level":2, "link":"http://community.topcoder.com/stat?c=problem_statement&pm=11496&rd=14545", "name":"SRMIntermissionPhase", "notes":[], "submission":143 }, { "avgPts":522.74, "constraints":[], "correctRatio":33.33, "definition":[], "div":0, "examples":[], "level":3, "link":"http://community.topcoder.com/stat?c=problem_statement&pm=11495&rd=14545", "name":"SRMChallengePhase", "notes":[], "submission":6 } ], "name":"Single Round Match 520 > Round 1", "link":"http://community.topcoder.com/stat?c=round_overview&er=5&rd=14545" }sections/1-744.json {"text":"","historical":"Prior Codifications\n\n1981 Ed., § 1-744.\n\n1973 Ed., § 1-1844.\n\nDC CODE § 1-744\n\nCurrent through December 11, 2012","credits":"(Nov. 17, 1979, 93 Stat. 866, Pub. L. 96-122, § 184.)","sections":[{"prefix":"a","text":" No person who has been convicted of, or has been imprisoned as a result of his conviction of, robbery, bribery, extortion, embezzlement, fraud, grand larceny, burglary, arson, a felony violation of federal or state law involving substances defined in § 102(6) of the Comprehensive Drug Abuse Prevention and Control Act of 1970 (21 U.S.C. § 802(6)), murder, rape, kidnapping, perjury, assault with intent to kill, any crime described in § 9(a)(1) of the Investment Company Act of 1940 (15 U.S.C. § 80a-9(a)(1)), a violation of any provision of this chapter, a violation of § 302 of the Labor-Management Relations Act, 1947 (29 U.S.C. § 186), a violation of Chapter 63 of Title 18, United States Code, a violation of § 874, 1027, 1503, 1505, 1506, 1510, 1951, or 1954 of Title 18, United States Code, a violation of the Labor-Management Reporting and Disclosure Act of 1959 (29 U.S.C. § 401), or conspiracy to commit any such crime or attempt to commit any such crime, or a crime in which any of the foregoing crimes is an element, shall serve or be permitted to serve: (1) As a fiduciary, investment counsel, agent, or employee of any Fund established by this chapter; or (2) as a consultant to any Fund established by this chapter; during or for 5 years after such conviction or after the end of such imprisonment, whichever is the later, unless prior to the end of such 5-year period, in the case of a person so convicted or imprisoned, his citizenship rights, having been revoked as a result of such conviction, have been fully restored, or the Board of Parole of the United States Department of Justice determines that such person's service in any capacity referred to in clause (1) or (2) of this subsection would not be contrary to the purposes of this chapter. 
Prior to making any such determination the Board of Parole shall hold an administrative hearing and shall give notice of such proceeding by certified mail to the state, county, and federal prosecuting officials in the jurisdiction or jurisdictions in which such person was convicted. The Board of Parole's determination in any such proceeding shall be final. No person shall knowingly permit any other person to serve in any capacity referred to in clause (1) or (2) of this subsection in violation of this subsection. Notwithstanding the preceding provisions of this subsection, no corporation or partnership will be precluded from acting as an administrator, fiduciary, officer, trustee, custodian, counsel, agent, or employee, of any Fund established by this chapter, or as a consultant to any Fund established by this chapter, without a notice, hearing, and determination by such Board of Parole that such service would be inconsistent with the intention of this section."},{"prefix":"b","text":" Whoever willfully violates this section shall be fined not more than $10,000, or imprisoned for not more than 1 year, or both."},{"prefix":"c","text":" For the purposes of this section:"},{"prefix":"1","text":" A person shall be deemed to have been \"convicted\" and to be under the disability of \"conviction\" from the date of entry of the judgment of the trial court or the date of the final sustaining of such judgment on appeal, whichever is the later event."},{"prefix":"2","text":" The term \"consultant\" means any person who, for compensation, advises or represents a Fund or who provides other assistance to such Fund concerning the operation of such Fund."},{"prefix":"3","text":" A period of parole shall not be considered as part of a period of imprisonment."}],"division":{"identifier":"I","text":"Government of District."},"title":{"identifier":"1","text":"Government Organization. 
(Refs & Annos)"},"chapter":{"identifier":"7","text":"District of Columbia Employees Retirement Program Management."},"subchapter":{"identifier":"V","text":"Fiduciary Responsibility; Civil Sanctions."},"heading":{"title":"1","chaptersection":"744","identifier":"1-744","catch_text":"Prohibition against certain persons holding certain positions."}}bundie1990/new-website0 {"jss.js":"sha256-5TsVvggpAVcDjMVzlYjCWfOcWfrvNc3BsbYdTgKPRaU=","jss.min.js":"sha256-HKI/gWR0BbPqDFnlh8aLQUu/lNKgXuB/3OuQy1k8tCM="}vishnu-vashist/es-lz-repo1 { "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "input": { "value": { "Id": "/subscriptions/5b5b79cf-7e6a-4a79-8e41-ea366c866649/providers/Microsoft.Authorization/roleAssignments/a97d2274-c930-8ba7-9a41-26fff666d589", "Name": "a97d2274-c930-8ba7-9a41-26fff666d589", "properties": { "DisplayName": "rtet-caf_launchpad_level0", "ObjectType": "ServicePrincipal", "PrincipalId": "", "RoleDefinitionId": "/providers/Microsoft.Authorization/RoleDefinitions/3bbff388-12c8-6021-6c32-c9501e5d4cb8", "RoleDefinitionName": "rtet-rg-caf-launchpad-contributor" }, "ResourceType": "Microsoft.Authorization/roleAssignments" } } } } {"id": 165777, "date": "2020-07-15 04:29:51", "user": "CycloneServers", "post": "\r\n<center>![](https://i.imgur.com/gsAT9Cd.png \"\")</center>\r\n\r\n\r\nCheck out our new location in Dallas, TX (Hosted on Dual E5-2630Lv3) 10GBps Connection!\r\n\r\nTest out our network with these IPs!\r\n\r\nNETWORK INFORMATION:\r\n\r\nSeattle WA:\r\nLooking Glass: [Click Here](https://lgseattle.cycloneservers.net/ \"Click Here\")\r\n\r\nNorth Carolina:\r\nTest iPv4: 192.168.127.12\r\n\r\nLos Angeles:\r\nTest iPv4: 172.16.31.10\r\n\r\nDallas:\r\nTest iPv4: 172.16.31.10\r\n\r\n-------------------------------------------------------------------------------\r\nOur Features:\r\n\r\n- Instant Activation (Subject to fraud checks)\r\n- One-click OS Install with ready templates - CentOS, Debian, Suse, Ubuntu and now Windows\r\n- Custom ISO - Use our available Windows & FreeBSD or bring your own ISO.\r\n- One-click CP Install - Available on our VPS Panel, cPanel, Plesk, Webuzo, Webmin, VestaCP, and more!. New Direct Admin!\r\n\r\n-------------------------------------------------------------------------------\r\n**Check out our current specials and deals!**\r\n\r\nPromo Code: **LETLSN45** (47% off). This is for new and existing clients. (Excludes Dallas)\r\n\r\nPromo Code: **15TXLET** for (15% off Recurring)This is for new and existing clients. ( Dallas)\r\n\r\nWe will be running a sale until August 1st for VPS's. This sale applies to ALL Customers. Order a new VPS and receive a free x2 RAM Allocation for the life of the VPS. Only Applicable in Dallas - Only Applicable on new VPS Purchases!\r\n\r\nWe now offer better than ever Game Server deals. 
Check them out [here](https://cycloneservers.net/game-hosting.html \"here\").\r\n\r\n-------------------------------------------------------------------------------\r\n**KVM - LAX - Seattle - NC**\r\n\r\nKVM 1GB | 25 GB SSD + 1 TB Bandwidth | $4.23/mo\r\nKVM 2GB | 35 GB SSD + 2 TB Bandwidth | $4.76/mo\r\nKVM 4GB | 55 GB SSD + 4 TB Bandwidth | $5.82/mo\r\n\r\n[Order here for LAX and Seattle and NC](https://clients.cycloneservers.net/cart.php \"Order here for LAX and Seattle and NC\")\r\n\r\nFor Dallas check out the plans [here!](https://clients.cycloneservers.net/cart.php?gid=23 \"here!\")\r\n\r\n-------------------------------------------------------------------------------\r\n\r\n**[Extras]**\r\n\r\nNow accepting CC. (Stripe) and (BTC and More Crypto) - We are willing to create custom plans or upgrade individual specifications (such as disk space) for an additional fee. - We own ALL our own hardware.\r\n\r\nDDOS Protection in all locations.\r\n\r\nPlease let us know if you have any questions. You can submit a ticket to reach a sales rep here: [Click Here to Contact Support/Sales](https://clients.cycloneservers.net/submitticket.php?step=2&deptid=4 \"Click Here to Contact Support/Sales\") Or on our discord [Join Here](https://discord.gg/jac3y2P \"Join Here\")\r\n\r\nThank you!!"}{ "name": "", "description": "You lack any skin or muscle and therefore you are physically weaker than others, but you also are much faster.", "type": "origins:modify_damage_taken", "modifier": { "operation": "multiply_total", "value": 0.15, "name": "Increases the damage taken by 15% (multiply_total)" } }{ "id" : "https://bluebrain.github.io/nexus/vocabulary/file", "project" : "myorg/myproj", "storage" : "https://bluebrain.github.io/nexus/vocabulary/disk-storage?rev=1", "storageType" : "DiskStorage", "attributes" : { "origin" : "Client", "uuid" : "8049ba90-7cc6-4de5-93a1-802c04200dcc", "location" : "http://localhost/file.txt", "path" : "file.txt", "filename" : "file.txt", "mediaType" : "text/plain; charset=UTF-8", "bytes" : 12, "digest" : { "@type" : "NotComputedDigest" } }, "rev" : 1, "instant" : "1970-01-01T00:00:00Z", "subject" : { "subject" : "username", "realm" : "myrealm", "@type" : "User" }, "@type" : "FileCreated" } { "meta": { "username": "jmohr", "repositoryName": "jmohr.github.io", "branch": "master", "cname": "justy.io" }, "site": { "title": "justy.io", "description": "Justin's personal junk goes in here." 
}, "socialnetwork": { "email": "", "twitter": "https://twitter.com/justyio", "linkedin": "https://www.linkedin.com/in/justinmohr", "github": "https://github.com/jmohr" }, "theme": { "name": "uno" } }rename.json1-10 { "src/scripts/name.coffee": "src/scripts/{%= name %}.coffee", "src/test/name_test.coffee": "src/test/{%= name %}_test.coffee" } 1-10 {"pos":"adj","translits":{"malakois":{"luk.7.25|6":["in","fine","clothing"],"mat.11.8|6":["in","delicate [clothing]","having been dressed?"]},"malakoi":{"1co.6.9|18":["nor","effeminate,","nor"]},"malaka":{"mat.11.8|11":["-","delicate [clothing]","wearing,"]}},"meanings":{"delicate":2,"fine":1,"effeminate":1},"meaningsCount":3,"occurences":4,"translation":"hebrew-6028"}{"expireTime":9007200867590254000,"key":"8a922d3c3e06fbed13aab99a4d8f9e64{\"background\":\"rgba(0,0,0,1)\",\"duotone\":false,\"grayscale\":false,\"rotate\":0,\"trim\":false,\"toFormat\":\"jpg\",\"toFormatBase64\":\"\",\"cropFocus\":17,\"fit\":\"cover\",\"width\":20,\"height\":27}","val":{"src":"data:image/jpeg;base64,/9j/2wBDABALDA4MChAODQ4SERATGCgaGBYWGDEjJR0oOjM9PDkzODdASFxOQASIAAhEBAxEB/8QAGAAAAwEBAAAAAAAAAAAAAAAAAAIDAQT/xAAXAQEBAQEAAAAAAAAAAAAAAAABAAID/9oADAMBAAIQAxAAAAGVJ5vlIUXW42hiJP8A/8QAGhABAAMBAQEAAAAAAAAAAAAAAQACEhEgIf/aAAgBAQABBQICNacQmTmYk3G/xt4//8QAFhEBAQEAAAAAAAAAAAAAAAAAABES/9oACAEDAQE/AdNIj//EABcRAAMBAAAAAAAAAAAAAAAAAAAQERL/2gAIAQIBAT8Bhl//xAAXEAEAAwAAAAAAAAAAAAAAAAAgACEx/9oACAEBAAY/ApQ1/wD/xAAbEAEAAgMBAQAAAAAAAAAAAAABABEhMUFREP/aAAgBAQABPyHrKPZyVyxi57mJFpamTaB0w9Jawair8//aAAwDAQACAAMAAAAQnODO/8QAFhEBAQEAAAAAAAAAAAAAAAAAABEB/9oACAEDAQE/EIQjEP/","width":20,"height":27,"aspectRatio":0.7407407407407407,"originalName":"hyperobjects.jpg"}}{ "name": "com.omiyagames.builds", "displayName": "Omiya Games - Multiplatform Build Settings", "version": "1.0.0-pre.1", "unity": "2021.1", "unityRelease": "10f1", "description": "Tool for creating multiple builds, for multiple platforms, in just one-click.", "category": "Tool", "type": "tool", "license": "MIT", "author": { "name": "", "email": "", "url": "https://www.omiyagames.com" }, "dependencies": { "com.unity.ext.nunit": "1.0.0", "io.github.icsharpcode.sharpziplib": "1.2.0", "com.omiyagames.common": "1.0.0", "com.omiyagames.cryptography": "1.0.0", "com.omiyagames.global": "1.0.0", "com.omiyagames.web": "1.0.0", "com.omiyagames.web.security": "1.0.0-pre.1" }, "keywords": [ "Omiya Games", "Scripts", "Template", "Tool", "Build", "Platform", "Multiplatform", "Continuous Integration" ], "scopedRegistries": [ { "name": "package.openupm.com", "url": "https://package.openupm.com", "scopes": [ "com.openupm", "io.github.icsharpcode.sharpziplib", "com.omiyagames.common", "com.omiyagames.global", "com.omiyagames.web", "com.omiyagames.web.security", "com.omiyagames.cryptography" ] } ] }thetruevincentchow/main {"acadYear":"2019/2020","description":"","title":"Inside Your Personal Computer","department":"Electrical and Computer Engineering","faculty":"Engineering","moduleCredit":"4","moduleCode":"GEM1500K","attributes":{"su":true},"semesterData":[]} 1-10 { "fieldName" : "attribute2", "fieldType" : 1, "label" : "ExchangeRate", "labelTranslations" : { "" : "Exchange Rate" }, "readOnly" : false, "requiredField" : true }{ "name": "homebridge-garagedoor-supla", "version": "0.1.2", "description": "Supla Garage Door plugin for homebridge: https://github.com/nfarina/homebridge", "license": "MIT", "keywords": [ "homebridge-plugin", "garage", "garage-door", "supla" ], "engines": { "node": ">=0.12.0", "homebridge": ">=0.4.0" }, "author": "Wojciech 'poopi' 
Lazarski", "repository": { "type": "git", "url": "git://github.com/poopisan/homebridge-garagedoor-supla.git" }, "dependencies": {} } { "name": "compare-date", "version": "0.0.1", "description": "A simplistic library for comparing dates with different formats", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "git+https://github.com/Mayvena/compare-date.git" }, "keywords": [ "date", "compare" ], "author": " <> (http://www.mayvena.net)", "license": "MIT", "bugs": { "url": "https://github.com/Mayvena/compare-date/issues" }, "homepage": "https://github.com/Mayvena/compare-date#readme" } Facility_Name,Index,ODCAF_Facility_Type,Street_No,Street_Name,Postal_Code,City,Prov_Terr Bonavista Memorial Public Library,1851,library or archives,..,..,..,..,nl Bonavista Museum,1852,museum,10,ryan's hill,A0C 1B0,bonavista,nl Cape Bonavista Lighthouse Provincial Historic Site,2230,heritage or historic site,1693,cape road,A1B 4J6,bonavista,nl Mockbeggar Plantation Provincial Historic Site,6234,heritage or historic site,230,roper street,A0C 1B0,bonavista,nl Ryan Premises National Historic Site of Canada,7997,museum,10,ryans rd,A0C 1B0,bonavista,nl { "id": 16394, "source": "nyman", "verse_id": 17879, "verse_count": 7, "reference": "10:28\u201334", "title": "", "html": "

The various towns in these verses are conquered during an Assyrian assault on Jerusalem, but the assault will fail because the Lord will intercede.<\/p> ", "audit": null }{ "author": "", "classification": "Free Verse", "keywords": [ "Living", "The Mind", "Arts", "Sciences", "Music", "Language", "Linguistics" ], "period": "", "reference": "http://www.poetryfoundation.org/poem/240754", "region": "", "text": [ "1.", "Cogs & cogs that cannot turn", "to recognitions: such dogs in the dark noonday!", "As if the tongue told & tolled", "Among", "the melancholic arcades.", "Where the", "advance toward the", "Time to try the knot, the Not", "Or to be caught", "Forever in nerve-traceries of Beauty . . .", "Unstrung, the structure is sound.", "2.", "Detour to far fires.", "To be counted missing . . . in a toroidal space", "That mimics the shape of its container, speech.", "The passive of, the possessive of\u2014", "Measureless intent,", "almost", ", the picture", "below the voice.", "Less a name than a substance", "Coming to stillness, star-inhabited.", "Less a substance than a sigh.", "3.", "thou,", "Divided here. O", "then", "Opened as earthen", "ring, cave-recorded.", "A mazed interior. Self-similar aisles of isles, pouring", "form from form.", "Lastness as device. Aligned as measurements (letters)\u2014", "as sensitive, all-too-sensitive compass", "needles forever seeking", "the frozen pole, the zero.", "Caption: \u201cAn end-of-century sailing ship,", "held fast in sheets of ice.\u201d", "4.", "No atmosphere is sufficient.", "An embryo in the brain is not yet breathing.", "There, the labor", "Of the living rock, where an ache, or bruise-ember", "will be discovered.", "Scored", "for Theremin, or permanently scarred.", "Where shadows point:", "lengthening to", ", as unmade", "scaffolding.", "Thus, repetition, resisted", "is the register of thought.", "Now here, even as staves are falling, another story", "\u2014intervallic\u2014cannot be told\u2014that is, besieged", "As the heart encaged in bone.", "The animal calls", ", disconsolate", "In its hollow mountain.", "5.", "Neither nor nor neither, time builds", "Its twelve tones between", "&", ".", "\u2014as the roots of the sunflower, arrayed over earthlight.", "Routes unreturning / term without terminus. Riding as reading", "Migrates", "underground.", "Writing as the righting", "Of fallen", "angles, of tangles of Accident\u2014", "arrives riven, a body never to be / surveyed.", "Abandoned in a wintry field, the sum of its travels", "\u2014its hunting the same as its haunting." 
], "title": "Mazed Interior", "year": "" }{"name": "", "address": "0xE478de485ad2fe566d49342Cbd03E49ed7DB3356", "metadata": {"output": {"abi": [{"constant": true, "inputs": [], "name": "proxyType", "outputs": [{"name": "proxyTypeId", "type": "uint256"}], "payable": false, "stateMutability": "pure", "type": "function"}, {"constant": true, "inputs": [], "name": "isDepositable", "outputs": [{"name": "", "type": "bool"}], "payable": false, "stateMutability": "view", "type": "function"}, {"constant": true, "inputs": [], "name": "implementation", "outputs": [{"name": "", "type": "address"}], "payable": false, "stateMutability": "view", "type": "function"}, {"constant": true, "inputs": [], "name": "appId", "outputs": [{"name": "", "type": "bytes32"}], "payable": false, "stateMutability": "view", "type": "function"}, {"constant": true, "inputs": [], "name": "kernel", "outputs": [{"name": "", "type": "address"}], "payable": false, "stateMutability": "view", "type": "function"}, {"inputs": [{"name": "_kernel", "type": "address"}, {"name": "_appId", "type": "bytes32"}, {"name": "_initializePayload", "type": "bytes"}], "payable": false, "stateMutability": "nonpayable", "type": "constructor"}, {"payable": true, "stateMutability": "payable", "type": "fallback"}, {"anonymous": false, "inputs": [{"indexed": false, "name": "sender", "type": "address"}, {"indexed": false, "name": "value", "type": "uint256"}], "name": "ProxyDeposit", "type": "event"}]}}, "version": 1, "checkPoints": []}{ "name": "neveragain", "version": "1.0.0", "description": "Never Again forget who the climate criminals are, starting with Siemens #StopAdani.", "main": "src/neverAgain.ts", "targets": { "main": false }, "scripts": { "test": "echo \"Error: no test specified\" && exit 1", "tsc": "tsc", "dev": "parcel src/index.html --no-source-maps", "clean": "rm -rf dist/", "rename": "mv dist/index.*.js dist/neverAgain.js", "move-core": "cp src/neverAgain.css dist/neverAgain.css && cp src/privacy_policy.txt dist/privacy_policy.txt", "move-ff": "cp src/manifestFirefox.json dist/manifest.json &&mkdir dist/icons && cp icons/na-48x48-optimized.svg dist/icons/na-48x48-optimized.svg", "move-chr": "cp src/manifestChrome.json dist/manifest.json && mkdir dist/icons && cp icons/icon16.png dist/icons/icon16.png && cp icons/icon48.png dist/icons/icon48.png && cp icons/icon128.png dist/icons/icon128.png", "build-core": "npm run clean && parcel build src/index.html --no-source-maps && npm run rename", "build-ff": "npm run build-core && npm run move-ff && npm run move-core", "build-chr": "npm run build-core && npm run move-chr && npm run move-core" }, "repository": { "type": "git", "url": "git+https://github.com/pablooliva/neveragain.git" }, "author": "", "license": "MIT", "bugs": { "url": "https://github.com/pablooliva/neveragain/issues" }, "homepage": "https://github.com/pablooliva/neveragain#readme", "dependencies": { "@popperjs/core": "^2.9.2", "mark.js": "^8.11.1" }, "devDependencies": { "@types/mark.js": "^8.11.5", "parcel": "^2.0.0-beta.2", "typescript": "^4.2.4", "web-ext-types": "^3.2.1" } } JordanZeotni/Explorers-Data { "Id": "dragon-claw", "Name": "", "MoveId": 208, "Description": "Inflicts damage on the target.\nRange: Enemy in front", "Category": 0, "BasePower": 16, "Type": 15, "BasePP": 10, "Accuracy": 88 }0 { "$schema": "http://json-schema.org/draft-07/schema", "type": "object", "properties": { "nonTeachingAssignments": { "$ref": "#/definitions/NonTeachingAssignments" } }, "title":"nonTeachingAssignments", "required": [ 
"nonTeachingAssignments" ], "definitions": { "NonTeachingAssignments": { "$id": "#/properties/nonTeachingAssignmentsForAcademicCalendar", "type": "object", "required": [ "daysOfNonTeachingAssignments" ], "properties": { "daysOfNonTeachingAssignments": { "$id": "#/properties/daysOfNonTeachingAssignments", "type": "number" } } } }, "_osConfig": { "osComment": "Refer to _osconfig $comment section of Teacher.json", "privateFields": [], "signedFields": [] } }Sploot-NFT/sploot-generator {"name":"","dna":[{"code":1.7575,"color":"#A7194B","scale":0.7575},{"code":1.298,"color":"#FD5308","scale":0.298},{"code":0.0874,"color":"#696969","scale":0.0874},{"code":1.0824,"color":"#FABC02","scale":0.0824},{"code":1.2272,"color":"#D0EA2B","scale":0.2272},{"code":1.8242,"color":"#66B032","scale":0.8242},{"code":1.7993,"color":"#0392CE","scale":0.7993},{"code":1.644,"color":"#3D01A4","scale":0.644},{"code":2.3077,"color":"#FEFE33","scale":0.3077}],"attributes":[{"trait_type":"Speed","value":8},{"trait_type":"Stamina","value":4},{"trait_type":"Strength","value":2},{"trait_type":"Aggression","value":2},{"trait_type":"Creativity","value":3},{"trait_type":"Luck","value":8},{"trait_type":"Focus","value":8},{"trait_type":"Influence","value":7},{"trait_type":"Reflexes","value":4},{"trait_type":"Phobia","value":"Diet Soda"},{"trait_type":"Vice","value":"Custom Vans"},{"trait_type":"Role","value":"Superfan"},{"trait_type":"Personality","value":"Hot-headed"},{"trait_type":"Class","value":"Pessimist"},{"trait_type":"Affinity","value":"Mind"}],"image":""}{"type":"Feature","properties":{"type":"barangay","level":"4","label":"Agsoso, Loon, Bohol, Central Visayas (Region VII), PH","locale":"ph.central-visayas-region-vii.bohol.loon.agsoso","country_id":177,"country_reference":177,"country_name":"Philippines","region_id":"7","region_reference":"7","region_name":"Central Visayas (Region VII)","province_id":"15","province_reference":"15","province_name":"Bohol","city_id":"710","city_reference":"240","city_name":"Loon","barangay_id":"19869","barangay_reference":"5459","barangay_name":"Agsoso"},"geometry":{"type":"MultiPolygon","coordinates":[[[[123.824509,9.79817],[123.826218,9.78612],[123.816261,9.77935],[123.814484,9.79163],[123.817673,9.79432],[123.817917,9.79758],[123.820641,9.79881],[123.824509,9.79817]]]]}}protoboard.json0 { "browserify" : { "dist/conversion.js" : "src/browserify.js" }, "actualtarget": ["dist", "conversion.js"] } {"title": "Reactors: A Case for Predictable, Virtualized Actor Database Systems.", "fields": ["data architecture", "database transaction", "virtualization", "serializability", "online transaction processing"], "abstract": "The requirements for OLTP database systems are becoming ever more demanding. Domains such as finance and computer games increasingly mandate that developers be able to encode complex application logic and control transaction latencies in in-memory databases. At the same time, infrastructure engineers in these domains need to experiment with and deploy OLTP database architectures that ensure application scalability and maximize resource utilization in modern machines. In this paper, we propose a relational actor programming model for in-memory databases as a novel, holistic approach towards fulfilling these challenging requirements. Conceptually, relational actors, or reactors for short, are application-defined, isolated logical actors that encapsulate relations and process function calls asynchronously. 
Reactors ease reasoning about correctness by guaranteeing serializability of application-level function calls. In contrast to classic transactional models, however, reactors allow developers to take advantage of intra-transaction parallelism and state encapsulation in their applications to reduce latency and improve locality. Moreover, reactors enable a new degree of flexibility in database deployment. We present ReactDB, a system design exposing reactors that allows for flexible virtualization of database architecture between the extremes of shared-nothing and shared-everything without changes to application code. Our experiments illustrate latency control, low overhead, and asynchronicity trade-offs with ReactDB in OLTP benchmarks.", "citation": "Citations (1)", "year": "2018", "departments": ["University of Copenhagen", "University of Copenhagen"], "conf": "sigmod", "authors": [".....http://dblp.org/pers/hd/s/Shah_0001:Vivek", ".....http://dblp.org/pers/hd/s/Salles:Marcos_Antonio_Vaz"], "pages": 16}{ "Irlanda": "Republic of Ireland", "Republica Machedonia": "Republic of Macedonia", "Oceania": "Oceania", "Chipro": "Cyprus", "Montenegro": "Montenegro", "Monaco": "Monaco", "San Marino": "San Marino", "Finlanda": "Finland", "Asia": "Asia", "Andorra": "Andorra", "Cehia": "Czech Republic", "Ispania": "Spain", "Frânție": "France", "Danimarca": "Denmark", "Sãrghia": "Serbia", "Suidia": "Sweden", "Norveghia": "Norway", "Shwaitsã": "Switzerland", "Slovenia": "Slovenia", "Ucraina": "Ukraine", "Ghermãnia": "Germany", "Arbinișia": "Albania", "Staturle tu Europa": "List of sovereign states and dependent territories in Europe", "Islanda": "Iceland", "Britania Mari": "United Kingdom", "Moldova": "Moldova", "Letonia": "Latvia", "Olanda": "Netherlands", "Malta": "Malta", "Vatican": "Vatican City", "Arusia": "Russia", "Romãnia": "Romania", "Arusia albã": "Belarus", "Croația": "Croatia", "Estonia": "Estonia", "Africa": "Africa", "Turchia": "Turkey", "Austria": "Austria", "Polonia": "Poland", "Ungaria": "Hungary", "Italia": "Italy", "Vurgaria": "Bulgaria", "Gârția": "Greece" }{ "required": true, "minVersion": "0.8", "package": "io.github.simplycmd.camping.mixin", "compatibilityLevel": "JAVA_16", "mixins": [ "CampfireComfortMixin", "LiveFishingMixin", "SapScrapingMixin", "SleepingBagFreezingMixin", "SleepingBagSpawnpointMixin" ], "client": [ "HotSpringWaterColorMixin" ], "injectors": { "defaultRequire": 1 } } { "properties": { "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/7c5a74bf-ae94-4a74-8fcf-644d1e0e6e6f", "parameters": { }, "dependsOn": [ ], "displayName": "Require blob encryption for storage accounts" }, "kind": "policyAssignment", "id": "/providers/Microsoft.Blueprint/blueprints/ISO_27001_Shared_Services/artifacts/fc0d3d54-3c7a-474c-8937-46e7bb4120b9", "type": "Microsoft.Blueprint/blueprints/artifacts", "name": "fc0d3d54-3c7a-474c-8937-46e7bb4120b9" } {"polyfill":["/polyfill-3fabdde4c08129efdc8f.js"],"app":["/app-d29b580b5c922572a1a6.js"],"component---src-pages-404-js":["/component---src-pages-404-js-a1c5686f04c61c840a61.js"],"component---src-pages-index-js":["/component---src-pages-index-js-ead877eb74ad5d846644.js"],"component---src-pages-page-2-js":["/component---src-pages-page-2-js-9d97c18fa1739395ed54.js"],"component---src-pages-resume-page-tsx":["/component---src-pages-resume-page-tsx-df57f6facb65eb170cb4.js"],"component---src-pages-using-typescript-tsx":["/component---src-pages-using-typescript-tsx-d9dab359a34c7b40456b.js"]}bower.json { "name": "majordome", 
"version": "0.1.0", "homepage": "https://github.com/Banasura/majordome", "authors": [ " " ], "description": "A Dotclear plugin which allow user to easily create and manage user forms.", "license": "MIT", "dependencies": { "ie8-node-enum": "https://gist.github.com/ajb/8902451/raw/b68459289526cb76c2e22416d98d0dbd0b722fa0/ie8_node_enum.js", "jquery-ui": "~1.10.3", "jquery.scrollWindowTo": "https://gist.github.com/ajb/6519570/raw/cd741057495d0fb19e545a0f9a098efba3bef9c8/jquery.scrollWindowTo.js", "underscore": "~1.5.2", "underscore.mixin.deepExtend": "https://gist.github.com/ajb/6519561/raw/63682037af9b10200b05c1a3d5890903397b2103/underscore.mixin.deepExtend.js", "backbone": "~1.1.0", "backbone-deep-model": "~0.10.4", "rivets": "~0.5.13" }, "suppliedDependencies": ["jquery"] } { "url": "https://static.wikia.nocookie.net/griftlands_gamepedia_en/images/c/c2/Graft_system_shock.png/revision/latest?cb=20200731170141", "sha1": "23581af10e1b631d5a56f1742acf947a2afcdb27" }{"angular-chart.js":","angular-chart.min.js":"}{"resourceType":"DataElement","id":"AppointmentResponse","meta":{"lastUpdated":"2015-10-24T07:41:03.495+11:00"},"url":"http://hl7.org/fhir/DataElement/AppointmentResponse","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"path":"AppointmentResponse","short":"A reply to an appointment request for a patient and/or practitioner(s), such as a confirmation or rejection","definition":"A reply to an appointment request for a patient and/or practitioner(s), such as a confirmation or rejection.","min":0,"max":"*","type":[{"code":"DomainResource"}],"constraint":[{"key":"apr-1","severity":"error","human":"Either the participantType or actor must be specified","xpath":"(exists(f:participantType) or exists(f:actor))"}],"mapping":[{"identity":"ical","map":"VEVENT"},{"identity":"w5","map":"workflow.scheduling"}]}]}datasets/2D/SIGN/writer1002/sessionC/chevron_right-1-24.json 
{"name":"chevron_right","subject":1002,"date":"7122009-014219","paths":{"Pen":{"strokes":[{"x":360,"y":-716,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":351,"y":-692,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":332,"y":-648,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":291,"y":-598,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":227,"y":-538,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":159,"y":-460,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":74,"y":-380,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-19,"y":-292,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-127,"y":-209,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-234,"y":-122,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-350,"y":-44,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-455,"y":33,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-558,"y":98,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-650,"y":161,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-735,"y":215,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-807,"y":264,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-864,"y":307,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-902,"y":346,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":-924,"y":385,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":-925,"y":424,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":-908,"y":462,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":-872,"y":500,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":-817,"y":539,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":-742,"y":581,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":-655,"y":623,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":-557,"y":666,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":-454,"y":706,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":-347,"y":747,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":-247,"y":783,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":-153,"y":819,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":-70,"y":849,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":4,"y":881,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0},{"x":63,"y":911,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":32,"stroke_id":0},{"x":114,"y":938,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":33,"stroke_id":0},{"x":156,"y":967,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":34,"stroke_id":0},{"x":182,"y":990,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":35,"stroke_id":0},{"x":210,"y":1030,"w":null,"z
":null,"alpha":null,"beta":null,"gamma":null,"t":36,"stroke_id":0},{"x":220,"y":1062,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":37,"stroke_id":0},{"x":215,"y":1096,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":38,"stroke_id":0},{"x":201,"y":1132,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":39,"stroke_id":0},{"x":168,"y":1173,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":40,"stroke_id":0},{"x":118,"y":1219,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":41,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}1-10 { "description": "Runs the tQ linter to check files for problems. Called by https://api.door43.org/tx/lint/tq" } 10-100 {"name":"jquery.metadata","assets":[{"files":["jquery.metadata.js","jquery.metadata.min.js"],"version":"2.0","mainfile":"jquery.metadata.min.js"}],"versions":["2.0"],"zip":"jquery.metadata.zip","author":"","github":"https://github.com/jquery-orphans/jquery-metadata","homepage":"https://github.com/jquery-orphans/jquery-metadata","description":"This plugin is capable of silently, and automatically, extracting metadata from classes, random attributes, and child elements.","mainfile":"jquery.metadata.min.js","lastversion":"2.0"}{ "emittedFiles": [ "Deeply/README.md", "Deeply/nested/README.md", "Deeply/nested/module/README.md", "Deeply/nested/module/Resource.cs", "Provider.cs", "Pulumi.Foo-bar.csproj", "README.md", "Utilities.cs", "logo.png", "pulumiplugin.json" ] } 10-100 {"title": "Monetary Discount Strategies for Real-Time Promotion Campaign.", "fields": ["profit", "kernel density estimation", "software deployment", "thompson sampling", "revenue"], "abstract": "The effectiveness of monetary promotions has been well reported in the literature to affect shopping decisions for products in real life experience. Nowadays, e-commerce retailers are facing more fierce competition on price promotion in that consumers can easily use a search engine to find another merchant selling an identical product for comparing price.\n\nTo achieve more effectiveness on real-time promotion in pursuit of better profits, we propose two discount-giving strategies: an algorithm based on Kernel density estimation, and the other algorithm based on Thompson sampling strategy. We show that, given a pre-determined discount budget, our algorithms can significantly acquire better revenue in return than classical strategies with simply fixed discount on label price. We then demonstrate its feasibility to be a promising deployment in e-commerce services for real-time promotion.", "citation": "Citations (1)", "departments": ["National Cheng Kung University", "National Cheng Kung University", "National Cheng Kung University", "National Cheng Kung University", "Slice Technolog ... 
Mateo, CA, USA"], "authors": [".....http://dblp.org/pers/hd/l/Lin:Ying=Chun", "Chi-Hsuan Huang.....http://dblp.org/pers/hd/h/Huang:Chi=Hsuan", "Chu-Cheng Hsieh.....http://dblp.org/pers/hd/h/Hsieh:Chu=Cheng", "Yu-Chen Shu.....http://dblp.org/pers/hd/s/Shu:Yu=Chen", "Kun-Ta Chuang.....http://dblp.org/pers/hd/c/Chuang:Kun=Ta"], "conf": "www", "year": "2017", "pages": 10}014_lesson_API_Essential/src/main/resources/user.json { "id": 1, "username": "LordOfDarkness", "firstName": "Taras", "lastName": "Ivanov", "email": "", "password": "", "phone": "+380634445522", "userStatus": 0 }Human-Charity-Coin/node-humancharitycoin { "name": "@human-charity-coin/node-humancharitycoin", "version": "0.3.9", "description": "humancharitycoin client", "main": "lib/humancharitycoin.js", "types": "lib/humancharitycoin.js", "engines": { "node": ">= 0.4.0" }, "scripts": { "build": "npm run build", "deploy": "npm publish" }, "author": "CryptoLover705", "license": "MIT", "repository": { "type": "git", "url": "git+https://github.com/Human-Charity-Coin/node-humancharitycoin.git" } } 0 { "private": true, "name": "@fullcalendar-lw/core-preact", "version": "5.6.0-1", "title": "FullCalendar Core-Preact Package", "description": "Internal package", "dependencies": { "preact": "^10.0.5", "tslib": "^2.0.3" }, "main": "tsc/vdom.js", "types": "tsc/vdom.d.ts", "sideEffects": true, "repository": { "type": "git", "url": "https://github.com/aleksandr-erin/fullcalendar.git", "homepage": "https://github.com/aleksandr-erin/fullcalendar" }, "license": "MIT", "author": { "name": "", "email": "", "url": "http://arshaw.com/" } } { "active": true, "synopsis": "This is the initial SDK release for AWS Glue DataBrew. DataBrew is a visual data preparation tool that enables users to clean and normalize data without writing any code.", "generate-client-constructors": true }{"xcharts.css":"sha256-IuNKG2IitK09fwxF2idr8wtSP/NX3CvrqSjQR4uxdH8=","xcharts.js":"sha256-tDNiz7Iv11M+4ehx/YRIJ250/QhMAok9NHiHTQxiMkI=","xcharts.min.css":"sha256-VI2hf6exVaLAIHFpic9Mvy/KQ0N5oRsl6A4Yh6Up4d4=","xcharts.min.js":"sha256-jZr3dcNTw6hWKoVlzRQRhTWbw2OTq9Dj0xkUydxtnL0="}jsa2/EAST { "ControlId": "aks_apiServer", "Category": "Access", "Description": "Ensure API server does not allow" }[ {"word": "collateral", "translated": "zabezpieczenie po\u017cyczki", "explanation": "valuable property owned by someone who wants to borrow money, that they agree will become the property of the company or person who lends the money if the debt is not paid back", "pronunciation": "/k\u0259\u02c8l\u00e6t\u032c.\u025a.\u0259l/", "sentence": "She put up her house as collateral for a loan.", "media_fname": "collateral.mp3", "type": "noun", "sentence_gap": "She put up her house as __________ for a loan.", "gap_term": "collateral", "picture": "", "synonyms": ""}, {"word": "demean", "translated": "poni\u017ca\u0107, zniewa\u017ca\u0107", "explanation": "to cause someone to become less respected", "pronunciation": "/d\u026a\u02c8mi\u02d0n/", "sentence": "I wouldn't demean myself by asking for charity.", "media_fname": "demean.mp3", "type": "verb", "sentence_gap": "I wouldn't ______ myself by asking for charity.", "gap_term": "demean", "picture": "", "synonyms": ""}, {"word": "denounce", "translated": "pot\u0119pia\u0107", "explanation": "to criticize something or someone strongly and publicly", "pronunciation": "/d\u026a\u02c8na\u028ans/", "sentence": "The government's economic policy has been denounced on all sides.", "media_fname": "denounce.mp3", "type": "verb", "sentence_gap": "The government's 
economic policy has been _________ on all sides.", "gap_term": "denounced", "picture": "", "synonyms": ""}, {"word": "depict", "translated": "przedstawia\u0107", "explanation": "to represent or show something in a picture or story", "pronunciation": "/d\u026a\u02c8p\u026akt/", "sentence": "Her paintings depict the lives of ordinary people in the last century.", "media_fname": "depict.mp3", "type": "verb", "sentence_gap": "Her paintings ______ the lives of ordinary people in the last century.", "gap_term": "depict", "picture": "", "synonyms": ""}, {"word": "infringement", "translated": "naruszenie", "explanation": "an action that breaks a rule, law, etc.", "pronunciation": "/\u026an\u02c8fr\u026and\u0292.m\u0259nt/", "sentence": "Even minor infringements of the law will be severely punished.", "media_fname": "infringement.mp3", "type": "noun", "sentence_gap": "Even minor _____________ of the law will be severely punished.", "gap_term": "infringements", "picture": "", "synonyms": ""}, {"word": "ladle", "translated": "chochla", "explanation": "a very big spoon with a long handle and a deep cup-shaped part, used especially for serving soup", "pronunciation": "/\u02c8le\u026a.d\u0259l/", "sentence": "She dipped out soup with a ladle.", "media_fname": "ladle.mp3", "type": "noun", "sentence_gap": "She dipped out soup with a _____.", "gap_term": "ladle", "picture": "", "synonyms": ""}, {"word": "namely", "translated": "mianowicie", "explanation": "used when you want to give more detail or be more exact about something you have just said", "pronunciation": "/\u02c8ne\u026am.li/", "sentence": "We need to get more teachers into the classrooms where they're most needed, namely in high poverty areas.", "media_fname": "namely.mp3", "type": "adverb", "sentence_gap": "We need to get more teachers into the classrooms where they're most needed, ______ in high poverty areas.", "gap_term": "namely", "picture": "", "synonyms": ""}, {"word": "oar", "translated": "wios\u0142o", "explanation": "a long pole with a wide, flat part at one end, used for rowing a boat", "pronunciation": "/\u0254\u02d0r/", "sentence": "We took one oar each and rowed quickly to the shore.", "media_fname": "oar.mp3", "type": "noun", "sentence_gap": "We took one ___ each and rowed quickly to the shore.", "gap_term": "oar", "picture": "", "synonyms": ""}, {"word": "outset", "translated": "pocz\u0105tek", "explanation": "the beginning", "pronunciation": "/\u02c8a\u028at.set/", "sentence": "I made it clear right from the outset that I disapproved.", "media_fname": "outset.mp3", "type": "noun", "sentence_gap": "I made it clear right from the ______ that I disapproved.", "gap_term": "outset", "picture": "", "synonyms": ""}, {"word": "outskirts", "translated": "przedmie\u015bcia, peryferie", "explanation": "the areas that form the edge of a town or city", "pronunciation": "/\u02c8a\u028at.sk\u025d\u02d0ts/", "sentence": "They live on the outskirts of Milan.", "media_fname": "outskirts.mp3", "type": "noun", "sentence_gap": "They live on the _________ of Milan.", "gap_term": "outskirts", "picture": "", "synonyms": ""} ]data/politics/577586.json { "id": 577586, "title": "\"โพล\"เผยปชช.เกินครึ่งไม่ต้อนรับนักการเมืองเลวกลับสภาฯ | เดลินิวส์", "description": "“โพล” เผยปชช.ส่วนใหญ่เชื่อหลังลต.ได้รัฐบาลไร้ธรรมาภิบาล ชี้แนวทางแก้ปัญหาต้องมีบทลงโทษที่เข้มงวด ร้อยละ 73.52 มองการเลือกตั้งไม่ใช่สิ่งที่สำคัญที่สุด ผลสำรวจเกินครึ่งไม่ต้อนรับนักการเมืองเลวกลับสภาฯ หวั่นทำเสียหายต่อประเทศ", "keywords": "นิด้าโพล,คำถาม 4 
ข้อ,นักการเมือง,เดลินิวส์,เดลินิวส์ออนไลน์,เดลินิวส์วันนี้,ข่าวเดลินิวส์ออนไลน์,ข่าวเดลินิวส์ล่าสุด,ข่าว,ข่าวด่วน,ข่าววันนี้,ข่าวร้อน,ข่าวบันเทิง,ข่าวเด่นวันนี้,ข่าวดารา,ข่าวล่าสุด,คลิป,คลิปด่วน,คลิปข่าว,คลิปร้อน,คลิปฮอต,ท่องเที่ยว,สกู๊ปพิเศษ,ไลฟ์สไตล์,lifestyle,ความงาม,เรื่องย่อละคร,เทคนิค,ดวง,ตรวจหวย,special scoop,dailynews,เซ็กซ์,ไอที,ทิป,ยานยนตร์,หนังสือพิมพ์,สาระน่ารู้,ข่าวล่าสุด,สุขภาพ,แต่งบ้าน,อาหาร,แฟชั่น", "category": "politics", "content": "เมื่อวันที่ 3 มิ.ย. ศูนย์สำรวจความคิดเห็นสถาบันบัณฑิตพัฒนบริหารศาสตร์หรือนิด้าโพล เปิดเผยผลสำรวจความคิดเห็นของประชาชนเรื่อง “4 คำถามกับทิศทางอนาคตของประเทศไทย” ทำการสำรวจระหว่างวันที่ 1 – 2 มิ.ย.จากประชาชนที่มีอายุ 18 ปีขึ้นไปทั่วประเทศ จำนวน 1,250 หน่วยตัวอย่างพบว่า ร้อยละ15.68 เชื่อมีความเป็นไปได้มากที่จะได้รัฐบาลที่มีธรรมาภิบาลหลังเลือกตั้ง ร้อยละ 24.40 มีความเป็นไปได้ค่อนข้างมาก ที่จะได้รัฐบาลที่มีธรรมาภิบาล ร้อยละ 34.48 ระบุว่า มีความเป็นไปได้ค่อนข้างน้อย ที่จะได้รัฐบาลที่มีธรรมาภิบาล สำหรับความคิดเห็นของประชาชนเกี่ยวกับแนวทางการแก้ไขหากภายหลังการเลือกตั้งได้รัฐบาลที่ไม่มีธรรมาภิบาลพบว่าร้อยละ 37.76 ระบุว่าควรดำเนินตามข้อกฎหมายและมีบทลงโทษที่จริงจังและเข้มงวด เช่น ให้ลาออก ถอดถอนจากตำแหน่ง ตัดสิทธิทางการเมือง นอกจากนี้จะใช้วิธีการชุมนุม คัดค้าน ลงรายชื่อถอดถอนตำแหน่งทางการเมือง หากมีความผิดมากก็ควรยุบสภาแล้วจัดให้มีการเลือกตั้งใหม่ ร้อยละ 27.36 ระบุไม่แน่ใจว่าจะมี แนวทางแก้ไขอย่างไร ขณะที่คำถามการเลือกตั้งเป็นส่วนสำคัญส่วนหนึ่งของประชาธิปไตย แต่การเลือกตั้งอย่างเดียว โดยที่ไม่คำนึงถึงอนาคตของประเทศและเรื่องอื่นๆ เป็นความคิดที่ถูกต้องหรือไม่พบว่าร้อยละ73.52 ระบุว่า เป็นความคิดที่ไม่ถูกต้องเพราะการเลือกตั้งไม่ใช่ทางออกหรือสิ่งที่สำคัญที่สุด อนาคตของประเทศชาติไม่ได้ขึ้นอยู่กับการเลือกตั้งเพียงอย่างเดียว ขึ้นอยู่กับปัจจัยและองค์ประกอบหลายประการ ขณะที่ร้อยละ 14.96 เป็นความคิดที่ถูกต้องเพราะควรเคารพสิทธิและเสียงของประชาชนตามระบอบประชาธิปไตย ต้องเลือกผู้นำเข้ามาก่อน แล้วค่อยมาแก้ไขปัญหาของประเทศ เมื่อถามถึงความคิดเห็นของประชาชนเกี่ยวกับการให้โอกาสกับกลุ่มนักการเมืองที่มีพฤติกรรมไม่เหมาะสมในทุกกรณีกลับเข้ามาสู่การเลือกตั้งอีก พบว่าร้อยละ 69.28 ระบุว่าไม่ควรเพราะนักการเมืองที่ไม่ดีหรือมีพฤติกรรมไม่เหมาะสม ไม่ควรได้รับอำนาจให้กลับเข้ามาทำงานอีก หากใช้อำนาจในทางที่ไม่ดี ก็จะก่อให้เกิดผลเสียกับประเทศ ร้อยละ 11.12 ระบุว่าควรเพราะทุกคนย่อมได้รับโอกาสในการปรับปรุงตัวและอาจจะเปลี่ยนไปในทิศทางที่ดีขึ้น ร้อยละ 17.52 ควรดูเป็นรายกรณีไป.  ", "url": "https://www.dailynews.co.th/politics/577586", "date": "2017-06-03T18:35:18.500Z" }hltfbk/E3C-Corpusdata_collection/English/layer3/EN119709.json { "authors": [ { "author": "" }, { "author": "" }, { "author": "" }, { "author": "" }, { "author": "" }, { "author": "" } ], "doi": "10.1186/1471-2407-5-88", "publication_date": "2005-07-29", "id": "EN119709", "url": "https://pubmed.ncbi.nlm.nih.gov/16048646", "source": "BMC cancer", "source_url": "", "licence": "CC BY", "language": "en", "type": "pubmed", "description": "", "text": "The case of a 68-year-old male with carcinoid tumor arising in the urinary bladder is presented. Transurethral resection of a polypoid small tumor 0.4 cm in diameter was performed. Immunohistochemical study using neuroendocrine markers allowed a straightforward diagnosis of a low-grade neuroendocrine carcinoma (carcinoid tumor) of the urinary bladder. Immunohistochemistry demonstrated calcitonin immunoreactivity in the most of the tumor cells." 
}{"947118916450":{"type":"book open","bookName":"Epic%20Quest","subjectName":"school","fName":"users/"},"947118916735":{"type":"st"},"947118916736":{"type":"ss"},"947118937777":{"type":"sf"}}{ "法規性質": "命令", "法規名稱": "行政院國家科學委員會聘用人員遴聘規則", "法規網址": "https://law.moj.gov.tw/LawClass/LawAll.aspx?pcode=H0160009", "法規類別": "廢止法規>科技部", "最新異動日期": "20141001", "廢止註記": "廢", "是否英譯註記": "Y", "英文法規名稱": "Regulations Governing the Employment of Contract-based Personnel by the National Science Council of the Executive Yuan", "沿革內容": "1.中華民國六十年三月十五日行政院(60)台政貳字 5161 號令核准備查\r\n2.中華民國六十七年一月三十一日行政院(67)台人政貳字第 02108 號\r\n 函核定修正\r\n3.中華民國七十年十一月十七日行政院(70)台人政貳字第 32173 號函\r\n 核定修正\r\n 中華民國一百零三年二月二十七日行政院院臺規字第 1030125872 號公\r\n 告本規則之主管機關原為「行政院國家科學委員會」,自一百零三年三\r\n 月三日起變更為「科技部」,第 1 條、第 2 條、第 3 條、第 4 \r\n 條、第 5 條、第 6 條、第 7 條第 1 項所列主管機關掌理事項,\r\n 改由「科技部」管轄\r\n4.中華民國一百零三年十月一日科技部科部人字第 1030070432A 號令發\r\n 布廢止", "法規內容": [ { "條號": "第 1 條", "條文內容": "本會聘用人員之遴聘,除法令另有規定外,均適用本規則。" }, { "條號": "第 2 條", "條文內容": "本會之聘用人員,除委員由行政院遴聘外,計包括研究員、副研究員、助\r\n理研究員。" }, { "條號": "第 3 條", "條文內容": "本會研究員應就具有左列各款資格之一者遴聘之:\r\n一、曾任國內外大學教授或研究機構之研究員等相當職務,著有成績者。\r\n二、曾任國內外大學副教授或相當職務四年以上,著有成績者。\r\n三、國內外大學或研究院 (所) 得有博士學位,並曾從事學術研究或專業\r\n 工作四年以上,著有成績者。\r\n四、國內外大學或研究院 (所) 得有碩士學位,曾從事學術研究或專業工\r\n 作八年以上,並有著作或傑出貢獻者。\r\n五、國內外大學畢業,曾從事學術研究或專業工作十二年以上,並有著作\r\n 或傑出貢獻者。" }, { "條號": "第 4 條", "條文內容": "本會副研究員應就具有左列各款資格之一者遴聘之:\r\n一、曾任國內外大學副教授或研究機構之副研究員等相當職務,著有成績\r\n 者。\r\n二、國內外大學或研究院 (所) 得有博士學位,成績優良者。\r\n三、曾任國內外大學講師或研究機構相當職務四年以上,成績優良並有著\r\n 作者。\r\n四、國內外大學或研究院 (所) 得有碩士學位,曾從事學術研究或專業工\r\n 作四年以上,成績優良並有著作者。\r\n五、國內外大學畢業,曾從事學術研究或專業工作八年以上,並有著作或\r\n 傑出貢獻者。" }, { "條號": "第 5 條", "條文內容": "本會助理研究員應就具有左列各款資格之一者遴聘之:\r\n一、國內外大學或研究院 (所) 得有碩士學位者。\r\n二、國內外大學或獨立學院畢業,得有學士學位,成績優良者。" }, { "條號": "第 6 條", "條文內容": "本會為應業務需要,得以「合聘」或「兼任」方式遴聘副研究員以上之科\r\n技專業人員,其應具之資格條件比照專任人員辦理。" }, { "條號": "第 7 條", "條文內容": "本會聘用人員酬金,應就其工作內容審酌其職務與責任及所具專門知能條\r\n件評定薪點及折合金額報請行政院核定之。\r\n前項聘用人員酬金準用行政院暨所屬各級機關約聘聘用人員注意事項所定\r\n「聘用人員比照分類職位公務人員俸點支給報酬標準表」辦理。" }, { "條號": "第 8 條", "條文內容": "本規則自發布日施行。" } ] }msakai/icfpc2015 [{"seed":17818,"tag":"problem4-seed10-15-41-28.626553000000","problemId":4,"solution":"aEi!lbYuggothCthulhu fhtagn!lbaalCthulhu fhtagn!dpllR'lyehkYuggothR'lyehYuggothYuggothkadabddEi!Ia! Ia!R'lyehCthulhu fhtagn!alpkYuggothlIa! Ia!blYuggothYuggothdEi!kaR'lyehYuggothbR'lyehklddpR'lyehlR'lyehdR'lyehpR'lyehaEi!kadppkYuggothEi!R'lyehdaR'lyehkR'lyehkIa! Ia!llladddYuggothIa! Ia!Ia! Ia!baEi!kblllIa! 
Ia!a"}]{"organizations": [], "uuid": "ae414eadc97824d73473f2084a529252ab9f7830", "thread": {"social": {"gplus": {"shares": 0}, "pinterest": {"shares": 0}, "vk": {"shares": 0}, "linkedin": {"shares": 0}, "facebook": {"likes": 0, "shares": 0, "comments": 0}, "stumbledupon": {"shares": 0}}, "site_full": "www.tripadvisor.com", "main_image": "https://media-cdn.tripadvisor.com/media/photo-s/0d/f2/92/18/terrace-with-esbview.jpg", "site_section": "https://www.tripadvisor.com/Hotel_Review-g60763-d80110-Reviews-or40-The_Roger-New_York_City_New_York.html", "section_title": "The Roger - UPDATED 2017 Hotel Reviews & Price Comparison (New York City) - TripAdvisor", "url": "https://www.tripadvisor.com/ShowUserReviews-g60763-d80110-r467297223-The_Roger-New_York_City_New_York.html", "country": "US", "domain_rank": 189, "title": "Country House Boutique in the BIG APPLE", "performance_score": 0, "site": "tripadvisor.com", "participants_count": 1, "title_full": "Country House Boutique in the BIG APPLE - Review of The Roger, New York City, NY - TripAdvisor", "spam_score": 0.373, "site_type": "discussions", "published": "2017-03-14T02:00:00.000+02:00", "replies_count": 0, "uuid": "ae414eadc97824d73473f2084a529252ab9f7830"}, "author": "kevinnS5838TP", "url": "https://www.tripadvisor.com/ShowUserReviews-g60763-d80110-r467297223-The_Roger-New_York_City_New_York.html", "ord_in_thread": 0, "title": "Country House Boutique in the BIG APPLE", "locations": [], "entities": {"persons": [], "locations": [], "organizations": []}, "highlightText": "", "language": "english", "persons": [], "text": "A little GEM in the middle of the City, with a FIVE STAR personal touch.\nIf you wish to explore its a great location, convenient for everywhere in the City. Once inside you escape the hustle and bustle of NY, and relax in a chill laid back atmosphere, with very attentive staff.", "external_links": [], "published": "2017-03-14T02:00:00.000+02:00", "crawled": "2017-03-29T14:38:45.229+03:00", "highlightTitle": ""}{ "vorgangId": "120234", "VORGANG": { "WAHLPERIODE": "13", "VORGANGSTYP": "Kleine Anfrage", "TITEL": "Gefährdung der Mehrwegsysteme im Getränkebereich (G-SIG: 13010702)", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": [ { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "13/1947", "DRS_TYP": "Kleine Anfrage", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/13/019/1301947.pdf" }, { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "13/2116", "DRS_TYP": "Antwort", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/13/021/1302116.pdf" } ], "EU_DOK_NR": "", "SACHGEBIET": [ "Umwelt", "Landwirtschaft und Ernährung" ], "SCHLAGWORT": [ "Einweg-Verpackung", "Getränkedose", "Lebensmittel", { "_fundstelle": "true", "__cdata": "Mehrweg-Flasche" }, "Mineralwasser", "Recycling", "Umweltschutz" ], "ABSTRAKT": "Entwicklung der Mehrwegquoten für die einzelnen Getränkesegmente in den Bundesländern seit 1991, Erlaß einer Getränkemehrwegverordnung, Ausnahmen von der Pfandpflicht für Mehrweggebinde, Vorkehrungen gegen ein vermehrtes Recycling von Dosen-, Glas- und Verbundverpackungen, Ergebnisse einer Studie über ökologische und ökonomische Auswirkungen einer obligatorischen Pfand- und Rücknahmepflicht für Einweggetränkeverpackungen, Einführung einer PET-Flasche bzw. 
Leichtglas-Einwegflasche mit der Gefahr einer starken Konzentration im Mineralbrunnenbereich, Anzahl der im Markt befindlichen Getränkedosen, Recyclingfähigkeit von Weißblech- und Aluminiumdosen " }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BT", "URHEBER": "Kleine Anfrage ", "FUNDSTELLE": "04.07.1995 - BT-Drucksache 13/1947", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/13/019/1301947.pdf", "PERSOENLICHER_URHEBER": [ { "PERSON_TITEL": "Dr.", "VORNAME": "Liesel", "NACHNAME": "Hartenstein", "FUNKTION": "MdB", "FRAKTION": "SPD", "AKTIVITAETSART": "Kleine Anfrage" }, { "VORNAME": "Brigitte", "NACHNAME": "Adler", "FUNKTION": "MdB", "FRAKTION": "SPD", "AKTIVITAETSART": "Kleine Anfrage" } ] }, { "ZUORDNUNG": "BT", "URHEBER": "Antwort, Urheber : Bundesregierung, Bundesministerium für Umwelt, Naturschutz und Reaktorsicherheit (federführend)", "FUNDSTELLE": "07.08.1995 - BT-Drucksache 13/2116", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/13/021/1302116.pdf" } ] } } { "SingleUserAuthentication": { "SecretKey": "e019cb7e-0a04-45e3-9316-5306faee4786", "SingleUserPassword": "" }, "Logging": { "LogLevel": { "Default": "Debug", "System": "Information", "Microsoft": "Information" } }, "Serilog": { "MinimumLevel": { "Default": "Debug", "Override": { "Microsoft": "Warning", "System": "Information" } }, "Enrich": [ "FromLogContext", "WithThreadId" ], "Properties": { "SourceContext": "Application", "ServiceName": "SheepIt" }, "WriteTo": [ { "Name": "Console", "Args": { "outputTemplate": "[{Timestamp:HH:mm:ss.fff} {Level:u3}] ({ThreadId:000}) {SourceContext} {Message}{NewLine}{Exception}" } } ], "SeqTemplate": "[{Timestamp:o} {Level:u3}] ({ServiceName}/{ThreadId:000}) {SourceContext} {Message}{NewLine}{Exception}" }, "ErrorHandling": { "DeveloperDetails": true } } { "name": "qrcode-npm", "version": "0.0.3", "description": "QRCode Generator for JavaScript", "main": "qrcode.js", "homepage": "https://github.com/cmanzana/qrcode-npm", "scripts": { "test": "tap test/*.js" }, "repository": { "type": "git", "url": ":cmanzana/qrcode-npm.git" }, "keywords": [ "qrcode" ], "devDependencies": { "tap": "" }, "author": " <>", "license": "MIT" } 
{"name":"right","subject":1004,"date":"2122009-105001","paths":{"Pen":{"strokes":[{"x":-869,"y":-64,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":0,"stroke_id":0},{"x":-869,"y":-64,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":1,"stroke_id":0},{"x":-869,"y":-64,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":2,"stroke_id":0},{"x":-879,"y":-80,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":3,"stroke_id":0},{"x":-879,"y":-80,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":4,"stroke_id":0},{"x":-879,"y":-80,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":5,"stroke_id":0},{"x":-879,"y":-80,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":6,"stroke_id":0},{"x":-879,"y":-80,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":7,"stroke_id":0},{"x":-863,"y":-75,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":8,"stroke_id":0},{"x":-846,"y":-73,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":9,"stroke_id":0},{"x":-816,"y":-69,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":10,"stroke_id":0},{"x":-776,"y":-68,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":11,"stroke_id":0},{"x":-720,"y":-66,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":12,"stroke_id":0},{"x":-648,"y":-68,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":13,"stroke_id":0},{"x":-561,"y":-69,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":14,"stroke_id":0},{"x":-462,"y":-73,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":15,"stroke_id":0},{"x":-348,"y":-78,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":16,"stroke_id":0},{"x":-228,"y":-84,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":17,"stroke_id":0},{"x":-95,"y":-89,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":18,"stroke_id":0},{"x":43,"y":-94,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":19,"stroke_id":0},{"x":178,"y":-98,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":20,"stroke_id":0},{"x":311,"y":-101,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":21,"stroke_id":0},{"x":433,"y":-102,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":22,"stroke_id":0},{"x":547,"y":-103,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":23,"stroke_id":0},{"x":647,"y":-106,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":24,"stroke_id":0},{"x":738,"y":-98,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":25,"stroke_id":0},{"x":798,"y":-102,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":26,"stroke_id":0},{"x":844,"y":-95,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":27,"stroke_id":0},{"x":865,"y":-89,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":28,"stroke_id":0},{"x":865,"y":-89,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":29,"stroke_id":0},{"x":855,"y":-74,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":30,"stroke_id":0},{"x":822,"y":-68,"w":null,"z":null,"alpha":null,"beta":null,"gamma":null,"t":31,"stroke_id":0}]}},"device":{"osBrowserInfo":"Fujitsu-Siemens Lenovo X61 Tablet PC","resolutionHeight":null,"resolutionWidth":null,"windowHeight":null,"windowWidth":null,"pixelRatio":null,"mouse":false,"pen":true,"finger":false,"acceleration":false,"webcam":false}}data/terrainbuilding/2020/05/t3_gild94.json0 { "author": { "id": "t2_ha002is", "name": "JGoodberry" }, "date": { "day": 1589241600, "full": 1589320578, "month": 
1588291200, "week": 1589068800 }, "id": "t3_gild94", "picture": { "filesize": 565332, "fullUrl": "https://preview.redd.it/3xhamo7joey41.png?auto=webp&s=f04b9e77581446466ef6f8699d96663d86ac9bd2", "hash": "9d461eb1b3", "height": 435, "lqip": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAALCAIAAAD5gJpuAAAACXBIWXMAAAsTAAALEwEAmpwYAAACJklEQVQokQEbAuT9AHJ0b4eIiI+QlYaGjKKiqoF/hYV8eVxUUhoUExcSEkpAPTYcKgQCCgoKDg0JDA0KDACUlpWdnZ6ZlZd3fnZ3d1SBcFmKfHJuZmcoIR44ECN8NlRnM0chGBcbExUgFBQzHx0AnZuampGLrJ2GSUpJf3hboY05p52TlYqHd2ZUhl9XoGtmeGBMQCMUUTgrhnpnrJV4AKWYgrKhiq6dg09LQGdmZbGni4BxYYFuW5R/aJmDZ5N9XoNsVJ1+XrKTdMishZaBZQC1pH+pmnuYj4KDenONfW6Pe22OdWCkhmyniWief1eiglesh1uKblZ1YVR4bGBORkUAmaSJs6umfIaHhnppjnNXpotop5Jhp6Nipq17lqVktK5AppYebG9GUUtHU0dJXkpCAEVbN4WKc4ySWpKaZqCUVq6jHoOiL4Kyvn+06HuabbWsZYuFQoOaQ3aEOGJRPktBQQBQYB1cbShobjZzhTdwkStehRVxnhh0mlJPdIl8mCaHoD6KnFCAlUmAlENxfUI2OkAAgoBfdXxObncycpIlbaAeYJghepsni6ohTW9vbIk7eJ0kdJEqdpQico0xe401RkozAI5/bWx3P3KILWeZJW2aJX2YPImhSZewcpeTpaGWop6zr4O7yGmONmeIK3WJMVhbOgBwW0htcTdojCV1njGnqmjAsIifyNm1veDZusTbwtGgk6BKe6limZxqhjB0iTBeYkEg/edBTE9phAAAAABJRU5ErkJggg==", "url": "https://preview.redd.it/3xhamo7joey41.png?width=640&crop=smart&auto=webp&s=9374265462df8bb7be368e61740516a6d8c3346b", "width": 640 }, "score": { "comments": 3, "downs": 0, "ratio": 0.88, "ups": 27, "value": 27 }, "subreddit": { "id": "t5_2xy5e", "name": "TerrainBuilding" }, "tags": [], "title": "Careful, Lapras. Don't let them surround you. (WIP)", "url": "https://www.reddit.com/r/TerrainBuilding/comments/gild94/careful_lapras_dont_let_them_surround_you_wip/" } data/uso-styles/137282.json { "id": 137282, "name": "中国天气网 http://www.weather.com.cn 详情页面:简洁和美化", "description": "简洁和美化中国天气网 http://www.weather.com.cn 详情页面", "user": { "id": 379371, "name": "梅尘", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": "ccbyncsa" }, "updated": "2017-01-05T11:05:28.000Z", "weekly_install_count": 0, "total_install_count": 301, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/137282_after.png?r=1603786024", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": [ "https://userstyles.org/style_screenshots/137282_additional_24053.png?r=1603786024", "https://userstyles.org/style_screenshots/137282_additional_24054.png?r=1603786024", "https://userstyles.org/style_screenshots/137282_additional_24055.png?r=1603786024", "https://userstyles.org/style_screenshots/137282_additional_24056.png?r=1603786024" ], "license": null, "created": "2017-01-05T11:05:28.000Z", "category": "site", "raw_subcategory": "weather", "subcategory": "weather", "additional_info": null, "style_tags": [], "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n/* 记得启动 turn on 样式 */\r\n\r\n@-moz-document\r\n /* 单一 URL */\r\n url(\"http://www.weather.com.cn/weather40d/101200101.shtml\"),\r\n /* 带有相同前缀的一系列 URL */\r\n url-prefix(\"http://www.weather.com.cn/weather\"){\r\n \r\n \r\n /* 禁止内容出现 */\r\n \r\n /* 右侧栏 */\r\n .right,\r\n /* 顶部导航栏 */\r\n .weather_li,\r\n .topad_bg,\r\n /* 周边地区的天气 */\r\n #around,\r\n /* 高清图集 */\r\n .hdImgs,\r\n /* 近期重大天气事件 */\r\n .greatEvent,\r\n /* footer */\r\n .footer,\r\n /* 注释 */\r\n .explain,\r\n /* 意见建议 */\r\n #ab_yjfk,\r\n #ab_yjjy,\r\n #cyEmoji,\r\n /* 广告 */\r\n #adposter_6122,\r\n #adposter_6298,\r\n #adposter_6299,\r\n /* hot */\r\n #someDayNav > li:nth-child(4) > span:nth-child(2){\r\n display: none !important;\r\n }\r\n \r\n \r\n /* 美化 */\r\n \r\n /* header 
*/\r\n .search-box{\r\n float: left !important;;\r\n }\r\n .w_li_logo{\r\n float: right;\r\n }\r\n /* 之所以不能水平居中的原因,正是这个 div 的宽度设置太大 */\r\n .con{\r\n border: 1px solid LightGrey;\r\n box-shadow: 0px 0px 20px 2px rgba(0, 0, 0, 0.1);\r\n border-radius: 10px;\r\n margin-top: 10px;\r\n margin-bottom: 40px !important;\r\n width: 700px !important;\r\n }\r\n \r\n /* 底部居中置顶固定预测时期选项栏 */\r\n #someDayNav{\r\n position: fixed;\r\n bottom: 0;\r\n right: 0;\r\n background-color: #3C3C41;\r\n z-index: 2 !important;\r\n width: 310px !important;\r\n border: 1px;\r\n border-radius: 10px;\r\n }\r\n #table td{\r\n z-index: 0;\r\n }\r\n #someDayNav a{\r\n color: white;\r\n text-decoration: none;\r\n padding: 5px;\r\n }\r\n #someDayNav a:hover{\r\n background-color: #00BF00;\r\n cursor: pointer;\r\n border: none;\r\n border-radius: 5px;\r\n }\r\n \r\n /* 内容栏 */\r\n .con .left .cnav li.on{\r\n background-color: #00BF00;\r\n cursor: pointer;\r\n border: none;\r\n border-radius: 5px;\r\n }\r\n .con .left .cnav li {\r\n background-color: #3C3C41;\r\n display: inline;\r\n color: #252525;\r\n cursor: pointer;\r\n float: left;\r\n height: 32px;\r\n line-height: 32px;\r\n margin-right: 2px;\r\n text-align: center;\r\n width: 60px !important;\r\n position: relative;\r\n border-radius: 10px;\r\n}\r\n .left{\r\n margin-bottom: 15px;\r\n margin-top: 10px;\r\n margin-left: 10px;\r\n }\r\n .con .left .cnav li a{\r\n display: inline;\r\n }\r\n .con .left .cnav li:hover {\r\n background-color: #00BF00; \r\n}\r\n .c7d ul.t {\r\n margin-left: 8px !important;\r\n}\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/137282/http-www-weather-com-cn.user.js", "style_settings": [] }0 [ { "interface": "extas\\interfaces\\plugins\\IPluginRepository", "class": "extas\\components\\plugins\\PluginRepository" }, { "interface": "extas\\interfaces\\extensions\\IExtensionRepository", "class": "extas\\components\\extensions\\ExtensionRepository" }, { "interface": "extas\\interfaces\\stages\\IStageRepository", "class": "extas\\components\\stages\\StageRepository" }, { "interface": "extas\\interfaces\\packages\\IPackageClassRepository", "class": "extas\\components\\packages\\PackageClassRepository" }, { "interface": "extas\\interfaces\\repositories\\IRepository", "class": "extas\\components\\repositories\\Repository" }, { "interface": "extas\\interfaces\\repositories\\drivers\\IDriverRepository", "class": "extas\\components\\repositories\\drivers\\DriverRepository" } ]devnet/rewarders/33rFsFAZNpEkRyhkdCYvqCT1BsQ8RD8gyyE7pq6Nxwca/quarries/1.json { "quarry": { "index": 1, "isReplica": false, "mergePool": "3H6dVWixvfVBTrR1pFUkQBFGmgjaQ3mLBucMw1JJaaJw", "primaryQuarries": [], "primaryToken": { "decimals": 6, "mint": "" }, "primaryTokenInfo": { "address": "", "chainId": 103, "decimals": 6, "name": "Token D2Fv", "symbol": "D2Fvs" }, "quarry": "FvJUKb2VcemuEXtZaSmbWGSnXj5z5DWRDHfdkcVdpn9r", "replicaMint": "", "replicaQuarries": [], "slug": "d2fvs", "stakedToken": { "decimals": 6, "mint": "" } }, "rewarder": { "authority": "", "mintWrapper": "", "rewardsToken": { "decimals": 6, "mint": "" }, "rewardsTokenInfo": { "address": "", "chainId": 103, "decimals": 6, "name": "Token Fvkz", "symbol": "FvkzZ" }, "slug": "33rFsFAZNpEkRyhkdCYvqCT1BsQ8RD8gyyE7pq6Nxwca" }, "rewardsToken": { "address": "", "chainId": 103, "decimals": 6, "name": "", "symbol": "FvkzZ" }, "stakedToken": { "address": "", "chainId": 103, "decimals": 6, "name": "Token D2Fv", "symbol": "D2Fvs" }, "underlyingTokens": [] }dataset/klga/20120502.json 
version https://git-lfs.github.com/spec/v1 oid sha256:ba4834cac97cfe1c44f528614baacd49a58cc973dc6f5a143b8123796b02133b size 29305 api/v1/33/6432.json {"prefecture":"岡山県","city":"西粟倉村","prefecture_kana":"オカヤマケン","city_kana":"ニシアワクラソン","code":"336432","code5":"33643"}{ "name": "shortn", "version": "1.0.0", "description": "A URL Shortener made with Node, Express and Mongoose", "scripts": { "test": "NODE_ENV=test nodemon app.js", "devStart": "nodemon app.js" }, "repository": { "type": "git", "url": "git+https://github.com/Kodatos/Shortn.git" }, "author": "Kodatos", "license": "MIT", "bugs": { "url": "https://github.com/Kodatos/Shortn/issues" }, "homepage": "https://github.com/Kodatos/Shortn#readme", "dependencies": { "dotenv": "^6.0.0", "express": "^4.16.3", "mongoose": "^5.1.7", "valid-url": "^1.0.9" }, "devDependencies": { "nodemon": "^1.17.5" } } { "Address": "MultiAddress", "LookupSource": "MultiAddress", "StoneIndexComponent": { "assetId": "AssetId", "weight": "u32" }, "StoneIndex": { "stoneId": "AssetId", "name": "Vec", "components": "Vec" } } phanghos/react-native-gradient-list0 { "name": "react-native-gradient-list", "version": "1.0.0", "description": "Custom ScrollView and FlatList components which will render a gradient at the bottom as long as there is scrollable content", "main": "dist/index.js", "types": "dist/index.d.ts", "files": [ "dist" ], "scripts": { "build": "tsc", "lint": "eslint src --ext .ts,.tsx" }, "keywords": [ "react", "react native", "scrollview", "flatlist", "gradient" ], "author": " <> (https://github.com/phanghos)", "license": "MIT", "devDependencies": { "@types/react": "^17.0.39", "@types/react-native": "^0.66.16", "eslint-config-taitasciore-react-native": "^1.0.0", "metro-react-native-babel-preset": "^0.68.0", "prettier": "^2.5.1", "react": "^17.0.2", "react-native": "^0.67.2", "react-native-linear-gradient": "^2.5.6", "react-native-reanimated": "^2.4.1", "typescript": "^4.5.5" }, "peerDependencies": { "react": ">=16.8", "react-native": ">=0.62", "react-native-linear-gradient": "^2.5.6", "react-native-reanimated": ">=2" } } { "authors": [ { "author": "" }, { "author": "" } ], "doi": "10.1186/1752-1947-4-123", "publication_date": "2010-05-01", "id": "EN117725", "url": "https://pubmed.ncbi.nlm.nih.gov/20429893", "source": "Journal of medical case reports", "source_url": "", "licence": "CC BY", "language": "en", "type": "pubmed", "description": "", "text": "A 36-year old Caucasian, normally fit woman presented with abdominal distension and vomiting five days post-normal vaginal delivery at term. Localised peritonitis in the right iliac fossa developed in the next few days, and caecal perforation was found at laparotomy, without evidence of appendicitis or colitis." 
}src/Platformus.Domain.Backend/bundleconfig.json [ { "outputFileName": "wwwroot/areas/backend/css/platformus.domain.min.css", "inputFiles": [ "Areas/Backend/Styles/image_editor.css", "Areas/Backend/Styles/relation_editor.css", "Areas/Backend/Styles/image_drag_and_drop_area.css", "Areas/Backend/Styles/image_uploader_pop_up_form.css", "Areas/Backend/Styles/object_selector_pop_up_form.css" ] }, { "outputFileName": "wwwroot/areas/backend/js/platformus.domain.min.js", "inputFiles": [ "Areas/Backend/Scripts/image_uploader_behavior.js", "Areas/Backend/Scripts/image_uploader_form.js", "Areas/Backend/Scripts/object_selector_form.js", "Areas/Backend/Scripts/editors.js", "Areas/Backend/Scripts/base_editor.js", "Areas/Backend/Scripts/single_line_plain_text_editor.js", "Areas/Backend/Scripts/multiline_plain_text_editor.js", "Areas/Backend/Scripts/html_editor.js", "Areas/Backend/Scripts/date_editor.js", "Areas/Backend/Scripts/image_editor.js", "Areas/Backend/Scripts/relation_editor.js" ], "minify": { "enabled": true, "renameLocals": true }, "sourceMap": false } ]{"word":"font","definition":"A complete assortment of printing type of one size, including a due proportion of all the letters in the alphabet, large and small, points, accents, and whatever else is necessary for printing with that variety of types; a fount.\n\n1. A fountain; a spring; a source. Bathing forever in the font of bliss. Young. 2. A basin or stone vessel in which water is contained for baptizing. That name was given me at the font. Shak."}{"id": 39979, "date": "2014-12-31 11:12:54", "user": "fLoo", "post": "I'd like to give away my ST34 from Hetzner. It was a special Promo and instantly sold out. \r\n\r\nIntel\u00ae Core\u2122 i7-3770\r\nQuad-Core Ivy Bridge\r\ninkl. Hyper-Threading-Technologie\r\n\r\nArbeitsspeicher 32 GB RAM\r\nFestplatten 2 x 3 TB SATA 6 Gb/s 7200 rpm\r\nHDD (Software-RAID 1)\r\nAnbindung 1 GBit/s-Port\r\nBandbreite garantiert 200 MBit/s\r\nTraffic inklusive 20 TB\r\n\r\nI'd be happy to receive 20 \u20ac setup-fees. Server is payed until tomorrow (01.01.2015). 
So be fast.\r\n\r\nContact me @ fs -at- coresec.de or leave a reply down below.\r\n\r\nHappy new year every1 - stay healthy.\r\n\r\nFlorian"}data/raw/2018-04-22/1524391201.json [{"liveId":"5adc57500cf23c5f6329baf5","title":"王晓佳的直播间","subTitle":"在宿舍煮个自热火锅!","picPath":"/mediasource/live/1524389712055butPyx10Rq.jpg","startTime":1524389712223,"memberId":6742,"liveType":1,"picLoopTime":0,"lrcPath":"/mediasource/live/lrc/5adc57500cf23c5f6329baf5.lrc","streamPath":"http://2519.liveplay.myqcloud.com/live/2519_3573530.flv","screenMode":0,"roomId":"3866809","bwqaVersion":0}] {"date":"2008-03-01","platform":"剧场版","images":{"small":"https://lain.bgm.tv/pic/cover/s/08/f9/2000_UkduK.jpg","grid":"https://lain.bgm.tv/pic/cover/g/08/f9/2000_UkduK.jpg","large":"https://lain.bgm.tv/pic/cover/l/08/f9/2000_UkduK.jpg","medium":"https://lain.bgm.tv/pic/cover/m/08/f9/2000_UkduK.jpg","common":"https://lain.bgm.tv/pic/cover/c/08/f9/2000_UkduK.jpg"},"summary":"草帽海贼团的航海士娜美突然病倒了,一行人在没有船医的情况下,来到了有医疗大国之称的“磁鼓岛”,但不幸的是,岛上所有的医生皆因现任国王瓦波尔的恶政而遭到放逐,只剩下一位技术高超的130岁女医生住在磁鼓山上,鲁夫历经千辛万苦终于来到女医生的住所,在那里他们发现女医生身旁一只有着蓝色鼻子、精通人类语言而且还会用双脚站立的驯鹿多尼多尼·乔巴……","name":"ONE PIECE THE MOVIE エピソードオブチョッパー 冬に咲く、奇跡の桜","name_cn":"海贼王 绽放在寒冬的奇迹樱花","tags":[{"name":"剧场版","count":216},{"name":"海贼王","count":171},{"name":"乔巴","count":97},{"name":"催泪","count":70},{"name":"2008","count":56},{"name":"东映","count":41},{"name":"ONEPIECE","count":40},{"name":"JUMP","count":29},{"name":"草帽海贼团","count":26},{"name":"樱花","count":22},{"name":"热血","count":17},{"name":"漫画改","count":15},{"name":"剧场","count":13},{"name":"路飞","count":10},{"name":"日本","count":9},{"name":"東映アニメーション","count":7},{"name":"尾田荣一郎","count":5},{"name":"志水淳儿","count":5},{"name":"战斗","count":5},{"name":"海贼","count":5},{"name":"中井和哉","count":4},{"name":"冒险","count":4},{"name":"动画","count":4},{"name":"幻想","count":4},{"name":"漫改","count":4},{"name":"2008年3月","count":3},{"name":"东映动画","count":3},{"name":"志水淳児","count":3},{"name":"日本动画","count":3},{"name":"田中公平","count":3}],"infobox":[{"key":"中文名","value":"海贼王 绽放在寒冬的奇迹樱花"},{"key":"上映年度","value":"2008年3月1日"},{"key":"片长","value":"113分"},{"key":"官方网站","value":"http://www.toei-anim.co.jp/movie/2008_onepiece/"},{"key":"Copyright","value":"「2008ワンピース」製作委員会(日本)"},{"key":"话数","value":"1"},{"key":"原作","value":"尾田栄一郎"},{"key":"导演","value":"志水淳児"},{"key":"企划","value":"柴田宏明、新部分:狩野雄太(フジテレビ)、櫻田博之(東映アニメーション)"},{"key":"脚本","value":"上坂浩彦"},{"key":"音乐","value":"田中公平"},{"key":"人物设定","value":"舘直樹"},{"key":"美术监督","value":"平間由香"},{"key":"美术设定","value":"吉池隆司"},{"key":"色彩设计","value":"塚田劭"},{"key":"制作担当","value":"藤岡和実"},{"key":"演出","value":"新部分:小牧文"},{"key":"作画监督","value":"舘直樹"},{"key":"美术","value":"吉池隆司"},{"key":"制作","value":"フジテレビジョン、東映アニメーション"},{"key":"整体制作","value":"狩野雄太(フジテレビ)・坂上真倫(フジテレビ)・櫻田博之(東映アニメーション)"},{"key":"制作辅佐","value":"出樋昌稔(フジテレビ)、日高峻(フジテレビ)、小山弘起(東映アニメーション)"},{"key":"企画","value":"尾田荣一郎"}],"rating":{"rank":1329,"total":622,"count":{"1":0,"2":0,"3":1,"4":4,"5":34,"6":85,"7":232,"8":190,"9":46,"10":30},"score":7.3},"total_episodes":1,"collection":{"on_hold":11,"dropped":22,"wish":44,"collect":1010,"doing":4},"id":2000,"eps":1,"volumes":0,"locked":false,"nsfw":false,"type":2}{"_id":"beer_Urban_Wilderness_English_Pale","brewery":"Sleeping Lady Brewing Company","name":"Urban Wilderness English Pale","abv":"5.1","description":"This tasty English pale ale gets its charm from Imported Maris Otter Pale Malt, East Kent goldings and a few secret ingredients to keep our competitiors guessing. 
Styled after a classic English Bitter, Urban Wilderness is supremely balanced, smooth and easy to drink.","category":"British Ale","style":"Classic English-Style Pale Ale","updated":"2010-07-22 20:00:20"}1-10 {"name":"Holstentor-Gemeinschaftsschule","id":"SH-0707109","address":"Wendische Straße 55, 23558 Lübeck, Hansestadt","school_type":"Gemeinschaftsschule ohne Oberstufe","legal_status":1,"fax":"0451 - 1228467","phone":"0451 - 1228460","provider":"Hansestadt Lübeck","headmaster":"","state":"SH","programs":{"programs":[]},"full_time_school":true,"lon":10.659062,"lat":53.857395} { "id": 175154, "info": { "name": "GitHub Enhanced", "description": "Enables style upgrades for the latest version of Asana web desktop edition. Enhancements include slim top nav bar, rounded labels, label styling and support for wider monitors.", "additionalInfo": "Version 1.0.2", "format": "uso", "category": "github", "createdAt": "2019-09-09T17:47:25.000Z", "updatedAt": "2019-09-10T13:15:11.000Z", "license": "CC-BY-NC-SA-4.0", "author": { "id": 845064, "name": "seanfuture" } }, "stats": { "installs": { "total": 82, "weekly": 1 } }, "screenshots": { "main": { "name": "175154_after.png", "archived": true } }, "discussions": { "stats": { "discussionsCount": 0, "commentsCount": 0 }, "data": [] }, "style": { "css": "@-moz-document url-prefix(\"https://github.com\") {\r\n \r\n\t/* Support for wide monitors */\r\n\t.container-lg,\r\n\t.container {\r\n\t\twidth: auto;\r\n\t\tmax-width: 1360px;\r\n\t\tpadding-right: 16px!important;\r\n\t\tpadding-left: 16px!important;\r\n\t}\r\n\t\r\n\t/* Narrow top bar */\r\n\t.pagehead {\r\n\t\tpadding-top: 15px !important;\r\n\t}\r\n\t\r\n\t/* Rounded corner labels w/ white text at all times */\r\n\t.label-link,\r\n\t.IssueLabel {\r\n\t\tborder-radius: 50px;\r\n\t\tfont-size: 12px;\r\n\t\theight: 21px;\r\n\t\tline-height: 21px;\r\n\t\tpadding: 0px 10px 0 10px;\r\n\t\tcolor: #fff !important;\r\n\t\tbox-shadow: none;\r\n\t}\r\n\t\r\n\t/* Larger rounded corner label previews */\r\n\t.label-link {\r\n\t\tfont-size: 16px;\r\n\t\theight: 31px;\r\n\t\tline-height: 31px;\r\n\t\tpadding: 0px 15px 0 15px;\r\n\t}\r\n\t\r\n\t/* Downshift of labels in views */\r\n\t.labels {\r\n\t\tposition: relative;\r\n\t\ttop: 1px;\r\n\t}\r\n\r\n\t/* Margin separating titles and labels */\r\n\ta.h4 {\r\n\t\tmargin-right: 4px;\r\n\t}\r\n\t\r\n\r\n\t/* Fix for timeline top border being clipped */\r\n\t.timeline-comment {\r\n\t\tmargin-top: 1px;\r\n\t}\r\n\r\n}" } }jjatinggoyal/accessbility-indicators0 {"html_attributions": [], "results": [{"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 24.9058802, "lng": 86.32905889999999}, "viewport": {"northeast": {"lat": 24.9072577302915, "lng": 86.3304221302915}, "southwest": {"lat": 24.9045597697085, "lng": 86.32772416970849}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/hospital-71.png", "icon_background_color": "#F88181", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/hospital-H_pinlet", "name": "", "photos": [{"height": 4000, "html_attributions": [""], "photo_reference": "Aap_uEB0cqYovN5nNWO3NMMZoZJD1ud84HSMqK0Eerlxar58GIeBvlqFY41WiRsLgZzVaYou_NVUY_DR0aDMO3HhqBabAorG7C4c6nRD8tytAqBtuhPWJX4l5t3C40NYSNwDcfAn8jLsgodVtIGOfyGmHYR-MhwlkpTAPRucFY5T1DTcBZBU", "width": 2000}], "place_id": "ChIJd8lAl4KB8TkRRE7Wht02Ibo", "plus_code": {"compound_code": "W84H+9J Purbi Guguldih, Bihar, India", "global_code": "7MP8W84H+9J"}, "rating": 5, "reference": "ChIJd8lAl4KB8TkRRE7Wht02Ibo", "scope": "GOOGLE", "types": 
["hospital", "health", "point_of_interest", "establishment"], "user_ratings_total": 1, "vicinity": "Guguldih-Kebal Road, Purbi Guguldih"}, {"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 24.9071159, "lng": 86.3251081}, "viewport": {"northeast": {"lat": 24.9086239802915, "lng": 86.32638813029152}, "southwest": {"lat": 24.9059260197085, "lng": 86.3236901697085}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/hospital-71.png", "icon_background_color": "#F88181", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/hospital-H_pinlet", "name": "", "photos": [{"height": 1920, "html_attributions": ["Shivam 9534k"], "photo_reference": "Aap_uEBIkidgyRGn_d-XL34x7CSFR7Z61ZNaCWiNZtQ0WzFa6Ma5-I8ERj36ggc-GOqZ0DfEPDbLh1zKjSmPUC1sijgWbHNOT7K_R1pBG0k3WTtFD5OeAehJpv4QABOGlTyM7-cmucnX21tTTrBmQfo8CqGTW99PYrn92m630qfumCg-yDDh", "width": 1080}], "place_id": "ChIJ98PHdKyB8TkRoh7LZZQkWCY", "rating": 5, "reference": "ChIJ98PHdKyB8TkRoh7LZZQkWCY", "scope": "GOOGLE", "types": ["hospital", "health", "point_of_interest", "establishment"], "user_ratings_total": 1, "vicinity": "W84G+R2X, Purbi Guguldih"}, {"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 24.9074717, "lng": 86.3226167}, "viewport": {"northeast": {"lat": 24.90874643029151, "lng": 86.3238515302915}, "southwest": {"lat": 24.90604846970851, "lng": 86.32115356970849}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/hospital-71.png", "icon_background_color": "#F88181", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/hospital-H_pinlet", "name": "", "place_id": "ChIJYQwozuGB8TkRpdEPeW69RdU", "reference": "ChIJYQwozuGB8TkRpdEPeW69RdU", "scope": "GOOGLE", "types": ["hospital", "health", "point_of_interest", "establishment"], "vicinity": "W84F+X2Q, Purbi Guguldih"}, {"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 24.9074252, "lng": 86.3209242}, "viewport": {"northeast": {"lat": 24.9087766302915, "lng": 86.3222872802915}, "southwest": {"lat": 24.9060786697085, "lng": 86.3195893197085}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/hospital-71.png", "icon_background_color": "#F88181", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/hospital-H_pinlet", "name": "", "opening_hours": {"open_now": true}, "photos": [{"height": 4000, "html_attributions": [""], "photo_reference": "Aap_uEAOrrPjsjzl2HYeAXvt21WsDn3U94qYX4WPsUCXG29O-HqW9SFRM40jsOl4zf13ms6oacGF6jw0rFSl2s97dsCX-vm1gC0O03KIQjIfQeGK9P97ExzU_N9RPCWGq0THlkHdP-g_0ooX4gpBNy4QaSm6h6nmAl4ElOa", "width": 3000}], "place_id": "ChIJNZyTRF6B8TkRjXn_gOgPaOQ", "rating": 5, "reference": "ChIJNZyTRF6B8TkRjXn_gOgPaOQ", "scope": "GOOGLE", "types": ["hospital", "health", "point_of_interest", "establishment"], "user_ratings_total": 20, "vicinity": "W84C+X9F, Purbi Guguldih"}], "status": "OK"}beeonlinenz/silverstripe-fieldcounter { "name": "plato-creative/silverstripe-fieldcounter", "description": "Adds a counter to defined fields to show remaining characters", "type": "silverstripe-module", "keywords": ["silverstripe", "fields", "counter", "limit", "plato", "creative"], "authors": [{ "name": "", "email": "" }], "require": { "silverstripe/framework": "~4.0" }, "extra": { "installer-name": "fieldcounter" } } 10-100 
{"PREVALENCE_BY_GENDER_AGE_YEAR":{"TRELLIS_NAME":[],"SERIES_NAME":[],"X_CALENDAR_YEAR":[],"Y_PREVALENCE_1000PP":[]},"PREVALENCE_BY_MONTH":{"X_CALENDAR_MONTH":[],"Y_PREVALENCE_1000PP":[]},"CONDITIONS_BY_TYPE":{"CONCEPT_NAME":"Claim- Inpatient: Secondary diagnosis","COUNT_VALUE":295},"AGE_AT_FIRST_DIAGNOSIS":{"CATEGORY":["MALE","FEMALE"],"MIN_VALUE":[0,0],"P10_VALUE":[41,39],"P25_VALUE":[55,56],"MEDIAN_VALUE":[63,66],"P75_VALUE":[72,78],"P90_VALUE":[79,84],"MAX_VALUE":[88,302]}} { "name": "node-url-fuzz", "version": "0.0.1", "description": "Fuzz URL using Node.js !!", "exports": "./src/index.ts", "type": "module", "repository": { "type": "git", "url": "git+https://github.com/mohanagy/node-url-fuzz.git" }, "keywords": [ "Fuzzing", "Node.js", "Scrapping" ], "author": "", "license": "ISC", "bugs": { "url": "https://github.com/mohanagy/node-url-fuzz/issues" }, "homepage": "https://github.com/mohanagy/node-url-fuzz#readme", "bin": { "nfu": "./dist/esbuild/cli.js" }, "scripts": { "cli": "ts-node src/cli.ts", "lint": "eslint src/ --ext .js,.jsx,.ts,.tsx", "test": "jest", "clean": "rm -rf dist build package", "ts-node": "ts-node", "docs": "typedoc --entryPoints src/main.ts", "build": "tsc -p tsconfig.json", "build-all": "yarn clean && yarn build && yarn esbuild-node && yarn esbuild-browser", "esbuild-browser": "esbuild src/browser.ts --bundle --minify --sourcemap=external --outfile=dist/esbuild/browser.js", "esbuild-browser:dev": "esbuild src/browser.ts --bundle --outfile=dist/esbuild/browser.js", "esbuild-browser:watch": "esbuild src/browser.ts --bundle --watch --outfile=dist/esbuild/browser.js", "esbuild-node": "esbuild src/cli.ts --bundle --platform=node --minify --sourcemap=external --outfile=dist/esbuild/cli.js", "esbuild-node:dev": "esbuild src/index.ts --bundle --sourcemap=external --outfile=dist/esbuild/index.js", "esbuild-node:watch": "esbuild src/index.ts --bundle --watch --sourcemap=external --outfile=dist/index.js", "start": "npm run build && node ./dist/index.js", "postinstall": "npm run build" }, "devDependencies": {}, "dependencies": { "@types/express": "^4.17.13", "@types/jest": "^27.0.2", "@types/node": "^16.11.7", "@types/nodemailer": "^6.4.4", "@typescript-eslint/eslint-plugin": "^5.3.1", "@typescript-eslint/parser": "^5.3.1", "axios": "^0.24.0", "body-parser": "^1.19.0", "cheerio": "^1.0.0-rc.10", "emoji-strip": "^1.0.1", "esbuild": "^0.13.13", "eslint": "^8.2.0", "express": "^4.17.1", "express-handlebars": "^6.0.1", "inquirer": "^8.2.0", "jest": "^27.3.1", "morgan": "^1.10.0", "node-html-parser": "^5.1.0", "nodemailer": "^6.7.1", "puppeteer": "^11.0.0", "ts-jest": "^27.0.7", "ts-node": "^10.4.0", "typedoc": "^0.22.8", "typescript": "^4.4.4", "yargonaut": "^1.1.4", "yargs": "^17.2.1" }, "engines": { "node": "^12.20.0 || ^14.13.1 || >=16.0.0" } } {"nom":"Aubigny-sur-Nère","circ":"1ère circonscription","dpt":"Cher","inscrits":4132,"abs":1960,"votants":2172,"blancs":38,"nuls":14,"exp":2120,"res":[{"nuance":"REM","nom":"","voix":768},{"nuance":"LR","nom":"M. ","voix":600},{"nuance":"FN","nom":"M. ","voix":341},{"nuance":"FI","nom":"M. ","voix":180},{"nuance":"SOC","nom":"Mme ","voix":126},{"nuance":"ECO","nom":"Mme ","voix":41},{"nuance":"UDI","nom":"M. ","voix":26},{"nuance":"EXG","nom":"Mme ","voix":21},{"nuance":"DIV","nom":"M. 
","voix":17}]}json/pt/cota/2ebbdc82-d456-4e71-9747-eaea598f4645.json {"id":"2ebbdc82-d456-4e71-9747-eaea598f4645","card_title":"Pergaminho de Möbius","house":"Logos","card_type":"Artifact","front_image":"https://cdn.keyforgegame.com/media/card_front/pt/341_130_JRGHQ4HQ5QHC_pt.png","card_text":"Ação: Arquive o Pergaminho de Möbius e até 2 cartas da sua mão.","traits":"Item","amber":0,"power":"0","armor":"0","rarity":"Rare","flavor_text":"Era uma vez um pergaminho que dizia assim: _x000D_Era uma vez…","card_number":"130","expansion":341,"is_maverick":false,"is_anomaly":false}0 {"title": "连续3个赛季,阿森纳单赛季输球场次均上双;近40年来首次", "source": "天空体育", "time": "2020-07-22 08:28:18", "content": "虎扑7月22日讯 在今天凌晨结束的英超比赛中,阿斯顿维拉主场1-0击败阿森纳。\n就此,阿森纳本赛季联赛已经吞下10场失利,这也是他们连续第三个赛季单赛季输了10场联赛,这也是枪手自1981/82到1987/88赛季以来,首次解锁这一尴尬纪录。\n  (编辑:姚凡)"}{ "name": "goetas-webservices/soap-client", "authors": [ { "name": "", "email": "" } ], "license": "MIT", "require": { "php": "^5.6|^7.0", "psr/http-message": "^1.0", "php-http/httplug": "^1.0", "php-http/discovery": "^1.0", "php-http/message-factory": "^1.0.2", "symfony/dependency-injection": "^2.4|^3.0|^4.0|^5.0", "doctrine/instantiator": "^1.0.3", "goetas-webservices/xsd2php-runtime": "^0.2.2", "jms/serializer": "^1.2|^2.0|^3.0" }, "require-dev": { "phpunit/phpunit": "^4.8|^5.0", "goetas-webservices/wsdl2php": "^0.4", "goetas-webservices/wsdl-reader": "^0.3.1", "php-http/guzzle6-adapter": "^1.0", "php-http/message": "^1.0" }, "autoload": { "psr-4": { "GoetasWebservices\\SoapServices\\SoapClient\\": "src" } }, "autoload-dev": { "psr-4": { "GoetasWebservices\\SoapServices\\SoapClient\\Tests\\": "tests", "GoetasWebservices\\Xsd\\XsdToPhp\\Tests\\": "vendor/goetas-webservices/xsd2php/tests", "GoetasWebservices\\WsdlToPhp\\Tests\\": "vendor/goetas-webservices/wsdl2php/tests" } }, "extra": { "branch-alias": { "dev-master": "0.2-dev" } }, "bin": [ "bin/soap-client" ] } 1-10 [{"id":928535,"idParent":928087,"namaWilayah":"","tingkatWilayah":4,"idPro":928068,"idKab":65675,"idKec":928087,"idKel":928535,"namaPro":"","namaKab":"","namaKec":"","namaKel":"","kodeWilayah":"65.04.05.2003"},{"id":928537,"idParent":928087,"namaWilayah":"KAPUAK","tingkatWilayah":4,"idPro":928068,"idKab":65675,"idKec":928087,"idKel":928537,"namaPro":"","namaKab":"","namaKec":"","namaKel":"KAPUAK","kodeWilayah":"65.04.05.2005"},{"id":929148,"idParent":928087,"namaWilayah":"RIAN","tingkatWilayah":4,"idPro":928068,"idKab":65675,"idKec":928087,"idKel":929148,"namaPro":"","namaKab":"","namaKec":"","namaKel":"RIAN","kodeWilayah":"65.04.05.2002"},{"id":928536,"idParent":928087,"namaWilayah":"","tingkatWilayah":4,"idPro":928068,"idKab":65675,"idKec":928087,"idKel":928536,"namaPro":"","namaKab":"","namaKec":"","namaKel":"","kodeWilayah":"65.04.05.2004"},{"id":928538,"idParent":928087,"namaWilayah":"SAPARI","tingkatWilayah":4,"idPro":928068,"idKab":65675,"idKec":928087,"idKel":928538,"namaPro":"","namaKab":"","namaKec":"","namaKel":"SAPARI","kodeWilayah":"65.04.05.2006"},{"id":929147,"idParent":928087,"namaWilayah":"SEPUTUK","tingkatWilayah":4,"idPro":928068,"idKab":65675,"idKec":928087,"idKel":929147,"namaPro":"","namaKab":"","namaKec":"","namaKel":"SEPUTUK","kodeWilayah":"65.04.05.2001"}]jeongtae/TypeScript-Websitepackages/ts-twoslasher/test/results/unknown_compiler_value.json ## Invalid inline compiler value Got TS2015 for target but it is not a supported value by the TS compiler. 
Allowed values: es3,es5,es6,es2015,es2016,es2017,es2018,es2019,es2020,es2021,es2022,esnext0 { "name": "pcg-wasm", "version": "0.1.0", "description": "An implementation of the permuted congruential generator PRNG in C, compiled to WebAssembly and exported as a JavaScript package", "main": "dist/random.cjs.js", "module": "dist/random.esm.js", "license": "MIT", "devDependencies": { "@rollup/plugin-wasm": "^3.0.0", "rollup": "^1.29.0" }, "scripts": { "prebuild": "mkdir -p dist", "build": "emcc -Os pcg.c -o dist/pcg.wasm && rollup -c" } } {"lodash.backbone.js":","lodash.backbone.min.js":","lodash.compat.js":","lodash.compat.min.js":","lodash.js":"sha512-mHVcMw7e0g+WksE1Nuj6xvblSHt7yojRD9GtT82LIg9vweHWRfSVLfDjmRFLaEzx4YvTvO+3UT6zhv5wSmvsBA==","lodash.legacy.js":","lodash.legacy.min.js":"sha512-H623PWaBmoAPqYdmZhJDGsgm8jvprkRM6GAAdsGPVnFwvMsBdfv+sZFbvAiw4KtNd6MvQYHeHflaKCASmEYG6Q==","lodash.min.js":","lodash.mobile.js":","lodash.mobile.min.js":","lodash.underscore.js":","lodash.underscore.min.js":"sha5}{"date":20200717,"state":"DE","positive":13337,"negative":135273,"pending":null,"hospitalizedCurrently":55,"hospitalizedCumulative":null,"inIcuCurrently":7,"inIcuCumulative":null,"onVentilatorCurrently":null,"onVentilatorCumulative":null,"recovered":7315,"dataQualityGrade":"A+","lastUpdateEt":"7/16/2020 18:00","dateModified":"2020-07-16T18:00:00Z","checkTimeEt":"07/16 14:00","death":521,"hospitalized":null,"dateChecked":"2020-07-16T18:00:00Z","totalTestsViral":null,"positiveTestsViral":null,"negativeTestsViral":null,"positiveCasesViral":12355,"deathConfirmed":463,"deathProbable":58,"totalTestEncountersViral":null,"totalTestsPeopleViral":148610,"totalTestsAntibody":null,"positiveTestsAntibody":null,"negativeTestsAntibody":null,"totalTestsPeopleAntibody":null,"positiveTestsPeopleAntibody":null,"negativeTestsPeopleAntibody":null,"totalTestsPeopleAntigen":null,"positiveTestsPeopleAntigen":null,"totalTestsAntigen":null,"positiveTestsAntigen":null,"fips":"10","positiveIncrease":223,"negativeIncrease":3517,"total":148610,"totalTestResultsSource":"posNeg","totalTestResults":148610,"totalTestResultsIncrease":3740,"posNeg":148610,"deathIncrease":0,"hospitalizedIncrease":0,"hash":"8a9175089eeaae58371a7ba90f75a196d3edda4b","commercialScore":0,"negativeRegularScore":0,"negativeScore":0,"positiveScore":0,"score":0,"grade":""} package.json { "name": "@streetmix/illustrations", "version": "0.9.0", "description": "Streemix artwork SVG build process", "repository": { "type": "git", "url": "https://github.com/streetmix/illustrations.git" }, "files": [ "images", "swatches" ], "scripts": { "start": "echo Nothing to run!" 
}, "license": "BSD-3-Clause", "engines": { "node": ">=12.0.0", "npm": ">=6.0.0" }, "devDependencies": {} } { "saying": "şu'nu yaptırıyoruz Büşra'ya.", "season": 2, "episode": 5, "episodeName": "", "saidAt": "03:58", "saidBy": "Ersoy" }DOREMUS-ANR/recommender1-10 http://data.doremus.org/artist/2c0a13cd-aff7-3755-8862-6e5e76c41da3 http://data.doremus.org/artist/cf1f8bc2-1b7b-3bdf-8d2e-3f09d6753dfa http://data.doremus.org/artist/e2622e4e-d2c0-32b4-8648-87b345f4d1b1 http://data.doremus.org/artist/b65c372d-bce4-378a-a571-a05d59326329 http://data.doremus.org/artist/c3236926-33ba-3060-b232-69cf2013234e http://data.doremus.org/artist/d285fe9e-24ba-3514-ae9b-e9d1eddd1e16 http://data.doremus.org/artist/99f9cf35-a7be-33a6-a5a0-8fa8bfe857b6 http://data.doremus.org/artist/29d8b2f8-6c43-3f5d-94fd-0b2eb4f2ea34 http://data.doremus.org/artist/187ca331-d6c4-3269-96a4-aa9eee4fc6a9 http://data.doremus.org/artist/cc1906b8-116f-3407-b3e1-d25c7c021f2b http://data.doremus.org/artist/2c7d99b7-a3fb-3605-a438-84f4ef65e86b http://data.doremus.org/artist/e0b2ddb4-9810-36ab-a4c2-99c1ea8ca64c33kk/uso-archive { "id": 6759, "name": "Wowhead.com Wide", "description": "Self explanatory", "user": { "id": 8239, "name": "Vlet", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2008-04-28T08:31:24.000Z", "weekly_install_count": 0, "total_install_count": 505, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/6759_after.png?r=1606032444", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": null, "created": "2008-04-28T08:31:24.000Z", "category": "site", "raw_subcategory": "wowhead", "subcategory": "wowhead", "additional_info": null, "style_tags": [], "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n@-moz-document domain(\"wowhead.com\") {\r\n #layout { max-width: 100% !important; }\r\n #wrapper { margin: 0px !important; }\r\n #sidebar { visibility: hidden !important; }\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/6759/wowhead-com-wide.user.js", "style_settings": [] }{"vizceral.js":","vizceral.min.js":"}{"title": "Limits of energy saving for the allocation of data center resources to networked applications.", "fields": ["data center", "complete information", "data modeling", "resource allocation", "server"], "abstract": "Energy related costs are becoming one of the largest contributors to the overall cost of operating a data center, whereas the degree of data center utilization continues to be very low. Energy-aware dynamic provision of resources based on the consolidation of existing application instances can simultaneously address under-utilization of servers while highly reducing energy costs. Thus, energy costs cannot be treated separately from resource provision and allocation. However, current scheduling techniques based on market mechanisms do not specifically deal with such scenario. In this paper we model the problem of minimizing energy consumption of the allocation of resources to networked applications as a Stackelberg leadership game to find an upper bound of energy saving. The model is applied to a proportional-share mechanism where resource providers can maximize their profit by minimizing energy costs while users can select resources ensuring the minimum requirements are satisfied. 
We show that our mechanism can determine the optimal set of resources on and off, even in realistic conditions considering incomplete information, and heterogeneous applications.", "citation": "Citations (15)", "departments": ["Polytechnic University of Catalonia", "Polytechnic University of Catalonia"], "authors": ["\u00f3n.....http://dblp.org/pers/hd/l/Le=oacute=n:Xavier", ".....http://dblp.org/pers/hd/n/Navarro:Leandro"], "conf": "infocom", "year": "2011", "pages": 5}{"organizations": [], "uuid": "fa17866e4fd2a087a6375c35fdf5da631cdfe2c5", "thread": {"social": {"gplus": {"shares": 0}, "pinterest": {"shares": 0}, "vk": {"shares": 0}, "linkedin": {"shares": 0}, "facebook": {"likes": 0, "shares": 0, "comments": 0}, "stumbledupon": {"shares": 0}}, "site_full": "www.tripadvisor.com", "main_image": "https://media-cdn.tripadvisor.com/media/photo-s/0b/41/35/92/the-hotel--v11753430.jpg", "site_section": "https://www.tripadvisor.com/Hotel_Review-g187791-d232844-Reviews-Hotel_Julia-Rome_Lazio.html", "section_title": "Hotel Julia - UPDATED 2017 Reviews & Price Comparison (Rome, Italy) - TripAdvisor", "url": "https://www.tripadvisor.com/ShowUserReviews-g187791-d232844-r470998313-Hotel_Julia-Rome_Lazio.html", "country": "US", "domain_rank": 189, "title": "Perfect location", "performance_score": 0, "site": "tripadvisor.com", "participants_count": 1, "title_full": "Perfect location - Review of Hotel Julia, Rome, Italy - TripAdvisor", "spam_score": 0.0, "site_type": "discussions", "published": "2017-03-28T03:00:00.000+03:00", "replies_count": 0, "uuid": "fa17866e4fd2a087a6375c35fdf5da631cdfe2c5"}, "author": "Stefan P", "url": "https://www.tripadvisor.com/ShowUserReviews-g187791-d232844-r470998313-Hotel_Julia-Rome_Lazio.html", "ord_in_thread": 0, "title": "Perfect location", "locations": [], "entities": {"persons": [], "locations": [], "organizations": []}, "highlightText": "", "language": "english", "persons": [], "text": "What should you look for when booking a hotel in Rome? Proximity and easy access. When we visited Rome in March for 4 full days we looked for a hotel situated in the center of the action, right next to the Fontana di Trevi.\nAfter doing some research, we decided that would be a lovely place to be at, especially as we were really excited about viewing some of the surrounding areas at night. After we decided upon the area, we found a good deal on Agoda and we booked a small room at Hotel Julia.\nAccess:\nFor those coming in Rome by plane, from each of the two airports (Ciampino or Fiumicino) there are bus shuttles that takes you to Termini Train Station. From there you can take the subway on the red line for two stations, until you reach Barberini Metro Station. You exit left and keep left for about 100 meters where you turn left again, on Via delle Quattro Fontane. Second street right is Via Rasella, where the Hotel is situated at number 29.\nDescription:\nNow, before moving forward with our review, you have to understand the fact that Hotel Julia is not a modern hotel, it is not made out of steel and glass and does not boast with the latest technologies.\nWhat this hotel is, is a building from 1949, situated on a street not very circulated, between Fontana di Trevi, the Spanish Steps, Piazza Barberini and the famous Via Veneto.\nThe breakfast is served daily between 7:30 and 10:30 AM, offering a range of pastry, fruits and sandwich ingredients. 
We mostly enjoyed the cappuccino/espresso, which was served at request and you could order one per guest.\nStaff:\nFrom the moment we stepped inside for the first time we were greeted warmly. There is always someone at the reception who will answer any question and offer to help in any situation.\nThe cleaning staff was present daily with turndown service.\nWe interacted with 3 or 4 hotel staff at the reception, more than 5 cleaning personal and 2 waitresses. Everybody was really nice and always had a smile on their faces.\nProximity:\nAs we detailed above, the hotel is very close to many major sights as it is located in the historical center of Rome. You will have easy access to Trevi Fountain (450 m), Spanish Steps (700 m), Piazza Barberini (300 m) and Via Veneto (600 m). The Colosseum is located at 1.6 km away so it’s a short and enjoyable walk away.\nThis is the most important feature of the hotel. Because of it’s perfect location it gives you the option to visit everything at night, as they are all so close. Rome by night is an entire different city to visit.\nOur verdict:\nIf ever again in Rome we would definitely stay here. Situated so close to Fontana di Trevi, is a major advantage since the entire area is charming in the evening and at night.\nOverall, one of the nicest hotels we were ever accommodated in.", "external_links": [], "published": "2017-03-28T03:00:00.000+03:00", "crawled": "2017-03-29T21:47:45.561+03:00", "highlightTitle": ""}{"usageKey":2390,"scientificName":"Scrophulariaceae Juss.","canonicalName":"Scrophulariaceae","rank":"FAMILY","status":"ACCEPTED","confidence":96,"note":"Individual confidence: name=100; classification=-2; rank=0; status=1; nextMatch=10","matchType":"EXACT","alternatives":[{"usageKey":3170861,"scientificName":"Scrophularia L.","canonicalName":"Scrophularia","rank":"GENUS","status":"ACCEPTED","confidence":24,"note":"Individual confidence: name=25; classification=-2; rank=0; status=1","matchType":"FUZZY","kingdom":"Plantae","phylum":"Magnoliophyta","order":"Lamiales","family":"Scrophulariaceae","genus":"Scrophularia","kingdomKey":6,"phylumKey":49,"classKey":220,"orderKey":408,"familyKey":2390,"genusKey":3170861,"synonym":false,"class":"Magnoliopsida"},{"usageKey":3737395,"scientificName":"Scrophularia koeiei Rech.f.","canonicalName":"Scrophularia koeiei","rank":"SPECIES","status":"ACCEPTED","confidence":4,"note":"Individual confidence: name=5; classification=-2; rank=0; status=1","matchType":"FUZZY","kingdom":"Plantae","phylum":"Magnoliophyta","order":"Lamiales","family":"Scrophulariaceae","genus":"Scrophularia","species":"Scrophularia koeiei","kingdomKey":6,"phylumKey":49,"classKey":220,"orderKey":408,"familyKey":2390,"genusKey":3170861,"speciesKey":3737395,"synonym":false,"class":"Magnoliopsida"},{"usageKey":3739822,"scientificName":"Schrophularia Medik.","canonicalName":"Schrophularia","rank":"GENUS","status":"ACCEPTED","confidence":0,"note":"Individual confidence: name=0; classification=-2; rank=0; status=1","matchType":"FUZZY","kingdom":"Plantae","phylum":"Magnoliophyta","order":"Lamiales","family":"Scrophulariaceae","genus":"Schrophularia","kingdomKey":6,"phylumKey":49,"classKey":220,"orderKey":408,"familyKey":2390,"genusKey":3739822,"synonym":false,"class":"Magnoliopsida"}],"kingdom":"Plantae","phylum":"Magnoliophyta","order":"Lamiales","family":"Scrophulariaceae","kingdomKey":6,"phylumKey":49,"classKey":220,"orderKey":408,"familyKey":2390,"synonym":false,"class":"Magnoliopsida"}0 {"html_attributions": [], "results": [{"business_status": 
"OPERATIONAL", "geometry": {"location": {"lat": 23.8684337, "lng": 86.1501396}, "viewport": {"northeast": {"lat": 23.8698197302915, "lng": 86.15136518029149}, "southwest": {"lat": 23.8671217697085, "lng": 86.14866721970849}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/civic_building-71.png", "icon_background_color": "#7B9EB0", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/civic-bldg_pinlet", "name": "Police Station", "photos": [{"height": 2576, "html_attributions": ["pintu gupta"], "photo_reference": "Aap_uEAmpqkyRRJzYMGwnvA8YOrqX_B3oRtPvVNYaOfH0evIW0xkLTbGZ-WQCiW0seYmXdKFaPXa3YgfQzwwOmdXvZ4jTZW9D4gC-tuZK__dAfHA0k64baBa0ZBnQvUSVfa5PoeyhEot97Q1W3LcJgbyCp5fLnY5B1uvbjjbQcP07-W6tuY1", "width": 1932}], "place_id": "ChIJgXa_3Q0b9DkRJqagEEwvpSc", "plus_code": {"compound_code": "V592+93 Gomoh, Jharkhand, India", "global_code": "7MM8V592+93"}, "rating": 4, "reference": "ChIJgXa_3Q0b9DkRJqagEEwvpSc", "scope": "GOOGLE", "types": ["police", "point_of_interest", "establishment"], "user_ratings_total": 2, "vicinity": "Hariharpur, Gomoh"}, {"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 23.8685238, "lng": 86.1502824}, "viewport": {"northeast": {"lat": 23.8699247802915, "lng": 86.1514580802915}, "southwest": {"lat": 23.8672268197085, "lng": 86.14876011970848}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/civic_building-71.png", "icon_background_color": "#7B9EB0", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/civic-bldg_pinlet", "name": "Hariharpur Police Station", "opening_hours": {"open_now": true}, "photos": [{"height": 3120, "html_attributions": ["Sristi kumari , 1 , 3C Kumari"], "photo_reference": "Aap_uEBUpXMjuNnVUMuNftMJ0O8CN57JrnLoFnukbUg6-JDL0El5TOaJih3URl4Cwa8dH7T30rUNuvwaZ07XpSHMm7-eKfx5N8C_B6sKQrrY2YwYeAgiIwU", "width": 4160}], "place_id": "ChIJsTxh3Q0b9DkRYAK06PazAeE", "plus_code": {"compound_code": "V592+C4 Gomoh, Jharkhand, India", "global_code": "7MM8V592+C4"}, "rating": 2, "reference": "ChIJsTxh3Q0b9DkRYAK06PazAeE", "scope": "GOOGLE", "types": ["police", "point_of_interest", "establishment"], "user_ratings_total": 5, "vicinity": "Saheb Colony, Gomoh"}, {"business_status": "CLOSED_TEMPORARILY", "geometry": {"location": {"lat": 23.871308, "lng": 86.1516999}, "viewport": {"northeast": {"lat": 23.8726403802915, "lng": 86.1530473302915}, "southwest": {"lat": 23.86994241970849, "lng": 86.1503493697085}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/civic_building-71.png", "icon_background_color": "#7B9EB0", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/civic-bldg_pinlet", "name": "R.P.F Office", "permanently_closed": true, "place_id": "ChIJB0Q6ZhIb9DkREUOhb3QqBtg", "reference": "ChIJB0Q6ZhIb9DkREUOhb3QqBtg", "scope": "GOOGLE", "types": ["police", "local_government_office", "point_of_interest", "establishment"], "vicinity": "V5C2+GMG, Gomoh"}, {"business_status": "OPERATIONAL", "geometry": {"location": {"lat": 23.8728469, "lng": 86.1553895}, "viewport": {"northeast": {"lat": 23.8741505802915, "lng": 86.1571351802915}, "southwest": {"lat": 23.8714526197085, "lng": 86.15443721970848}}}, "icon": "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/civic_building-71.png", "icon_background_color": "#7B9EB0", "icon_mask_base_uri": "https://maps.gstatic.com/mapfiles/place_api/icons/v2/civic-bldg_pinlet", "name": "", "photos": [{"height": 4160, "html_attributions": ["B.K.MANDAL"], "photo_reference": "", "width": 
3120}], "place_id": "ChIJK_pJjO8b9DkRPuemyuvHsJg", "plus_code": {"compound_code": "V5F4+45 Gomoh, Jharkhand, India", "global_code": "7MM8V5F4+45"}, "reference": "ChIJK_pJjO8b9DkRPuemyuvHsJg", "scope": "GOOGLE", "types": ["police", "point_of_interest", "establishment"], "vicinity": "Shiv Mandir Road, Gomoh"}], "status": "OK"}{ "meta": { "title": "", "titleTemplate": "%s", "description": "Blog pessoal desenvolvido em Gatsby e Netlify CMS. Template adaptado de Stackrole.", "siteUrl": "https://fcortes.com.br", "image": "/assets/logo-icon.png", "iconimage": "/assets/logo-icon.png", "twitterUsername": "" }, "ga": "UA-XXXXXXXXX-X" }{"contributors": null, "truncated": false, "text": "5 things to think about since Paris https://t.co/i99Mn8xjeu", "is_quote_status": false, "in_reply_to_status_id": null, "id": 667854155034136576, "favorite_count": 0, "source": "Facebook", "retweeted": false, "coordinates": null, "entities": {"symbols": [], "user_mentions": [], "hashtags": [], "urls": [{"url": "https://t.co/i99Mn8xjeu", "indices": [36, 59], "expanded_url": "http://fb.me/Naoqt6nG", "display_url": "fb.me/Naoqt6nG"}]}, "in_reply_to_screen_name": null, "in_reply_to_user_id": null, "retweet_count": 0, "id_str": "667854155034136576", "favorited": false, "user": {"follow_request_sent": false, "has_extended_profile": false, "profile_use_background_image": true, "default_profile_image": false, "id": 16568402, "profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme12/bg.gif", "verified": false, "profile_text_color": "0C3E53", "profile_image_url_https": "https://pbs.twimg.com/profile_images/612857803158323200/B5pfCP1K_normal.jpg", "profile_sidebar_fill_color": "FFF7CC", "entities": {"url": {"urls": [{"url": "http://t.co/xYOTauxTwm", "indices": [0, 22], "expanded_url": "http://facebook.com/bevcowling", "display_url": "facebook.com/bevcowling"}]}, "description": {"urls": []}}, "followers_count": 10602, "profile_sidebar_border_color": "F2E195", "id_str": "16568402", "profile_background_color": "BADFCD", "listed_count": 160, "is_translation_enabled": false, "utc_offset": -21600, "statuses_count": 21778, "description": "Advocate 4 single payer health care, campaign finance reform, getting corporate money out of politics. 
Freedom,equality , retired USPS #uniteblue.com #NALC", "friends_count": 10384, "location": "O Fallon MO", "profile_link_color": "FF0000", "profile_image_url": "http://pbs.twimg.com/profile_images/612857803158323200/B5pfCP1K_normal.jpg", "following": false, "geo_enabled": true, "profile_banner_url": "https://pbs.twimg.com/profile_banners/16568402/1414595751", "profile_background_image_url": "http://abs.twimg.com/images/themes/theme12/bg.gif", "screen_name": "songbird_63366", "lang": "en", "profile_background_tile": false, "favourites_count": 6444, "name": "", "notifications": false, "url": "http://t.co/xYOTauxTwm", "created_at": "Thu Oct 02 23:01:50 +0000 2008", "contributors_enabled": false, "time_zone": "Central Time (US & Canada)", "protected": false, "default_profile": false, "is_translator": false}, "geo": null, "in_reply_to_user_id_str": null, "possibly_sensitive": false, "lang": "en", "created_at": "Fri Nov 20 23:56:53 +0000 2015", "in_reply_to_status_id_str": null, "place": null, "metadata": {"iso_language_code": "en", "result_type": "recent"}}{"mem":636723,"mem.free":107493,"processors":8,"instance.uptime":96910377,"uptime":96944998,"systemload.average":9.58,"heap.committed":522752,"heap.init":514048,"heap.used":415258,"heap":522752,"nonheap.committed":116160,"nonheap.init":2496,"nonheap.used":113971,"nonheap":0,"threads.peak":345,"threads.daemon":70,"threads.totalStarted":2414,"threads":72,"classes":11152,"classes.loaded":11238,"classes.unloaded":86,"gc.ps_scavenge.count":4382,"gc.ps_scavenge.time":112902,"gc.ps_marksweep.count":6,"gc.ps_marksweep.time":2102,"normalized.servo.rest.totaltime":0.0,"normalized.servo.rest.count":0.0,"gauge.servo.rest.min":0.0,"gauge.servo.rest.max":0.0,"gauge.servo.response.subscriptions.services":165.0,"gauge.servo.response.metrics":3.0,"gauge.servo.response.management.subscriptions.file":123.0,"gauge.servo.response.management.repository":143.0,"gauge.servo.response.unmapped":9.0,"gauge.servo.response.subscriptions.brokers":176.0,"httpsessions.max":-1,"httpsessions.active":0}[{"namaKab":"KOTA PROBOLINGGO","originalFilename":"foto_4x6.jpg","namaPartai":"Partai Bulan Bintang","id":283354,"noUrut":1,"nama":"","stringJenisKelamin":"Laki-Laki"},{"namaKab":"KOTA PROBOLINGGO","originalFilename":"foto ruli.jpg","namaPartai":"Partai Bulan Bintang","id":282305,"noUrut":2,"nama":".","stringJenisKelamin":"Perempuan"}]src/main/resources/assets/immersiveweapons/blockstates/cyan_stained_bulletproof_glass.json { "variants": { "": { "model": "immersiveweapons:block/cyan_stained_bulletproof_glass" } } }{ "register": "Registra·ti", "help.email": "S'email tua est cuada pro su pùblicu in manera predefinida.", "help.username_restrictions": "Unu nùmene de impitadore ùnicu intre %1 e %2 caràtere. Is àteros t'ant a pòdere mentovare cun @nùmeneimpitadore.", "help.minimum_password_length": "Sa password depet èssere a su mancu de %1 caràteres.", "email_address": "Indiritzu Email", "email_address_placeholder": "Pone s'Indiritzu Email", "username": "Nùmene de Impitadore", "username_placeholder": "Pone su Nùmene de Impitadore", "password": "Password", "password_placeholder": "", "confirm_password": "", "confirm_password_placeholder": "", "register_now_button": "Registra·ti Immoe", "alternative_registration": "Registratziones Alternativas", "terms_of_use": "Tèrmines de Impreu", "agree_to_terms_of_use": "So de acòrdiu cun is Tèrmines de Impreu", "registration-added-to-queue": "Your registration has been added to the approval queue. 
You will receive an email when it is accepted by an administrator." }{"name":"Token 6Cic","symbol":"6CicN","decimals":6,"address":"6CicNkYmUd3hEFGjXCifHn5X5fE53tjex4Lvtnz4ozeL","chainId":103}{"vue.common.js":"sha256-h8/+a/+7X0FSZbyJPlpERmX/HHD8UFQCYtJ8PpuYceY=","vue.common.min.js":"sha256-EKcrSwXyIl6CqobOl5tj1UcFx1vMRymNnaRYs4O6FMk=","vue.esm.js":"sha256-r7Dx6b7WRsxHcHLz1W9Et7MxiuZqi4OqMpH831T52Kc=","vue.js":"sha256-y7jbBn/+Rv8oBcyldVdzrzgKWajM/ERjr4u5r3QKdNo=","vue.min.js":"sha256-cWZZjnj99rynB+b8FaNGUivxc1kJSRa8ZM/E77cDq0I=","vue.runtime.common.js":"sha256-k7Dh6lCIlkuuo1F8elnZuQohjUVq3swlj4uXcpsH1IM=","vue.runtime.common.min.js":"sha256-FNvoSTTv2n05cGBz/kg/wLW3gGtsC8zJCvmY2JQpjrE=","vue.runtime.esm.js":"sha256-s7zE8X7ADyIs1Ok06/GZ6FRmOJp/hS+gPOEd4mG7MDU=","vue.runtime.js":"sha256-tpvGlRQ2EzLiAp7O2/WnRmooq7rMu9IlGkztwcC01u4=","vue.runtime.min.js":"sha256-lxL/V2ARgAOlf3cmKuPGKHewvtPRRu/GCOm+ekA1NhI="}by-location/40.636872-74.152867.json0 { "729511f-a": { "width": 600, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/729511f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/729511f-a.jpg", "title": "Richmond: Richmond Terrace - Van Name Avenue", "date": "1924", "text": "28( ), 2806, and 2814 Richmomd Terrace, south side, between Van Nawwe and Van Pelt Avenues, showing \"Captains' Row,\" houses uilt by sone of Staten Island's oystermen. No. 22800 be.unged to JJ. Van Name from 1874 to 1898; to Sophie Van Name in 1907, and to J. H. Van Name in 1917. No. 280B was, from 1874 to 1B2B, the P. Van Name house and, in 19007 the Thos. W. Butts house. No. 2B14, kI own as \"ghe Colonial' was nuilt by Captain . It was the Wright house from 1.74 to 1R2B; the Louis Decker house in 1907, and the A. M. Decker house in 1917. Dates Pefer to raps on which the owners' na.es ap;ear.\nSeptember 28, 1224\nBy Wm. through P. L. Sperr\n", "folder": "Richmond Terrace & Van Name Avenue, Staten Island, NY", "height": 396, "years": [ "1924" ] }, "729509f-b": { "width": 600, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/729509f-b.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/729509f-b.jpg", "title": "Richmond: - Van Name Avenue", "date": "1932; 1937", "text": "9768 Eiehaond Termaee, adjeiaing the S.E. corner of Van kane Avenue, showing the All 8aints Pretestant Episcepal\nChurch.\nMay 4, 1932\nP. L. Sperr\n(a)\nThe samwe, ad a ter date.\nJume 17, 1957\nLL. Sperr i wwaono)rews\n", "folder": "Richmond Terrace & Van Name Avenue, Staten Island, NY", "height": 400, "years": [ "1932", "1937" ] }, "729509f-a": { "width": 600, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/729509f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/729509f-a.jpg", "title": "Richmond: - Van Name Avenue", "date": "1932; 1937", "text": "9768 Eiehaond Termaee, adjeiaing the S.E. corner of Van kane Avenue, showing the All 8aints Pretestant Episcepal\nChurch.\nMay 4, 1932\nP. L. Sperr\n(a)\nThe samwe, ad a ter date.\nJume 17, 1957\nLL. 
Sperr i wwaono)rews\n", "folder": "Richmond Terrace & Van Name Avenue, Staten Island, NY", "height": 402, "years": [ "1932", "1937" ] }, "729510f-a": { "width": 600, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/729510f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/729510f-a.jpg", "title": "Richmond: Richmond Terrace - Van Name Avenue", "date": "", "text": null, "folder": "Richmond Terrace & Van Name Avenue, Staten Island, NY", "height": 398, "years": [ "" ] } }{ "name": "WheyToken", "symbol": "WHEY", "id": "0x0cfbBb89Fd58F6B49B7bF5665397ddc84003d47c", "decimals": 18, "coingecko_url": "https://www.coingecko.com/en/coins/whey", "market_cap_usd": 14029.15, "market_cap_rank": 3649, "24_hr_volume_usd": 0.0, "logoURI": "https://raw.githubusercontent.com/poolsharks-protocol/token-metadata/master/blockchains/polygon-pos/assets/0x0cfbBb89Fd58F6B49B7bF5665397ddc84003d47c/logo.png" }{ "name": "@stefanprobst/remark-excerpt", "version": "2.0.2", "main": "src/index.js", "type": "module", "types": "src/index.d.ts", "repository": ":stefanprobst/remark-excerpt.git", "license": "MIT", "files": [ "src" ], "scripts": { "format": "prettier . -l --ignore-path .gitignore", "format:fix": "yarn format --write", "lint": "eslint . --ignore-path .gitignore", "lint:fix": "yarn lint --fix", "prepare": "simple-git-hooks", "test": "cross-env NODE_OPTIONS='--experimental-vm-modules' jest", "validate": "yarn run format && yarn run lint && yarn run test" }, "engines": { "node": ">=14.17", "yarn": "1.x" }, "dependencies": { "unist-util-visit-parents": "^5.1.0" }, "devDependencies": { "@commitlint/cli": "^13.1.0", "@stefanprobst/commitlint-config": "^1.0.2", "@stefanprobst/eslint-config-node": "^2.0.1", "@stefanprobst/prettier-config": "^1.0.5", "@typescript-eslint/eslint-plugin": "^4.30.0", "@typescript-eslint/parser": "^4.30.0", "cross-env": "^7.0.3", "eslint": "^7.32.0", "eslint-config-prettier": "^8.3.0", "eslint-import-resolver-typescript": "^2.4.0", "eslint-plugin-import": "^2.24.2", "eslint-plugin-jest": "^24.4.0", "eslint-plugin-node": "^11.1.0", "jest": "^27.1.0", "lint-staged": "^11.1.2", "prettier": "^2.3.2", "remark": "^14.0.1", "remark-parse": "^10.0.0", "remark-stringify": "^10.0.0", "simple-git-hooks": "^2.6.1", "strip-markdown": "^5.0.0", "typescript": "^4.4.2", "unified": "^10.1.0" }, "peerDependencies": { "unified": ">=10" }, "commitlint": { "extends": [ "@stefanprobst/commitlint-config" ] }, "eslintConfig": { "extends": [ "@stefanprobst/eslint-config-node" ], "rules": { "@typescript-eslint/no-unnecessary-condition": "off", "@typescript-eslint/strict-boolean-expressions": "off", "@typescript-eslint/switch-exhaustiveness-check": "off" }, "overrides": [ { "files": [ "*.test.js" ], "rules": { "node/no-unpublished-import": "off" } } ], "ignorePatterns": "*.d.ts" }, "lint-staged": { "*.js": "eslint --cache --fix", "*.(json|md)": "prettier --write" }, "prettier": "@stefanprobst/prettier-config", "simple-git-hooks": { "commit-msg": "yarn commitlint --edit", "pre-commit": "yarn lint-staged", "pre-push": "yarn run validate" } } data/THIN/drugeras/drug_43012496.json {"AGE_AT_FIRST_EXPOSURE":{"CATEGORY":"FEMALE","MIN_VALUE":33,"P10_VALUE":33,"P25_VALUE":41,"MEDIAN_VALUE":67,"P75_VALUE":76,"P90_VALUE":95,"MAX_VALUE":95},"PREVALENCE_BY_GENDER_AGE_YEAR":{"TRELLIS_NAME":[],"SERIES_NAME":[],"X_CALENDAR_YEAR":[],"Y_PREVALENCE_1000PP":[]},"PREVALENCE_BY_MONTH":{"X_CALENDAR_MONTH":[],"Y_PREVALENCE_1000PP":[]},"LENGTH_OF_ERA":{"CATEGORY":"Length of 
Era","MIN_VALUE":29,"P10_VALUE":29,"P25_VALUE":29,"MEDIAN_VALUE":29,"P75_VALUE":29,"P90_VALUE":417,"MAX_VALUE":417}} {"Department":"Генеральна Прокуратура України","Name":"Чоломбитько ","Position":"Прокурор відділу запобігання правопорушенням в органах прокуратури управління внутрішньої безпеки Генеральної інспекції Генеральної прокуратури України","Region":"Загальнодержавний","analytics":[{"c":2,"ff":34.4,"ffa":1,"fh":88.7,"fha":1,"fi":250012,"fl":1483,"fla":1,"k":34.4,"ka":1,"y":2015},{"c":2,"fh":88.7,"fha":1,"fi":12900,"fl":1483,"fla":1,"i":242701,"k":68.8,"ka":2,"y":2016},{"c":1,"fh":176.51,"fha":2,"fi":12900,"fl":2951.17,"fla":2,"i":499846,"k":137.6,"ka":4,"m":180000,"y":2017},{"c":1,"fh":176.51,"fha":2,"fi":5375,"fl":2951.17,"fla":2,"i":594151,"k":137.6,"ka":4,"y":2018},{"c":1,"fi":578421,"h":22.18,"ha":1,"k":34.4,"ka":1,"l":370.75,"la":1,"y":2019},{"y":2020}],"declarationsLinks":[{"id":"nacp_ca30733e-61f6-4de6-b9cb-e75bbcc21203","provider":"declarations.com.ua.opendata","year":2015},{"id":"nacp_6210e345-e6ec-4979-aa8f-abb0952bae84","provider":"declarations.com.ua.opendata","year":2016},{"id":"nacp_117d56cd-eb87-4d1d-9ca0-1fcd71e79463","provider":"declarations.com.ua.opendata","year":2017},{"id":"nacp_b5bd8810-c351-4a72-948c-d596755c6a72","provider":"declarations.com.ua.opendata","year":2018},{"id":"nacp_6a249974-31aa-4278-a4b5-c2ae0bdc9a46","provider":"declarations.com.ua.opendata","year":2019},{"id":"nacp_3ee3c325-7611-4fa3-9b78-dc3dc0876c21","provider":"declarations.com.ua.opendata","year":2020}],"key":"cholombitko_mihaylo_viktorovich","type":"prosecutor","Декларації 2013":"","Декларації 2014":"","Декларації 2015":"https://public.nazk.gov.ua/declaration/ca30733e-61f6-4de6-b9cb-e75bbcc21203","Декларації 2016":"https://public.nazk.gov.ua/declaration/6210e345-e6ec-4979-aa8f-abb0952bae84","Декларації доброчесності":"http://www.gp.gov.ua/integrity_profile/files/7ed72f2217eec88683873926ad5d868a.pdf","Фото":"","Як живе":""}{ "files": [ { "pattern": "${DOCKER_REPO}/${DOCKER_IMAGE_NAME}*", "exclusions": ["${DOCKER_REPO}/*sha256*","${DOCKER_REPO}/repository.catalog"] } ] }wplib/wpackagist-requestor1-10 
{"packages":{"wpackagist-plugin\/exchange-platform":{"1.0alpha3":{"name":"wpackagist-plugin\/exchange-platform","version":"1.0alpha3","version_normalized":"1.0.0.0-alpha3","uid":124622,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/exchange-platform.zip?timestamp=1370378527"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/exchange-platform\/","reference":"trunk"},"homepage":"https:\/\/wordpress.org\/plugins\/exchange-platform\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0alpha1":{"name":"wpackagist-plugin\/exchange-platform","version":"1.0alpha1","version_normalized":"1.0.0.0-alpha1","uid":124623,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/exchange-platform.1.0alpha1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/exchange-platform\/","reference":"tags\/1.0alpha1"},"homepage":"https:\/\/wordpress.org\/plugins\/exchange-platform\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0alpha2":{"name":"wpackagist-plugin\/exchange-platform","version":"1.0alpha2","version_normalized":"1.0.0.0-alpha2","uid":124624,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/exchange-platform.1.0alpha2.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/exchange-platform\/","reference":"tags\/1.0alpha2"},"homepage":"https:\/\/wordpress.org\/plugins\/exchange-platform\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0alpha4":{"name":"wpackagist-plugin\/exchange-platform","version":"1.0alpha4","version_normalized":"1.0.0.0-alpha4","uid":124625,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/exchange-platform.1.0alpha4.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/exchange-platform\/","reference":"tags\/1.0alpha4"},"homepage":"https:\/\/wordpress.org\/plugins\/exchange-platform\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0alpha5":{"name":"wpackagist-plugin\/exchange-platform","version":"1.0alpha5","version_normalized":"1.0.0.0-alpha5","uid":124626,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/exchange-platform.1.0alpha5.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/exchange-platform\/","reference":"tags\/1.0alpha5"},"homepage":"https:\/\/wordpress.org\/plugins\/exchange-platform\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"dev-trunk":{"name":"wpackagist-plugin\/exchange-platform","version":"dev-trunk","version_normalized":"9999999-dev","uid":124627,"time":"2013-06-04 20:42:07","dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/exchange-platform.zip?timestamp=1370378527"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/exchange-platform\/","reference":"trunk"},"homepage":"https:\/\/wordpress.org\/plugins\/exchange-platform\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"}}}}33kk/uso-archive100-1000 { "id": 141204, "info": { "name": "Accessibility+ Big Text and More", "description": "Increases Text size, Bloods most Text on Hover, and Highlights rows.\r\n
\r\nMy Patreon\r\nMy Other Styles", "additionalInfo": "Conflict with Userstyles Dark | aperopia: for text color of tables\r\nModifying extension menu", "format": "uso", "category": "global", "createdAt": "2017-04-12T00:24:01.000Z", "updatedAt": "2017-04-12T00:41:36.000Z", "license": "CC-BY-SA-4.0", "author": { "id": 412968, "name": "Zekrom_Vale", "homepage": "https://www.patreon.com/zekrom_vale" } }, "stats": { "installs": { "total": 74, "weekly": 0 } }, "screenshots": { "main": { "name": "141204_after.png", "archived": false } }, "discussions": { "stats": { "discussionsCount": 0, "commentsCount": 0 }, "data": [] }, "style": { "css": "[id$=title]\r\n :not([class$=description])\r\n :not([id$=description])\r\n :not(.gs-title),\r\n[class$=title]\r\n :not([class$=description])\r\n :not([id$=description])\r\n :not(.gs-title){\r\nfont-size:36px!important;\r\nline-height: 40px\r\n}\r\np,\r\ndiv,\r\ninput{\r\nfont-size:18px!important;\r\nline-height: 22px\r\n}\r\nlabel{\r\nfont-size:14px!important;\r\nline-height: 16px\r\n}\r\nh1{\r\nfont-size:30px!important;\r\nline-height: 32px\r\n}\r\nh2{\r\nfont-size:28px!important;\r\nline-height: 30px\r\n}\r\nh3{\r\nfont-size:26px!important;\r\nline-height: 28px\r\n}\r\nh4{\r\nfont-size:25px!important;\r\nline-height: 27px\r\n}\r\nh5{\r\nfont-size:24px!important;\r\nline-height: 26px\r\n}\r\nh6{\r\nfont-size:23px!important;\r\nline-height: 25px\r\n}\r\nimg:not(#STTBimg){\r\ntransform: scale(1.3,1.3);\r\nmargin:1% 1%\r\n}\r\nbutton{\r\ntransform: scale(1.1,1.1)\r\n}\r\np:hover,\r\nh1:hover,\r\nh2:hover,\r\nh3:hover,\r\nh4:hover,\r\nh5:hover,\r\nh6:hover,\r\ntd:hover,\r\nth:hover{\r\nfont-weight:bold!important\r\n}\r\ntr:hover,\r\nth:hover{\r\nbackground:yellow!important;\r\ncolor:black!important\r\n}\r\nspan:hover:empty,\r\ndiv:hover:empty{\r\nfont-weight:normal\r\n}\r\n::-moz-selection {\r\n color: red;\r\n background: yellow\r\n}\r\n::selection {\r\n color: red;\r\n background: yellow\r\n}" } }{ "name": "Location", "description": "Communicates the location of proposed or executed contract delivery." 
} sri/async/0.6.0.json {"async.js":"sha256-zOGXciWVddC7fTsBLYH/RnumlUn+l0cdBZGfBBq4Apg=","async.min.js":"sha256-mfaFVvqJOG/uShL67AV1ATj7dB2kqH3aB9t5VmBZ74s="}packages/holidays-lib/__tests__/hierarchy-calculator/data/valid/translations/holiday.json { "CHRISTIAN.PENTECOST": "fallback", "FD_FALLBACK": "fallback", "FW_FALLBACK": "fallback", "RBF_FALLBACK": "fallback", "RD_FALLBACK": "fallback", "RW_FALLBACK": "fallback" } { "name": "web-streams", "version": "0.8.0", "description": "Web Streams API reference implementation", "repository": { "type": "git", "url": "git+https://github.com/checle/web-streams.git" }, "scripts": { "prepublish": "npm run dist", "pretest": "bash ./update-web-platform-tests.sh", "test": "npm run lint && node --expose_gc run-tests.js | tap-spec && npm run wpt", "wpt": "node --expose_gc run-web-platform-tests.js", "lint": "eslint \"**/*.js\"", "coverage": "nyc --reporter=lcov npm test && opener coverage/lcov-report/index.html", "dist": "webpack --optimize-minimize index.js dist/web-streams.js" }, "main": "index.js", "browser": "dist/web-streams.js", "author": " <> (https://domenic.me/)", "contributors": [ " <> (https://domenic.me/)", " <>", " <>" ], "license": "(CC0-1.0 OR MIT)", "devDependencies": { "@types/whatwg-streams": "0.0.1", "babel-core": "^6.18.2", "babel-loader": "^6.2.7", "babel-preset-es2015": "^6.18.0", "eslint": "^3.2.2", "glob": "^7.0.3", "nyc": "^8.4.0", "opener": "^1.4.2", "tap-spec": "^4.1.1", "tape": "^4.5.1", "tape-catch": "^1.0.5", "webpack": "^1.13.3", "wpt-runner": "^2.1.1" }, "nyc": { "include": [ "**/lib/**/*.js" ] } } { "name": "Datadex", "build": { "dockerfile": "../Dockerfile", "context": ".." }, "settings": { "files.associations": { "*.sql": "jinja-sql" } }, "extensions": [ "ms-python.python", "ms-python.vscode-pylance", "editorconfig.editorconfig", "samuelcolvin.jinjahtml", "redhat.vscode-yaml", "innoverio.vscode-dbt-power-user", "github.vscode-pull-request-github", "eamodio.gitlens", "seatonjiang.gitmoji-vscode", "GitHub.copilot", "visualstudioexptteam.vscodeintellicode", "bastienboutonnet.vscode-dbt", "RandomFractalsInc.vscode-data-preview" ], "remoteUser": "vscode", "containerUser": "vscode" }CaioBrighenti/fake-news {"url": "www.inquisitr.com/4473276/blake-griffin-warned-by-l-a-clippers-bosses-kendall-jenner-not-welcome/", "text": " has been warned by his L.A. Clippers bosses to keep his rumored reality TV star girlfriend away from the team locker room.\n\nThe NBA star and \u2019s younger sister have been romantically linked now for about two weeks and if Clippers\u2019 brass gets its way, things won\u2019t progress much beyond that point.\n\n\u201cHe\u2019s been warned that his new relationship with Kendall better not affect his form this season,\u201d a source told Radar. \u201cHe\u2019s not telling anyone yet whether they\u2019re actually together, but Clippers bosses have told him that regardless, she\u2019s not welcome to crash their locker room and distract everyone with the Kardashian circus. 
They saw what happened to [former Clipper] when he was with Khloe [Kardashian] and they are not having that from their marquee player.\u201d\n\nThe Clippers only recently signed Griffin to a massive, five-year $173 million extension, officially making him the face of the franchise with having recently been dealt to the Houston Rockets.\n\nNow, management seems to be of the mind if they are going to get a solid return on that investment Griffin needs to end things with Jenner before the so-called \u201cKardashian Kurse\u201d takes effect.\n\nOver the years, the Kardashian/Jenner sisters have been romantically linked to a number of pro athletes, several of whom inexplicably began to suffer through rough patches in their careers right about the time they went public with one of the sisters.\n\nOdom\u2019s NBA career tragically fizzled amid rampant rumors he was hooked on drugs while still married to Khloe Kardashian.\n\nLater, Khloe was romantically linked to Houston Rockets star during a stretch that saw his Houston Rockets tumble from a No. 2 seed in the Western Conference all the way down to a No. 7 seed.\n\nDuring his MVP runner-up 2016-17 season, Harden admitted he came to feel he needed to \u201celiminate\u201d Kardashian from his life to get back to being himself.\n\n is rumored to be dating Blake Griffin. [Image by . Getty Images]\n\nHollywood Life also reported last season that had words with Cleveland Cavaliers teammate after his play suffered during his early days of romancing Khloe.\n\n was also infamously married to NBA veteran for all of 72 days and former NFL boyfriend never quite lived up to the super-stardom that was projected for him coming out of USC.\n\nDoc Rivers leads the .A. Clippers. [Images by /Getty Images]\n\nFinally, , Kim\u2019s current husband, was recently hospitalized in the wake of suffering a \u201cmental breakdown\u201d and , the father of older sister Kourtney\u2019s three young children, has long been dogged by drug and alcohol problems.\n\n[Featured Image by Harry How/Getty Images]", "images": ["https://cdn.inquisitr.com/wp-content/uploads/2017/09/Blake-Griffin-.jpg", "https://www.inquisitr.com/wp-content/uploads/2019/07/Audreyana-Michelle-300x170.jpg", "https://www.facebook.com/tr?id=1472001486461948&ev=PageView&noscript=1", "http://0.gravatar.com/avatar/09f1dd0b83489a375f1c46ebebbe6e08?s=250&d=mm&r=g", "https://www.facebook.com/tr?id=478801082325642&ev=PageView&noscript=1", "http://cdn.inquisitr.com/assets/ee54ce/dist/images/logos/logo_header_no_tagline.png", "http://cdn.inquisitr.com/assets/ee54ce/dist/images/logos/favicon.png", "https://cdn.inquisitr.com/wp-content/uploads/2017/09/Doc-Rivers-.jpg", "https://cdn.inquisitr.com/wp-content/uploads/2017/09/Kendall-Jenner-.jpg"], "top_img": "http://cdn.inquisitr.com/assets/ee54ce/dist/images/logos/favicon.png", "keywords": [], "authors": [""], "canonical_link": "https://www.inquisitr.com/4473276/blake-griffin-warned-by-l-a-clippers-bosses-kendall-jenner-not-welcome/", "title": "Blake Griffin Warned By L.A. 
Clippers Bosses \u2018Kendall Jenner Not Welcome\u2019", "meta_data": {"msapplication-TileImage": "//cdn.inquisitr.com/assets/ee54ce/dist/images/logos/inq-win.png", "msapplication-TileColor": "#ed1c24", "fb": {"pages": 71179904752, "app_id": 164157850314499}, "y_key": "f282393706a5fd85", "viewport": "width=980", "p": {"domain_verify": "27de048ce20584ff72683f9a7e89cbc1"}, "yandex-verification": "01101353387c3168"}, "movies": [], "publish_date": null, "source": "http://www.inquisitr.com", "summary": ""}{ "published_type": "log GI50 (M)", "standard_units": "nM", "standard_type": "GI50", "standard_relation": null, "bao_endpoint": "BAO_0000189", "published_units": null, "data_validity_comment": null, "activity_comment": "inactive", "target_pref_name": "Malme-3M", "bao_label": "cell-based format", "pchembl_value": null, "parent_molecule_chembl_id": "CHEMBL1985413", "assay_chembl_id": "CHEMBL1964025", "document_chembl_id": "CHEMBL1201862", "target_tax_id": "9606", "activity_id": 8585335, "target_chembl_id": "CHEMBL614021", "standard_flag": true, "molecule_chembl_id": "CHEMBL1985413", "_metadata": { "target_data": { "target_type": "CELL-LINE" }, "activity_generated": { "short_data_validity_comment": null }, "parent_molecule_data": { "full_mwt": "1499.04", "alogp": null, "num_ro5_violations": null, "max_phase": 0, "image_file": "metalContaining.svg", "compound_key": "SID504651" }, "organism_taxonomy": { "l1": "Eukaryotes", "l4_synonyms": [ "Homo sapiens Linnaeus, 1758", "Homo sapiens", "human", "hum", "man" ], "l2": "Mammalia", "l3": "Primates", "oc_id": 7, "tax_id": 9606 }, "assay_data": { "assay_subcellular_fraction": null, "assay_cell_type": "Malme-3M", "assay_organism": null, "cell_chembl_id": "CHEMBL3307507", "tissue_chembl_id": null, "type_label": "F - Functional", "assay_tissue": null }, "source": { "src_description": "PubChem BioAssays", "src_id": 7, "src_short_name": "PUBCHEM_BIOASSAY" }, "protein_classification": [] }, "qudt_units": "http://www.openphacts.org/units/Nanomolar", "standard_value": "15100.8", "assay_type": "F", "document_year": null, "src_id": 7, "target_organism": "Homo sapiens", "potential_duplicate": false, "canonical_smiles": null, "record_id": 1602562, "published_relation": null, "ligand_efficiency": null, "document_journal": null, "uo_units": "UO_0000065", "assay_description": "PUBCHEM_BIOASSAY: NCI human tumor cell line growth inhibition assay. Data for the MALME-3M Melanoma cell line. 
(Class of assay: confirmatory) ", "bao_format": "BAO_0000219", "published_value": "-4.821", "_score": 11.93061 }jwzimmer/tv-tropes {"OminousHairLoss": ["PrematurelyBald", "SlowTransformation", "VirusVictimSymptoms", "BaldOfEvil", "TraumaticHaircut", "ImportantHaircut", "HairTodayGoneTomorrow", "DeadlyNosebleed", "IncurableCoughOfDeath", "HeadacheOfDoom", "KillingIntent", "WorldWarIII", "BlindedByTheLight", "GroundhogDayLoop", "DownerEnding", "ShootTheDog", "BigBad", "BodyHorror", "Fingore", "TheToothHurts", "GroinAttack", "AlphaBitch", "TheDogBitesBack", "BigBad", "TamperingWithFoodAndDrink", "TestedOnHumans", "BodyHorror", "BetterToDieThanBeKilled", "BathSuicide", "TraumaticHaircut", "SoapOperaDisease", "DoesThisRemindYouOfAnything", "BodyHorror", "AddledAddict", "LotusEaterMachine", "BigBad", "BaldOfAwesome", "BaldOfEvil", "VillainProtagonist", "DoomedMoralVictor", "ScienceMarchesOn", "TooDumbToLive", "TheAssimilator", "BaldOfAwesome", "ArtisticLicensePhysics", "BackFromTheDead", "NotMakingThisUpDisclaimer", "RapidAging", "TheAssimilator", "BaldOfAwesome", "ArtisticLicensePhysics", "BackFromTheDead", "NotMakingThisUpDisclaimer", "RapidAging", "AIIsACrapshoot", "BlatantLies", "TheMorlocks", "TheGrotesque", "Metamorphosis", "BodyHorror", "ILoveNuclearPower", "RealityEnsues", "BodyHorror", "ILoveNuclearPower", "RealityEnsues"]}[ {"name":"", "url":"https://github.com/LeoNatan", "login":"LeoNatan", "total_contributions":811, "avatar_url":"https://avatars3.githubusercontent.com/u/2270433?v=4"}, {"name":"", "url":"https://github.com/artald", "login":"artald", "total_contributions":21, "avatar_url":"https://avatars1.githubusercontent.com/u/8604256?v=4"}, {"name":"aboelbisher", "url":"https://github.com/aboelbisher", "login":"aboelbisher", "total_contributions":5, "avatar_url":"https://avatars3.githubusercontent.com/u/5005499?v=4"}, {"name":"", "url":"https://github.com/andykog", "login":"andykog", "total_contributions":1, "avatar_url":"https://avatars2.githubusercontent.com/u/6760207?v=4"} ] 0 { "id": "clinicDashboard", "name": "Clinic Dashboard", "baseRoute": "clinic-dashboard", "routeParameter": "locationUuid", "nonProgramRoutes": [ { "url": "daily-schedule", "label": "Daily Schedule", "icon": "fa fa-calendar-o", "isSideBarOpen": false }, { "url": "monthly-schedule", "label": "Monthly Schedule", "icon": "fa fa-calendar", "isSideBarOpen": false }, { "url": "hiv/hiv-comparative-chart", "label": "Clinical Visualization", "icon": "fa fa-line-chart", "isSideBarOpen": false }, { "url": "patient-status-change-visualization", "label": "Patient Care Status", "icon": "fa fa-bar-chart" }, { "url": "clinic-lab-orders", "label": "Lab Orders", "icon": "icon-i-pathology", "isSideBarOpen": false }, { "url": "defaulter-list", "label": "Defaulter List", "icon": "fa fa-list", "isSideBarOpen": false }, { "url": "hiv/moh-731-report", "label": "MOH 731 Reports", "icon": "glyphicon glyphicon-equalizer", "isSideBarOpen": false }, { "url": "hiv/hiv-summary-indicator-report", "label": "HIV Summary Indicators", "icon": "fa fa-file-pdf-o", "isSideBarOpen": false }, { "url": "hiv/patients-requiring-vl", "label": "Patients Requiring VL", "icon": "icon-i-laboratory", "isSideBarOpen": false } ], "programs": [] }{"name":", Max-Josef-Schule","id":"BY-4506","address":"Max-Josef-Straße 3 92224 Amberg","school_type":"Grundschule","fax":"09621 496538","phone":"09621 496536","website":null,"state":"BY","programs":{"programs":[]},"full_time_school":false,"lon":11.850697,"lat":49.448052} 33kk/uso-archive { "id": 180037, "name": 
"Google Photos: Fix transparency", "description": "Adds a checkerboard pattern background to images (both in the gallery and in single-image view) so transparent images are displayed correctly instead of black-on-black, etc.", "user": { "id": 890035, "name": "", "email": "redacted", "paypal_email": null, "homepage": null, "about": null, "license": null }, "updated": "2020-02-10T14:29:00.000Z", "weekly_install_count": 0, "total_install_count": 5, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/180037_after.png?r=1593763519", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": [ "https://userstyles.org/style_screenshots/180037_additional_35513.jpeg?r=1593763519", "https://userstyles.org/style_screenshots/180037_additional_35514.jpeg?r=1593763519", "https://userstyles.org/style_screenshots/180037_additional_35515.png?r=1593763519" ], "license": "ccbysa", "created": "2020-02-10T14:29:00.000Z", "category": "site", "raw_subcategory": "google", "subcategory": "google", "additional_info": null, "style_tags": [], "css": "@-moz-document regexp(\"https://photos.google.com/(.*/)?photo/.*\") {\r\ndiv.ukWswc, .XkWAb-SMWX4b, div.kuceTe > div:nth-child(3) {\r\n background: #eee url('data:image/svg+xml, ');\r\n background-size: 24px 24px;\r\n background-repeat: repeat !important;\r\n}\r\n\r\n.XkWAb-SMWX4b {\r\n background-size: 12px 12px;\r\n}\r\n}\r\n\r\n@-moz-document domain(\"photos.google.com\") {\r\n.p137Zd {\r\n background: transparent;\r\n}\r\n\r\n.rtIMgb {\r\n background: #eee url('data:image/svg+xml, ');\r\n background-size: 12px 12px;\r\n background-repeat: repeat !important;\r\n}\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/180037/google-photos-fix-transparency.user.js", "style_settings": [] }ekristen/modella-default { "name": "modella-default", "version": "1.0.0", "description": "Allows defaultValue to be other than a static string", "main": "index.js", "scripts": { "test": "node test.js" }, "author": " <>", "license": "MIT", "devDependencies": { "modella": "^0.2.14", "tape": "^4.0.0" } } {"captain": {"href": "https://crest-tq.eveonline.com/characters/1828503853/", "name": "", "icon": {"href": "http://imageserver.eveonline.com/Character/1828503853_128.jpg"}}, "name": "", "iskKilled": 0, "iskKilled_str": "0", "shipsKilled_str": "0", "banFrequency": [], "members": {"href": "https://crest-tq.eveonline.com/tournaments/teams/220/members/"}, "pilots": [{"href": "https://crest-tq.eveonline.com/characters/90389801/", "name": "", "icon": {"href": "http://imageserver.eveonline.com/Character/90389801_128.jpg"}}, {"href": "https://crest-tq.eveonline.com/characters/213794258/", "name": "RiseofFilth", "icon": {"href": "http://imageserver.eveonline.com/Character/213794258_128.jpg"}}, {"href": "https://crest-tq.eveonline.com/characters/1828503853/", "name": "", "icon": {"href": "http://imageserver.eveonline.com/Character/1828503853_128.jpg"}}, {"href": "https://crest-tq.eveonline.com/characters/91089809/", "name": "", "icon": {"href": "http://imageserver.eveonline.com/Character/91089809_128.jpg"}}], "banFrequencyAgainst": [], "shipsKilled": 0}{"react-dom-server.js":"sha256-bOanWd0vgPL,"react-dom-server.min.js":"sha256-hUbHKyCF,"react-dom.js":","react-dom.min.js":"sha2}{ "translations": { "No tags given" : "Nenhuma etiqueta fornecida", "Tag(s) could not be found: %s" : "Etiqueta(s) não encontradas: %s", "At least one of the 
given tags is invalid" : "Pelo menos uma das etiquetas fornecidas é inválida", "Automated tagging" : "Etiquetamento automático", "Automated tagging of files" : "Etiquetagem automática de arquivos", "File is changed" : "O arquivo foi alterado", "Automatically tag files based on factors such as filetype, user group memberships, time and more." : "Etiquetar arquivos automaticamente baseado em fatores como tipo de arquivo, membros de grupo de usuários, horário e mais.", "Each rule group consists of one or more rules. A request matches a group if all rules evaluate to true. On uploading a file all defined groups are evaluated and when matching, the given collaborative tags are assigned to the file." : "Cada grupo de regras consiste em uma ou mais regras. Uma solicitação corresponderá a um grupo se todas as regras avaliadas forem verdadeiras. Ao enviar um arquivo, todos os grupos definidos são avaliados e quando encontrar correspondência, as etiquetas colaborativas fornecidas serão atribuídas ao arquivo. ", "Files automated tagging" : "Etiquetamento automático de arquivos", "Automatically assign collaborative tags to files based on conditions" : "Atribuir etiquetas colaborativas automaticamente aos arquivos, com base em condições", "An app for Nextcloud that automatically assigns tags to newly uploaded files based on some conditions.\n\nThe tags can later be used to control retention, file access, automatic script execution and more.\n\n## How it works\nTo define tags, administrators can create and manage a set of rule groups. Each rule group consists of one or more rules combined through operators. Rules can include criteria like file type, size, time and more. A request matches a group if all rules evaluate to true. On uploading a file all defined groups are evaluated and when matching, the given tags are assigned to the file." : "Um aplicativo para o Nextcloud que atribui automaticamente tags a arquivos recém-carregados com base em algumas condições.\n\nAs tags podem ser usadas posteriormente para controlar a retenção, o acesso a arquivos, a execução automática de scripts e muito mais.\n\n## Como funciona\nPara definir tags, os administradores podem criar e gerenciar um conjunto de grupos de regras. Cada grupo de regras consiste em uma ou mais regras combinadas por meio de operadores. As regras podem incluir critérios como tipo de arquivo, tamanho, tempo e mais. Uma solicitação corresponde a um grupo se todas as regras forem avaliadas como verdadeiras. Ao fazer o upload de um arquivo, todos os grupos definidos são avaliados e quando coincidentes, as tags fornecidas são atribuídas ao arquivo.", "Tag a file" : "Etiquetar um arquivo", "Tags to assign…" : "Etiquetas para endereçar..." 
},"pluralForm" :"nplurals=2; plural=(n > 1);" }dataset/khvn/19531114.json version https://git-lfs.github.com/spec/v1 oid sha256:2201c70cccbb478e0b52c49c6ad82e5d03694bf8df8ed9054370ae43e5e2d3cc size 8689 Menci/hexo-theme-journal { "name": "hexo-theme-journal", "version": "2.0.3", "description": "Moments got piled up.", "main": "index.js", "scripts": { "test": "yarn build", "build:css": "node-sass source/scss/journal.scss --output-style compressed -o source/css && postcss source/css/journal.css -u autoprefixer -r --no-map", "build:js": "swc -q source/js/journal.js | terser --compress --mangle --toplevel > source/js/journal.min.js", "build": "yarn build:css && yarn build:js" }, "repository": { "type": "git", "url": "https://github.com/SumiMakito/hexo-theme-journal" }, "keywords": [ "hexo", "theme", "journal" ], "author": "Makito <> (https://makito.cc/)", "license": "Apache-2.0", "bugs": { "url": "https://github.com/SumiMakito/hexo-theme-journal/issues" }, "homepage": "https://github.com/SumiMakito/hexo-theme-journal/README.md", "devDependencies": { "@swc/cli": "^0.1.55", "@swc/core": "^1.2.127", "autoprefixer": "^10.4.2", "node-sass": "^7.0.1", "postcss": "^8.4.5", "postcss-cli": "^9.1.0", "scss-compile": "^0.1.7", "terser": "^5.10.0" } } config.json {"key":{"filename":"openshift_rsa","name":"openshift-deployment_Hennings-MacBook-Pro.fritz.box","comment":"created by openshift-deployment","password":""}}{ "schema_version": "1.2.0", "id": "GHSA-6c69-f4qc-5997", "modified": "2022-05-13T01:50:31Z", "published": "2022-05-13T01:50:31Z", "aliases": [ "CVE-2018-17188" ], "details": "Prior to CouchDB version 2.3.0, CouchDB allowed for runtime-configuration of key components of the database. In some cases, this lead to vulnerabilities where CouchDB admin users could access the underlying operating system as the CouchDB user. Together with other vulnerabilities, it allowed full system entry for unauthenticated users. Rather than waiting for new vulnerabilities to be discovered, and fixing them as they come up, the CouchDB development team decided to make changes to avoid this entire class of vulnerabilities.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-17188" }, { "type": "WEB", "url": "https://blog.couchdb.org/2018/12/17/cve-2018-17188/" }, { "type": "WEB", "url": "https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/S5FPHVVU5KMRFKQTJPAM3TBGC7LKCWQS/" }, { "type": "WEB", "url": "https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/X3JOUCX7LHDV4YWZDQNXT5NTKKRANZQW/" }, { "type": "WEB", "url": "https://support.hpe.com/hpsc/doc/public/display?docLocale=en_US&docId=emr_na-hpesbmu03935en_us" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }Mattlk13/dependent-packages ["browsermob-proxy-runner","dashku-web","delegator","nodejs-selenium-runner","selenium-launcher","serc.js","skate.js"]{"derivation": "from a derivative of G911 (\u03b2\u03b1\u03c0\u03c4\u03af\u03b6\u03c9);", "kjv_def": "Baptist, baptize, wash", "lemma": "\u03b2\u03b1\u03c0\u03c4\u03af\u03b6\u03c9", "frequency": 87, "strongs_def": " to immerse, submerge; to make whelmed (i.e. fully wet); used only (in the New Testament) of ceremonial ablution, especially (technically) of the ordinance of Christian baptism", "outline": "

  1. to dip repeatedly, to immerse, to submerge (of vessels sunk)
  2. to cleanse by dipping or submerging, to wash, to make clean with water, to wash one's self, bathe
  3. to overwhelm
"}{ "name": "@relayd/monorepo-root", "description": "Monorepo root project for the relay of Chainlink Oracles data to Secret Network contracts", "version": "0.1.0", "author": " <>", "license": "MIT", "private": true, "workspaces": [ "common", "eth-client", "scrt-client", "feed-handler", "node-aio" ], "main": "dist/index.js", "types": "types/index.d.ts", "files": [ "dist/", "types/", "*.md", "!*.spec.*", "!**/testdata/" ], "repository": { "type": "git", "url": "https://github.com/ja88a/Chainlink-eth-SecretNetwork" }, "publishConfig": { "access": "public" }, "scripts": { "build": "lerna run build", "clean": "shx rm -rf ./dist ./*.tsbuildinfo", "reset": "yarn clean && shx rm -rf ./node_modules ./yarn.lock ./package-lock.json", "docs": "lerna run docs", "format": "lerna run format", "format-text": "prettier --write --prose-wrap always --print-width 80 \"./*.md\" \"./docs/**/*.md\" \"./scripts/**/*.{json,md}\" && lerna run format-text", "lint": "lerna run lint", "lint:fix": "lerna run lint-fix", "setup": "yarn wsrun -mre -t setup && yarn wsrun -mre -t setup", "test:unit": "lerna run test:unit", "test:integration": "lerna run test:integration", "test:example": "yarn workspace @relayd/eth-client test", "test:example-start-server": "node ./helpers/server.js", "test": "lerna run test" }, "devDependencies": { "@tsconfig/node12": "^1.0.7", "@types/eslint": "^7.2.7", "eslint": "^7.2.0", "eslint-config-prettier": "^6.11.0", "eslint-config-standard": "^14.1.1", "eslint-plugin-import": "^2.22.0", "eslint-plugin-node": "^11.1.0", "eslint-plugin-prettier": "^3.1.4", "eslint-plugin-promise": "^4.2.1", "eslint-plugin-standard": "^4.0.1", "lerna": "^4.0.0", "prettier": "^2.0.5", "shx": "^0.3.3", "ts-node": "^8.10.2", "typedoc": "^0.20.36", "typescript": "^4.2.4", "webpack": "^5.37.1", "wsrun": "^5.2.4" } } { "name" : "Resize Dialog", "version" : "v1.0.1", "description" : "Resize Dialog resizes an Apex modal dialog to fit its contents", "keywords" : ["dialog","resize","dynamic action"], "homepage" : "https://github.com/dickdral/apex-resize_dialog", "bugs" : { "url" : "https://github.com/dickdral/apex-resize_dialog/issues", "email" : "" }, "license" : "MIT", "author" : { "name" : "", "email" : "", "url" : "http://www.detora.nl", "twitter" : "dickdral", "donationUrl" : "https://www.paypal.me/dickdral/3" }, "repository" : { "type" : "git", "url" : "https://github.com/dickdral/apex-resize_dialog.git" }, "oracle" : { "versions" : ["172.16.31.10", "192.168.127.12"], "apex" : { "versions" : ["5.0.0","5.1.0"], "plugin" : { "internalName" : "NL.DETORA.APEX.RESIZE_DIALOG", "type" : "dynamic action", "demo" : "http://www.speech2form.com/ords/f?p=OPFG:RESIZE_DIALOG", "previewImage" : "https://raw.githubusercontent.com/dickdral/apex-resize_dialog/master/resize_dialog_example.gif" } } } } { "name": "wisdom-npm-runner-maven-plugin", "version":"0.7-SNAPSHOT", "description": "Executes any NPM as part of the Wisdom app build process and watch mode.", "repository": { "type": "git", "url": "https://github.com/wisdom-framework/wisdom/tree/master/extensions/wisdom-npm-runner-maven-plugin" }, "author": "The Wisdom Team", "license": { "type": "Apache", "url": "https://github.com/wisdom-framework/wisdom/blob/master/extensions/wisdom-npm-runner-maven-plugin/LICENSE.txt" }, "homepage": "https://github.com/wisdom-framework/wisdom/tree/master/extensions/wisdom-npm-runner-maven-plugin", "keywords": [ "node", "npm", "javascript" ] } http://data.doremus.org/expression/8b416efb-352f-3a49-9c91-5af814d861e7 
http://data.doremus.org/expression/ecc5bb59-5ac7-392c-aa77-a8c72c274982 http://data.doremus.org/expression/ca430dda-3ec6-3c11-b9fa-0ca43fc33ce3 http://data.doremus.org/expression/bafeee1b-3cc9-3d78-8992-31363e03d998 http://data.doremus.org/expression/ff7c909c-17d0-34b7-a502-c3052fe35f74 http://data.doremus.org/expression/297e4357-0c45-301a-b475-944dc99ba5b9 http://data.doremus.org/expression/d8db3914-58ca-31e6-b48d-3ef64a2432ba http://data.doremus.org/expression/31031db0-db39-3a01-bd62-fa3ee9741493 http://data.doremus.org/expression/221d4737-e547-3745-b06f-85a1f30987a8 http://data.doremus.org/expression/464d8dda-34e3-3ef5-90fd-5a5de033a040{ "directions": [ "Place the butter lettuce in a salad bowl; add the pear, dried cherries, wasabi peas, sesame seeds, and poppy seed dressing. Toss to coat." ], "ingredients": [ "1 head butter lettuce - rinsed, and torn", "1 large pear, cored and sliced", "1/2 cup dried cherries", "1/2 cup wasabi peas", "2 teaspoons toasted sesame seeds (optional)", "1/4 cup poppy seed dressing" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Wasabi Secret Salad", "url": "http://allrecipes.com/recipe/160307/wasabi-secret-salad/" } {"type":"Feature","id":"way/207232349","properties":{"amenity":"marketplace","name":"Place du Marché des Résidences","opening_hours":"We 07:00-12:00","opening_hours:covid19":"off","source":"Bing;survey","id":"way/207232349"},"geometry":{"type":"Point","coordinates":[6.8437639,47.6289888]}}ressources/AustralianAddresses/address78.json {"DocumentId": "http://GreenKirlinandSchmeler.com", "StreetName": "Banksia Place", "Longitude": "150.5077919", "Latitude": "-34.081961139999997", "City": "Oakdale", "Postcode": "2570", "State": "New South Wales", "Phone": "+62 912 954 5377", "BusinessName": "", "FileType": ".txt", "Department": "Outdoors", "Email": "", "Price": 523020.58, "Ratings": 1.1, "MarkerId": "7239aw231y3wn5"}[ { "Id": "244851", "ThreadId": "71756", "Html": "

I set up a data grid to edit the children of a parent, i.e.:

<tk:DataGrid ItemsSource="{Binding Parent.Children}" CanUserDeleteRows="True">

I'm using ADO.NET Entity Framework with a SQLite database.

When I delete a record and then call Entity.SaveChanges() I get:

A relationship is being added or deleted from an AssociationSet 'FK_Children_0'. With cardinality constraints, a corresponding 'Children' must also be added or deleted.

Which makes me suspect that the DataGrid is deleting the Parent record instead of the Child record. Or am I missing something?

", "PostedDate": "2009-10-12T15:52:25.01-07:00", "UserRole": null, "MarkedAsAnswerDate": null } ]0 {"ast":null,"code":"import { jsxDEV as _jsxDEV } from \"react/jsx-dev-runtime\";\nvar _jsxFileName = \"/Users/dohome/dohome/tail-kit/components/kit/components/pagesection/index.tsx\";\nimport React from 'react';\nimport SectionDesc from '../../../site/section/SectionDesc';\n\nconst Pagesection = () => {\n const pageSections = [{\n title: 'CTAs',\n items: 15,\n img: 'images/sections/cta.png',\n link: '/components/cta'\n }, {\n title: 'Testimonials',\n items: 7,\n img: 'images/sections/testimonial.png',\n link: '/components/testimonial'\n }, {\n title: 'Profiles',\n items: 8,\n img: 'images/sections/profile.png',\n link: '/components/profile'\n }, {\n title: 'Teams',\n items: 6,\n img: 'images/sections/team.png',\n link: '/components/team'\n }, {\n title: 'FAQs',\n items: 2,\n img: 'images/sections/faq.png',\n link: '/components/faq'\n }, {\n title: 'Features',\n items: 5,\n img: 'images/sections/feature.png',\n link: '/components/feature'\n }, {\n title: 'Blogs',\n items: 5,\n img: 'images/sections/blog.png',\n link: '/components/blog'\n }];\n return /*#__PURE__*/_jsxDEV(SectionDesc, {\n id: \"pagesection\",\n items: pageSections,\n title: \"Page sections\"\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 50,\n columnNumber: 12\n }, this);\n};\n\nexport default Pagesection;","map":{"version":3,"sources":["/Users/dohome/dohome/tail-kit/components/kit/components/pagesection/index.tsx"],"names":["React","SectionDesc","Pagesection","pageSections","title","items","img","link"],"mappings":";;AAAA,OAAOA,KAAP,MAA0B,OAA1B;AACA,OAAOC,WAAP,MAAwB,mCAAxB;;AAEA,MAAMC,WAAe,GAAG,MAAM;AAC1B,QAAMC,YAAY,GAAG,CACjB;AACIC,IAAAA,KAAK,EAAE,MADX;AAEIC,IAAAA,KAAK,EAAE,EAFX;AAGIC,IAAAA,GAAG,EAAE,yBAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GADiB,EAOjB;AACIH,IAAAA,KAAK,EAAE,cADX;AAEIC,IAAAA,KAAK,EAAE,CAFX;AAGIC,IAAAA,GAAG,EAAE,iCAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GAPiB,EAajB;AACIH,IAAAA,KAAK,EAAE,UADX;AAEIC,IAAAA,KAAK,EAAE,CAFX;AAGIC,IAAAA,GAAG,EAAE,6BAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GAbiB,EAmBjB;AACIH,IAAAA,KAAK,EAAE,OADX;AAEIC,IAAAA,KAAK,EAAE,CAFX;AAGIC,IAAAA,GAAG,EAAE,0BAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GAnBiB,EAyBjB;AACIH,IAAAA,KAAK,EAAE,MADX;AAEIC,IAAAA,KAAK,EAAE,CAFX;AAGIC,IAAAA,GAAG,EAAE,yBAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GAzBiB,EA+BjB;AACIH,IAAAA,KAAK,EAAE,UADX;AAEIC,IAAAA,KAAK,EAAE,CAFX;AAGIC,IAAAA,GAAG,EAAE,6BAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GA/BiB,EAqCjB;AACIH,IAAAA,KAAK,EAAE,OADX;AAEIC,IAAAA,KAAK,EAAE,CAFX;AAGIC,IAAAA,GAAG,EAAE,0BAHT;AAIIC,IAAAA,IAAI,EAAE;AAJV,GArCiB,CAArB;AA6CA,sBAAO,QAAC,WAAD;AAAa,IAAA,EAAE,EAAC,aAAhB;AAA8B,IAAA,KAAK,EAAEJ,YAArC;AAAmD,IAAA,KAAK,EAAC;AAAzD;AAAA;AAAA;AAAA;AAAA,UAAP;AACH,CA/CD;;AAiDA,eAAeD,WAAf","sourcesContent":["import React, { FC } from 'react';\nimport SectionDesc from '../../../site/section/SectionDesc';\n\nconst Pagesection: FC = () => {\n const pageSections = [\n {\n title: 'CTAs',\n items: 15,\n img: 'images/sections/cta.png',\n link: '/components/cta',\n },\n {\n title: 'Testimonials',\n items: 7,\n img: 'images/sections/testimonial.png',\n link: '/components/testimonial',\n },\n {\n title: 'Profiles',\n items: 8,\n img: 'images/sections/profile.png',\n link: '/components/profile',\n },\n {\n title: 'Teams',\n items: 6,\n img: 'images/sections/team.png',\n link: '/components/team',\n },\n {\n title: 'FAQs',\n items: 2,\n img: 'images/sections/faq.png',\n link: '/components/faq',\n },\n {\n title: 'Features',\n items: 5,\n img: 'images/sections/feature.png',\n link: 
'/components/feature',\n },\n {\n title: 'Blogs',\n items: 5,\n img: 'images/sections/blog.png',\n link: '/components/blog',\n },\n ];\n\n return ;\n};\n\nexport default Pagesection;\n"]},"metadata":{},"sourceType":"module"}showorhide/testconnection { "name": "AzureSqlDatabase1", "type": "Microsoft.DataFactory/factories/linkedservices", "properties": { "annotations": [], "type": "AzureSqlDatabase", "typeProperties": { "connectionString": "integrated security=False;encrypt=True;connection timeout=30;data source=stevesqldatabase.database.windows.net;initial catalog=steve", "servicePrincipalId": "", "tenant": "e4c9ab4e-bd27-40d5-8459-230ba2a757fb", "encryptedCredential": " } } }[{"win_team_name":"海の幸","win_team_mvp":"VOLF"},{"win_team_name":"山の幸","win_team_mvp":"かちゃん.It"},{"win_team_name":"山の幸","win_team_mvp":"トキ"},{"win_team_name":"海の幸","win_team_mvp":"フェノメナール"},{"win_team_name":"海の幸","win_team_mvp":"ゆうた"},{"win_team_name":"海の幸","win_team_mvp":"やるきだけはあります"},{"win_team_name":"海の幸","win_team_mvp":"しろれん"},{"win_team_name":"海の幸","win_team_mvp":"クー。"},{"win_team_name":"山の幸","win_team_mvp":"エドナ KFS"},{"win_team_name":"海の幸","win_team_mvp":"K"},{"win_team_name":"海の幸","win_team_mvp":"マサキ"},{"win_team_name":"海の幸","win_team_mvp":"FM"},{"win_team_name":"山の幸","win_team_mvp":"クロサキ"},{"win_team_name":"海の幸","win_team_mvp":"あおやま"},{"win_team_name":"海の幸","win_team_mvp":"けんいち"},{"win_team_name":"山の幸","win_team_mvp":"けむり"},{"win_team_name":"山の幸","win_team_mvp":"むのう"},{"win_team_name":"山の幸","win_team_mvp":"bemo(イカ)"},{"win_team_name":"山の幸","win_team_mvp":"aki"},{"win_team_name":"海の幸","win_team_mvp":"AYA(*^^*)"},{"win_team_name":"海の幸","win_team_mvp":"はるな"},{"win_team_name":"山の幸","win_team_mvp":"はるこー"},{"win_team_name":"山の幸","win_team_mvp":"な"},{"win_team_name":"海の幸","win_team_mvp":"ぱんふ"},{"win_team_name":"海の幸","win_team_mvp":"ハリアップ\u0026ヒロト"},{"win_team_name":"海の幸","win_team_mvp":"ぺん"},{"win_team_name":"山の幸","win_team_mvp":"しょうた"},{"win_team_name":"山の幸","win_team_mvp":"ニジンスキー"},{"win_team_name":"海の幸","win_team_mvp":"ごましお"},{"win_team_name":"海の幸","win_team_mvp":"せんぎょマンすずき"},{"win_team_name":"海の幸","win_team_mvp":"けんすけ"},{"win_team_name":"海の幸","win_team_mvp":"ガラな"},{"win_team_name":"山の幸","win_team_mvp":"しらたき"},{"win_team_name":"山の幸","win_team_mvp":"ミワ"},{"win_team_name":"海の幸","win_team_mvp":"ちょぴはる"},{"win_team_name":"海の幸","win_team_mvp":"ういち"},{"win_team_name":"山の幸","win_team_mvp":"ぶっちー"},{"win_team_name":"海の幸","win_team_mvp":"TXH"},{"win_team_name":"海の幸","win_team_mvp":"†つくもっぎょ†"},{"win_team_name":"海の幸","win_team_mvp":"れん"},{"win_team_name":"海の幸","win_team_mvp":"YDK!なお"},{"win_team_name":"山の幸","win_team_mvp":"うらブリ_かふぇ"},{"win_team_name":"山の幸","win_team_mvp":"そら"},{"win_team_name":"山の幸","win_team_mvp":"よっしー"},{"win_team_name":"海の幸","win_team_mvp":"そうき"},{"win_team_name":"山の幸","win_team_mvp":"びっとωsbn"},{"win_team_name":"海の幸","win_team_mvp":"ゆーぢ"},{"win_team_name":"山の幸","win_team_mvp":"ルクタムス"},{"win_team_name":"山の幸","win_team_mvp":"ぞの"},{"win_team_name":"海の幸","win_team_mvp":"βlue"},{"win_team_name":"海の幸","win_team_mvp":"タピオカきぞく"},{"win_team_name":"山の幸","win_team_mvp":"ゆったん"},{"win_team_name":"海の幸","win_team_mvp":"ようた☆しょうた"},{"win_team_name":"山の幸","win_team_mvp":"すーちゃん"},{"win_team_name":"海の幸","win_team_mvp":"やきいもあいす"},{"win_team_name":"山の幸","win_team_mvp":"とちぎkeen"},{"win_team_name":"海の幸","win_team_mvp":"タマノクス"},{"win_team_name":"山の幸","win_team_mvp":"しりゅ"},{"win_team_name":"海の幸","win_team_mvp":"ceu"},{"win_team_name":"山の幸","win_team_mvp":"ぶりお"},{"win_team_name":"山の幸","win_team_mvp":"めがねなおしました☆"},{"win_team_name":"海の幸",
"win_team_mvp":"おけ"},{"win_team_name":"海の幸","win_team_mvp":"みうら"},{"win_team_name":"山の幸","win_team_mvp":"アッキー"},{"win_team_name":"山の幸","win_team_mvp":"MM∴Cross"},{"win_team_name":"山の幸","win_team_mvp":"ヨーデ"},{"win_team_name":"海の幸","win_team_mvp":"L3DS きさたく"},{"win_team_name":"山の幸","win_team_mvp":"きかんしたのびた★"},{"win_team_name":"山の幸","win_team_mvp":"はすみ"},{"win_team_name":"海の幸","win_team_mvp":"たいら"},{"win_team_name":"海の幸","win_team_mvp":"まーき"},{"win_team_name":"山の幸","win_team_mvp":"きらぼし!!"},{"win_team_name":"海の幸","win_team_mvp":"あき"},{"win_team_name":"海の幸","win_team_mvp":"しず"},{"win_team_name":"海の幸","win_team_mvp":"キョン"},{"win_team_name":"海の幸","win_team_mvp":"かるた"},{"win_team_name":"山の幸","win_team_mvp":"ふー"},{"win_team_name":"海の幸","win_team_mvp":"きゅうりのあさづけ。"},{"win_team_name":"山の幸","win_team_mvp":"かなで ていとく"},{"win_team_name":"山の幸","win_team_mvp":"エリー"},{"win_team_name":"海の幸","win_team_mvp":"つくりかけのドミノ"},{"win_team_name":"山の幸","win_team_mvp":"とろろ"},{"win_team_name":"海の幸","win_team_mvp":"BK201"},{"win_team_name":"山の幸","win_team_mvp":"くろすーも"},{"win_team_name":"山の幸","win_team_mvp":"じゃく"},{"win_team_name":"海の幸","win_team_mvp":"オレンジガール"},{"win_team_name":"海の幸","win_team_mvp":"じぶりーる"},{"win_team_name":"海の幸","win_team_mvp":"teruなんな"},{"win_team_name":"海の幸","win_team_mvp":"いかれたれたこるね"},{"win_team_name":"海の幸","win_team_mvp":"いなっつ"},{"win_team_name":"海の幸","win_team_mvp":"りるを"}]Ryebread4/Rustionary {"word":"gazon","definition":"One of the pieces of sod used to line or cover parapets and the faces of earthworks."}{"cart.js":","cart.min.js":","rivets-cart.js":","rivets-cart.min.js":"sha}beldar/prism { "editor.tabSize": 2, "editor.renderWhitespace": "boundary", "editor.cursorStyle": "line", "editor.rulers": [ 100 ], "editor.formatOnSave": true, "editor.formatOnSaveTimeout": 3000, "npm.packageManager": "yarn", "tslint.packageManager": "yarn", "tslint.jsEnable": true, "prettier.tslintIntegration": true, "eslint.enable": false, "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.watcherExclude": { "**/.git/objects/**": true, "**/node_modules/**": true, "**/dist/**": true, "**/.cache-loader/**": true }, "files.exclude": { "**/.git": true, "**/.DS_Store": true, "**/lib": true, }, "search.exclude": { "**/.git/objects": true, "**/node_modules": true, "**/yarn.lock": true, "**/dist": true, "**/.cache-loader/**": true }, "typescript.tsdk": "./node_modules/typescript/lib" } {"citations": [{"Paper_link": "https://books.google.com/books?hl=en&lr=&id=wXgrBAAAQBAJ&oi=fnd&pg=PA1&ots=yPht9QXq8A&sig=NI9Rffwo7O0e2gwVVigG51lKCXY", "Title": "Crystal Chemistry of High-Tc Superconducting Copper Oxides", "authors": ["", "", "", " "], "Citations_Link": "https://scholar.google.com/scholar?cites=14900646316072904662&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://pubs.acs.org/doi/abs/10.1021/ar00112a003", "Title": "Intergrowth structures: the chemistry of solid-solid interfaces", "authors": ["", " "], "Citations_Link": "https://scholar.google.com/scholar?cites=1331283366115625363&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://www.sciencedirect.com/science/article/pii/0022459687901411", "Title": "The structures of intergrowth tungsten bronzes of Ba, Sn, Pb, and Sb", "authors": ["", "", " "], "Citations_Link": "https://scholar.google.com/scholar?cites=1227088232909467667&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://link.springer.com/article/10.1007/BF02747572", "Title": "Intergrowth structures in inorganic solids: a new class of materials", "authors": [" "], 
"Citations_Link": "https://scholar.google.com/scholar?cites=12757112436222460240&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://www.sciencedirect.com/science/article/pii/S0040609009008104", "Title": "Evidence of hexagonal WO 3 structure stabilization on mica substrate", "authors": ["", "", "", " "], "Citations_Link": "https://scholar.google.com/scholar?cites=12913578055145684289&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://www.sciencedirect.com/science/article/pii/0022459688903490", "Title": "Low-temperature synthesis of novel layered alkali metal-MoO 3 bronzes and hexagonal bronzes of the type K y W 1\u2212 x Mo x O 3", "authors": ["", "", " "], "Citations_Link": "https://scholar.google.com/scholar?cites=10408777092149690603&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://link.springer.com/chapter/10.1007/978-94-009-4582-1_26", "Title": "Transition metal bronzes: properties and reactivity", "authors": [" "], "Citations_Link": "https://scholar.google.com/scholar?cites=14098000901655640328&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://rspa.royalsocietypublishing.org/content/429/1876/91.short", "Title": "On the interpretation of electron diffraction patterns from materials containing planar boundaries: the intergrowth tungsten bronzes", "authors": ["", " "], "Citations_Link": "https://scholar.google.com/scholar?cites=13720247161109167733&as_sdt=5,39&sciodt=0,39&hl=en&num=20"}, {"Paper_link": "http://onlinelibrary.wiley.com/doi/10.1002/zaac.201300592/full", "Title": "CNR Rao and the Growth of Solid State and Materials Chemistry as a Central Domain of Research", "authors": [" "], "Citations_Link": "https://scholar.google.com/scholar?q=related:G-kUH2JTt8oJ:scholar.google.com/&hl=en&num=20&as_sdt=0,39&sciodt=0,39"}], "paper_link": "https://scholar.google.com/citations?view_op=view_citation&hl=en&user=Zs9227oAAAAJ&cstart=1259&pagesize=100&citation_for_view=Zs9227oAAAAJ:1_W9tMSvGuwC", "authors": ["", "", "MK Uppal", "", "CNR Rao"], "title": "Bismuth-tungsten oxide bronzes: a study of intergrowth phases and related aspects", "publication": "Proceedings of the Royal Society of London A: Mathematical, Physical and ..., 1984"}niiknow/zipcode-us {"localities": ["Cosperville, IN", "Waldron Lake, IN", "Brimfield, IN", "Diamond Lake, IN", "Brimfld, IN", "Wawaka, IN"], "state": "IN", "postal_code": "46794", "locality": "Wawaka, IN", "lat": 41.46219, "region": {"fips": "18", "abbr": "IN", "name": "Indiana"}, "city": "Wawaka", "type": "STANDARD", "lng": -85.452201, "counties": [{"fips": "113", "name": "Noble County"}]}{ "componentChunkName": "component---src-templates-contact-template-js", "path": "/contact/", "result": {"data":{"markdownRemark":{"html":"

Get in Touch.

If you’ve got an inquiry for a job or project, drop me a line at or fill the form right here.

","frontmatter":{"title":"Contact Me"}}},"pageContext":{"slug":"/contact/"}}, "staticQueryHashes": ["2555585279","2841359383","3159585216"]}["ably","aframe","aframe-core","aframe-extras","aframe-firebase-component","aframe-gearvr-controls-component","aframe-physics-system","aframe-src-fit-component","aframe-testras","angular-fireproof","bitmessage","bpmn-js","capot","crds-core","django-tornado-websockets-client","ethereumjs-block","ethereumjs-tx","ethereumjs-util","ethereumjs-util-nwjs","flashpoint","merkle-patricia-tree","metaco-client","phenix-web-sdk","plasmatic","secp256k1","secp256k1.js","solstice","stargatejs","tinder-api-promise"]{ "name": "flow-babel-boilerplate", "version": "0.1.0", "description": "Minimalistic boilerplate with Flow and Babel", "scripts": { "flow-check": "flow check", "flow-coverage": "flow-coverage-report -i src/**/*.js -o coverage -t text -t html", "test": "npm run flow-check && npm run flow-coverage", "build": "npm test && babel src -d dist" }, "keywords": [ "flow", "babel", "javascript", "boilerplate", "template" ], "author": { "name": "", "email": "", "url": "https://lnfnunes.com.br" }, "license": "MIT", "devDependencies": { "babel-cli": "^6.26.0", "babel-preset-env": "^1.6.0", "babel-preset-flow": "^6.23.0", "flow-coverage-report": "^0.3.0" } } { "extends": "@deviltea/eslint-config-ts", "parserOptions": { "project": "./tsconfig.json", "extraFileExtensions": [ ".md" ] }, "rules": { "no-useless-return": "off", "@typescript-eslint/no-unused-vars": "off" } } 10-100 { "name": "leaflet-clipper", "version": "1.1.0", "description": "Allows Union, Difference, Xor, and Intersection operations on two polygons.", "keywords": [ "leaflet", "polygon", "clipper", "union", "difference", "intersection" ], "homepage":"https://willfarrell.github.io/Leaflet.Clipper", "bugs": { "url": "https://github.com/willfarrell/Leaflet.Clipper/issues" }, "license": "MIT", "author": " <>", "files":["dist","svg"], "main": "dist/L.Clipper.min.js", "repository": { "type": "git", "url": ":willfarrell/Leaflet.Clipper.git" }, "scripts": { "example-setup":"cp src/Leaflet.Clipper.js example/js/L.Clipper.js", "lint": "eslint ./src/", "build-setup":"rm -rf dist && mkdir -p dist", "build-js": "cp src/Leaflet.Clipper.js dist/L.Clipper.js", "compress": "uglifyjs ./dist/L.Clipper.js -o ./dist/L.Clipper.min.js -m --comments", "build": "npm run lint && npm run build-setup && npm run build-js && npm run compress && npm run example-setup", "release": "git push origin master && git checkout gh-pages && git merge master && git push origin gh-pages && git checkout master && npm publish" }, "dependencies": { "clipper-lib": "^6.2.1" }, "devDependencies": { "eslint": "^4.3.0", "leaflet": "^1.1.0", "uglify-js": "^3.0.25" } } {"track":"dart","exercise":"beer-song","id":"43af89442ad5478d919c2376fb4e5cbf","url":"https://exercism.io/my/solutions/43af89442ad5478d919c2376fb4e5cbf","handle":"mrkajetanp","is_requester":true,"auto_approve":false}{ "name": "vqmovie", "authors": [ "tosuke" ], "description": "Create movie on vector quantization.", "copyright": "Copyright © 2016, tosuke", "license": "MIT", "dependencies":{ "derelict-sdl2":"*" }, "targetPath":"bin", "workingDirectory":"bin", "sourcePaths":[], "configurations":[ { "name":"encoder", "targetPath":"bin", "sourcePaths":["source/encoder"], "targetType":"executable", "buildOptions":["inline", "optimize"] } ] } { "Actions": [ { "Icon": "actions/watcher/images/actionIcon", "Name": "Watcher", "PropertyInspectorPath": 
"actions/watcher/property_inspector/property_inspector.html", "States": [ { "Image": "actions/watcher/images/actionDefaultImage", "ShowTitle": false } ], "SupportedInMultiActions": false, "Tooltip": "The plugin give you real time coin value", "UUID": "com.vincidev.crypto.WatcherPlugin" } ], "Category": "VinciDev", "CategoryIcon": "images/categoryIcon", "Disabled": false, "Author": "VinciDev", "CodePath": "Crypto", "CodePathWin": "Crypto.exe", "CodePathMac": "Crypto", "Description": "A set of crypto actions for your stream deck", "Name": "Crypto", "Icon": "images/pluginIcon", "URL": "https://github.com/GurYN/StreamDeckPlugins", "Version": "0.1", "SDKVersion": 2, "Software": { "MinimumVersion": "4.1" }, "OS": [ { "Platform": "mac", "MinimumVersion": "10.11" }, { "Platform": "windows", "MinimumVersion": "10" } ] } { "id": 150548, "name": "VK Clean — приятный ВКонтакте. Без рекламы.", "description": "Делает ВК чище и проще.", "user": { "id": 422896, "name": "nmr1studio", "email": "redacted", "paypal_email": null, "homepage": null, "about": "Freestyler Token: \nYou can contact me via :)", "license": "ccbyncsa" }, "updated": "2017-11-16T13:46:23.000Z", "weekly_install_count": 0, "total_install_count": 190, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/150548_after.png?r=1617609941", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": "ccby", "created": "2017-11-02T12:48:44.000Z", "category": "site", "raw_subcategory": "vk", "subcategory": "vk", "additional_info": null, "style_tags": [], "css": "@-moz-document domain(vk.com) {\r\n\r\n._ads_promoted_post_data_w, #ads_left, #side_bar #l_nwsf, #side_bar #l_spr, .left_menu_nav_wrap a[href=\"/blog\"], .left_menu_nav_wrap a[href=\"/dev\"], .left_menu_nav_wrap a[href=\"/ads\"], .left_menu_nav_wrap a[href=\"/about\"], #main_feed .feed_post_field_wrap, #profile_friends_online, #profile_friends a[href=\"/feed?section=updates\"], .im-dialog-select._im_search_croll, ._im_to_unread.im-page--dialogs-filter-wrap, #chat_onl_wrap, #rb_box_fc_clist, .im-page--aside-photo, .nim-dialog--verfifed, .nim-dialog--mute, .im-chat-input .im-chat-input--attach-label, #ui_rmenu_recommended, #ui_rmenu_search + .ui_rmenu_sep, #ui_rmenu_updates, #ui_rmenu_comments, .left_menu_nav_wrap, #groups_filters_wrap, #ui_rmenu_search, #ui_rmenu_phonebook, #ui_rmenu_requests, #friends_possible_block, #invite_button, #ui_rmenu_find + .ui_rmenu_sep, #ui_rmenu_lists, .page_block .page_list_module:not(#public_links) .group_desc, #stories_feed_wrap, #profile_edit_act, .post_like_link._link, .post_reply_link._link, .top_notify_show_all, .top_notify_header._top_notify_header, #submit_post_box:not(.shown):not(.own_field) .ms_item.ms_item_audio, #submit_post_box:not(.shown):not(.own_field) .ms_item.ms_item_photo, #submit_post_box:not(.shown):not(.own_field) .ms_item.ms_item_video, #profile_gifts, .reply_link_wrap, .counts_module, .video_module .video .page_video_play_icon, .top_profile_name, #profile_friends .header_right_link, #profile_photos_module .header_right_link, .im-page .im-page--history_empty.im-page--history .im-page--center-empty, ._im_mess_reply, .friends_field_title + .friends_field {\r\n\tdisplay:none !important;\r\n}\r\n.im-page--stars {\r\n padding-right: 15px;\r\n}\r\n.replies_open, .wr_header {\r\n background-color: #fbfbfb;\r\n}\r\n#top_notify_wrap {\r\n border: none;\r\n}\r\n.page_actions_btn.narrow.stats {\r\n width: 
100%;\r\n}\r\n.page_list_module .line_cell .desc_info {\r\n padding-top: 10px;\r\n}\r\n.video_module .video .page_video_play_icon, .top_profile_name\r\n.im-page--stars._im_important_counter {\r\n padding-right: 15px;\r\n}\r\n.im-page .im-page--dialogs {\r\n padding-bottom: 0;\r\n}\r\n.im-page .im-page--dialogs-footer {\r\n background: transparent !important;\r\n border: none;\r\n height: 55px;\r\n pointer-events: none;\r\n}\r\n.im-page .im-page--dialogs-settings {\r\n background-color: #fff;\r\n padding: 10px;\r\n background-repeat: no-repeat;\r\n background-position: center;\r\n border-radius: 50%;\r\n margin: 0;\r\n box-shadow: 0 10px 20px rgba(42, 88, 133, .2);\r\n margin-right: 15px;\r\n pointer-events: auto;\r\n}\r\n#ads_left.ads_left_empty+.left_menu_nav_wrap {\r\n\tborder-top: none !important;\r\n}\r\ninput.text.ts_input {\r\n\twidth: 10px;\r\n\tpadding: 6px 6px 6px 14px;\r\n\toverflow: hidden;\r\n\ttransition: background-color 0.05s, color 0.05s, width .4s;\r\n}\r\ninput.text.ts_input:hover {\r\n\twidth: 230px;\r\n\tpadding: 6px 6px 6px 19px;\r\n\toverflow: auto;\r\n}\r\n#ts_wrap.ts_wrap .input_back_content {\r\n\twidth: 0 !important;\r\n}\r\n#ts_wrap.ts_wrap {\r\n\toverflow: hidden;\r\n}\r\ninput.text.ts_input:hover #ts_wrap.ts_wrap {\r\n\toverflow: auto;\r\n}\r\n\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/150548/vk-clean.user.js", "style_settings": [] }{ "contacts": [], "guideStarURL": "http://www.guidestarindia.org/Summary.aspx?CCReg=10560", "name": "", "primaryEmail": "", "website": "http://www.salaambharat.org", "organisationType": [ "Direct Service" ], "telephone": [ "918826397700" ], "mainAddrress": { "state": "Delhi", "address": [ "A-1/29", "Sewak Park", "Dwarka Mod, Uttam Nagar", "West Delhi", "Delhi", "110059" ] }, "briefDescription": " is Social Organization registered under the Indian Trusts Act, 1882. is a tribute to the nation expressing the faith of unprivileged who wish to bring an end to their sufferings. It aims at reformation of the society where inequality, suffering and customs are shunned and laws of humanity and fraternity are adopted for sustainable development of all.It is a non-profit NGO working pan India fostering to bridge the gap between the haves and the have nots of the society. It neither embodies our belief that patriotism is neither an asset of the resourceful nor exists in exclusivity; its motto is always noble and lies above materialistic barriers. We are a believer of the cult of dreamers who dare to aspire and know how to fabricate their plans into reality. Although our country is scaling to new heights of globalization but nevertheless the conspicuous void between the unprivileged and the people enjoying the surplus hasn’t witnessed any contraction. In addition to the government’s initiatives several NGO’s and other organizations have taken up to answering to the cries of the society in the past. Much fruitful results have come out of their strenuous efforts but owing to the never ceasing demand of our country the supply is not ample to meet the ends. 
Consequently we felt that now it’s our turn to take the driver’s seat and contribute to the cause of Social Upliftment or Social Re-engineering as we term it.It is not constraint to occupying a portion of digitized text but a movement of few ordinary visionaries with loft ideals and aspirations who are ever ready to nurture their cause with concrete steps.", "yearOfEstablishment": "2010", "regiseredAddrress": { "state": "Delhi", "address": [ "B-547, Sarita Vihar", "Delhi", "South Delhi", "110019" ] } }data/beaconrestapi/src/integration-test/resources/tech/pegasys/teku/beaconrestapi/beacon/paths/_eth_v1_beacon_pool_attester_slashings.json { "get" : { "tags" : [ "Beacon" ], "summary" : "Get AttesterSlashings", "description" : "Retrieves attester slashings known by the node but not necessarily incorporated into any block.", "operationId" : "getEthV1BeaconPoolAttester_slashings", "responses" : { "200" : { "description" : "OK", "content" : { "application/json" : { "schema" : { "$ref" : "#/components/schemas/GetAttesterSlashingsResponse" } } } }, "500" : { "description" : "Server Error" } } }, "post" : { "tags" : [ "Beacon" ], "summary" : "Submit attester slashing object", "description" : "Submits attester slashing object to node's pool and if passes validation node MUST broadcast it to network.", "operationId" : "postEthV1BeaconPoolAttester_slashings", "requestBody" : { "content" : { "application/json" : { "schema" : { "$ref" : "#/components/schemas/AttesterSlashing" } } } }, "responses" : { "200" : { "description" : "Attester Slashing has been successfully validated, added to the pool, and broadcast." }, "400" : { "description" : "Invalid attester slashing, it will never pass validation so it's rejected" }, "500" : { "description" : "Server Error" } } } }zzwzzhao/dawn0 { "config": { "title": "Dn Middleware Docz", "description": "My awesome app using docz", "themeConfig": {}, "ordering": "descending", "version": "1.0.0", "repository": null, "native": false }, "entries": {} }{ "apple_name": "Maça", "apple_desc": "Uma fruta comum encontrada em árvores ao redor no Prado", "garlic_name": "Alho", "garlic_desc": "ㅤ", "milk_bottle_name": "", "milk_bottle_desc": "ㅤ", "pepper_name": "Pimenta", "pepper_desc": "ㅤ", "PepperSeeds_name": "Semente de Pimenta", "PepperSeeds_desc": "ㅤ", "potato_name": "Batata", "potato_desc": "ㅤ", "pumpkin_name": "Abóbora", "pumpkin_desc": "ㅤ", "raw_pasta_name": "", "raw_pasta_desc": "ㅤ", "rice_name": "Arroz", "rice_desc": "ㅤ", "rice_vinegar_name": "Vinagre de Arroz", "rice_vinegar_desc": "ㅤ", "salt_name": "", "salt_desc": "ㅤ", "tomato_name": "Tomate", "tomato_desc": "ㅤ" }{ "name": "YYNavigation", "version": "0.1.0", "summary": "YYNavigation is a custom navigation controller, it is very simple to use.", "homepage": "https://git.oschina.net/YownYang/yynavigation.git", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "": "" }, "source": { "git": "https://git.oschina.net/YownYang/yynavigation.git", "tag": "0.1.0" }, "platforms": { "ios": "8.0" }, "source_files": "YYNavigation/Classes/**/*" } GetOccasion/mitragyna { "name": "@getoccasion/mitragyna", "version": "0.2.4", "description": "A library for managing ActiveResource.js as React components", "main": "build/mitragyna.js", "scripts": { "test": "grunt spec", "develop": "grunt", "build": "grunt build" }, "repository": { "type": "git", "url": "git+https://github.com/getoccasion/mitragyna.git" }, "keywords": [ "react", "active-resource" ], "author": ", & ", "license": "MIT", "bugs": { "url": 
"https://github.com/getoccasion/mitragyna/issues" }, "homepage": "https://github.com/getoccasion/mitragyna#readme", "devDependencies": { "babel-core": "^6.26.0", "babel-plugin-transform-class-properties": "^6.24.1", "babel-plugin-transform-es2015-modules-umd": "^6.24.1", "babel-plugin-transform-object-rest-spread": "^6.26.0", "babel-plugin-transform-react-jsx": "^6.24.1", "babel-preset-env": "^1.6.1", "grunt": "0.x.x", "grunt-babel": "^7.0.0", "grunt-contrib-clean": "0.5.x", "grunt-contrib-concat": "^1.0.1", "grunt-contrib-connect": "0.4.x", "grunt-contrib-uglify": "^3.3.0", "grunt-contrib-watch": "0.5.x", "grunt-umd": "^2.4.0", "load-grunt-tasks": "^3.5.2" }, "dependencies": { "active-resource": "GetOccasion/activeresource.js#track_local_changes", "classnames": "^2.2.5", "prop-types": "^15.6.0", "react": "^16.2.0", "shallowequal": "^1.0.2", "underscore": "^1.8.3" } } {"name":"Southern Tier IPA","abv":6.5,"ibu":0.0,"srm":0.0,"upc":0,"type":"beer","brewery_id":"southern_tier_brewing_co","updated":"2010-07-22 20:00:20","description":"IPA stands for India Pale Ale and ours is an American version of the classic style. IPA's namesake lies in its colonial roots. British soldiers on their way to India drank a lot of beer, but found it would go stale on the long voyages. Meanwhile, brewers knew that by adding more hops the beer would stay fresh. Voila! A new style was born and it is one we are proud to brew. Southern Tier IPA is triple-hopped on its journey to your glass for a fully aromatic experience.","style":"American-Style India Pale Ale","category":"North American Ale"}{"status":{"code":200,"http":"Fetched (ring) 200 226 and parsed 2/44 entries","nextFetch":1505849,"entriesCountSinceLastMaintenance":8,"velocity":68.1,"popularity":3.971509844915876,"generatedIds":true,"period":226,"lastFetch":1505848,"lastParse":1505848,"lastMaintenanceAt":1505817,"feed":"http://mlb.mlb.com/partnerxml/gen/news/rss/mlb.xml"},"permalinkUrl":"http://mlb.mlb.com","standardLinks":{"alternate":[{"title":"MLB News Index","rel":"alternate","href":"http://mlb.mlb.com","type":"text/html"}]},"title":"MLB News Index","updated":1505862948,"id":"mlb-news-index-2017-9-19-23","items":[{"id":"http://mlb.mlb.com/news/article/mlb/todays-mlb-starting-lineups-sept-19?ymd=20170919&content_id=255050078&vkey=news_mlb","published":1505846867,"updated":1505846867,"title":"Today's starting lineups: Sept. 19","summary":"Every lineup, every day, as they are made public by the clubs.","content":"Every lineup, every day, as they are made public by the clubs.","permalinkUrl":"http://mlb.mlb.com/news/article/mlb/todays-mlb-starting-lineups-sept-19?ymd=20170919&content_id=255050078&vkey=news_mlb","standardLinks":{"alternate":[{"title":"Today's starting lineups: Sept. 
19","rel":"alternate","href":"http://mlb.mlb.com/news/article/mlb/todays-mlb-starting-lineups-sept-19?ymd=20170919&content_id=255050078&vkey=news_mlb","type":"text/html"}]},"language":"en"},{"id":"http://mlb.mlb.com/news/article/mlb/fantasy411-podcast-looks-at-rhys-hoskins?ymd=20170919&content_id=255042668&vkey=news_mlb","published":1505840979,"updated":1505840979,"title":"Fantasy411 Podcast: Investing in Hoskins","summary":"A transcript of a segment from this week's Fantasy411 podcast looks at Rhys Hoskins.","content":"A transcript of a segment from this week's Fantasy411 podcast looks at Rhys Hoskins.","permalinkUrl":"http://mlb.mlb.com/news/article/mlb/fantasy411-podcast-looks-at-rhys-hoskins?ymd=20170919&content_id=255042668&vkey=news_mlb","standardLinks":{"alternate":[{"title":"Fantasy411 Podcast: Investing in Hoskins","rel":"alternate","href":"http://mlb.mlb.com/news/article/mlb/fantasy411-podcast-looks-at-rhys-hoskins?ymd=20170919&content_id=255042668&vkey=news_mlb","type":"text/html"}]},"language":"en"}]}{"topic": "Waldram projection camera views.", "category": "radiance-general", "attachments": [], "created_by_name": "", "created_at": "January 27, 2005 at 09:47AM", "body": "Good morning,\n\n\nI am interested to know if it is possible to generate a camera view that \nreplicates the projection of a waldram diagram? I then assume it would be \npossible to overlay the relevant waldram diagram using Pcomb.\n\n\nIn particular I am looking to address the calculations of Rights to Light \nissues in the UK. I am confident of being able to calculate the sky \ncomponent within a scene at reference points, but as a nervious beginner \nwould like to be able to generate the waldram diagram from the same \nreference points to double check the accuracy [and convince potential \nclients].\n\n\nMany thanks..\n\n\nNickd\n___\nAutomatically generated content from [radiance mailing-list](https://radiance-online.org/pipermail/radiance-general/2005-January/002341.html).", "id": "radiance-general_002341", "created_by": "nick_devlin"}0 { "name": "javascript-exercises", "version": "1.0.0", "description": "A series of exercises in Javascript 🔥", "main": "utils.js", "scripts": { "test": "jest" }, "repository": { "type": "git", "url": "git+https://github.com/from-zero-to-fullstack/javascript-exercises.git" }, "keywords": [], "author": "", "license": "MIT", "bugs": { "url": "https://github.com/from-zero-to-fullstack/javascript-exercises/issues" }, "homepage": "https://github.com/from-zero-to-fullstack/javascript-exercises#readme", "devDependencies": { "jest": "^27.5.1", "jest-each": "^27.5.1" } } bacora03/YDM-Data { "name": "", "number": "92361635", "is_illegal": false, "text": "1 \"Scrap\" Tuner + 2 or more non-Tuner monsters\nOnce per turn, you can select 1 card you control and up to 3 cards in your opponent's Graveyard. Destroy your selected card and return the opponent's cards to the Deck. 
When this card is destroyed by your opponent's card (either by battle or by card effect) and sent to the Graveyard, select 1 non-Synchro \"Scrap\" monster from your Graveyard, and Special Summon it.", "type": "Monster", "is_monster": true, "is_spell": false, "is_trap": false, "species": "Dragon", "attack": "3200", "defense": "2400", "attribute": "EARTH", "is_pendulum": false, "is_xyz": false, "is_synchro": true, "is_fusion": false, "is_link": false, "is_extra_deck": true, "monster_types": [ "Synchro" ], "stars": "10" }{"PREVALENCE_BY_GENDER_AGE_YEAR":{"TRELLIS_NAME":[],"SERIES_NAME":[],"X_CALENDAR_YEAR":[],"Y_PREVALENCE_1000PP":[]},"PREVALENCE_BY_MONTH":{"X_CALENDAR_MONTH":[],"Y_PREVALENCE_1000PP":[]},"CONDITIONS_BY_TYPE":{"CONCEPT_NAME":["Claim- Inpatient: Primary diagnosis","Claim- Inpatient: Secondary diagnosis"],"COUNT_VALUE":[9,174]},"AGE_AT_FIRST_DIAGNOSIS":{"CATEGORY":["MALE","FEMALE"],"MIN_VALUE":[26,57],"P10_VALUE":[57,61],"P25_VALUE":[62,75],"MEDIAN_VALUE":[74,81],"P75_VALUE":[81,87],"P90_VALUE":[86,300],"MAX_VALUE":[306,300]}} packages/react-linked-input/package.json { "name": "react-linked-input", "version": "1.0.0", "description": "LinkedInput supports the ReactLink semantics", "main": "LinkedInput.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "git+https://github.com/facebook/react.git" }, "keywords": [ "react", "linkedinput", "input", "linked", "reactlink" ], "author": "", "license": "BSD-3-Clause", "peerDependencies": { "react": "^15.0.0-rc.2" } } dailyscape/rsdataitems/38035.json {"id":38035,"value":1,"name":"Surfboard shield token","limit":2,"name_pt":"","price":2840686,"last":2840686} Nova-develoment-team/Nova-api0 [ "Yes for sure", "ew no", "yup", "welp i don't know. Maybe ask another time", "nope you are not", "prob", "50/50", "yeaa", "i don't know what i do know is Duckey is awsome", "damn yes", "damn no", "An error occurred, please ask another question", "My sources say no", "Yes", "No", "Maybe", "Not sure", "Yes, definitely", "Ask again later", "Better not tell you now", "You will never reach the truth", "Concentrate and ask again", "THAT'S A SOLID NO", "When you grow one braincell, yes", "Shut up! You're damn annoying!", "I'm short on magical energy so I can't give you an answer yet", "Give this poor 8ball a break", "Sure, why not?", "There's a 50-50 chance it's true", "Shut up you rat!", "Idk", "https://dashboard.nova-bot.tk", "You may rely on it", "As I see it, yes", "Yeah, when Pigs fly", "Nah that sucks tbh", "Most likely", "Signs point to yes", "Outlook not so good." 
]0 { "body": "Given a spack package, have a way to create a spack.yaml for development of that package\r\n\r\n### Rationale\r\n\r\nPeople have put a lot of work into describing their packages in the package.py file, we should be able to reuse that work\r\n\r\n### Description\r\n\r\nSomething like `spack env create --from-package foo`\r\n\r\nCreates a named environment `foo` with all the dependencies of package `foo`, that developers can then copy into their project under version control.\r\n\r\n### Additional information\r\nThis probably won't be fully functional until environments are co-concretized.", "user": "gonsie", "url": "https://api.github.com/repos/spack/spack/issues/10048", "updated_at": "2018-12-13 17:36:24", "created_at": "2018-12-07 00:41:01", "closed_at": "None", "state": "open", "title": "spack environmentize my package", "number": 10048, "milestone": null, "labels": [ "environments" ], "id": 388458134, "html_url": "https://github.com/spack/spack/issues/10048", "assignees": [], "comments": 4 }{ "@odata.type": "#SecureBootDatabase.v1_0_0.SecureBootDatabase", "Id": "db", "Name": "db - Authorized Signature Database", "Description": "UEFI db Secure Boot Database", "DatabaseId": "db", "Certificates": { "@odata.id": "/redfish/v1/Systems/437XR1138R2/SecureBoot/SecureBootDatabases/db/Certificates/" }, "Signatures": { "@odata.id": "/redfish/v1/Systems/437XR1138R2/SecureBoot/SecureBootDatabases/db/Signatures/" }, "Actions": { "#SecureBootDatabase.ResetKeys": { "target": "/redfish/v1/Systems/437XR1138R2/SecureBoot/SecureBootDatabases/db/Actions/SecureBootDatabase.ResetKeys", "ResetKeysTypeableValues": [ "ResetAllKeysToDefault", "DeleteAllKeys" ] }, "Oem": {} }, "Oem": {}, "@odata.id": "/redfish/v1/Systems/437XR1138R2/SecureBoot/SecureBootDatabases/db", "@Redfish.Copyright": "Copyright 2014-2021 DMTF. For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright." }amaajemyfren/data { "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "This talk will focus on how one can build complex data pipelines in\nPython. I will introduce Luigi and show how it solves problems while\nrunning multiple chain of batch jobs like dependency resolution,\nworkflow management, visualisation, failure handling etc.\n\nAfter that, I will present how to package Luigi pipelines as Docker\nimage for easier testing and deployment. Finally, I will go through way\nto deploy them on Kubernetes cluster, thus making it possible to scale\nBig Data pipelines on- demand and reduce infrastructure costs. I will\nalso give tips and tricks to make Luigi Scheduler play well with\nKubernetes batch execution feature.\n\nThis talk will be accompanied by demo project. It will be very\nbeneficial for audience who have some experience in running batch jobs\n(not necessarily in Python), typically people who work in Big Data\nsphere like data scientists, data engineers, BI devs and software\ndevelopers. 
Familiarity with Python is helpful but not needed.", "duration": 1764, "language": "eng", "recorded": "2019-07-11", "related_urls": [ { "label": "Conference schedule", "url": "https://ep2019.europython.eu/schedule/" }, { "label": "slides", "url": "https://ep2019.europython.eu/media/conference/slides/UteEqy2-building-data-workflows-with-luigi-and-kubernetes.pdf" } ], "speakers": [ "" ], "tags": [ "Architecture", "Big Data", "Data", "Distributed Systems", "Scaling" ], "thumbnail_url": "https://i.ytimg.com/vi/41ubI6a3jzI/maxresdefault.jpg", "title": "Building Data Workflows with Luigi and Kubernetes", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=41ubI6a3jzI" } ] } package.json { "name": "@andas/twitch-channel-sensor", "version": "1.0.0", "description": "", "main": "index.js", "scripts": { "start": "node dist/main.js", "build": "tsc -p .", "buildrun": "tsc -p . && npm start", "dev": "ts-node-dev --respawn --transpile-only src/*.ts", "test": "echo \"Error: no test specified\" && exit 1" }, "repository": { "type": "git", "url": "git+https://github.com/andas-tech/twitch-chatbot.git" }, "author": "nicseltzer", "license": "MIT", "bugs": { "url": "https://github.com/andas-tech/twitch-chatbot/issues" }, "homepage": "https://github.com/andas-tech/twitch-chatbot#readme", "dependencies": { "@andas/streaming-events": "github:andas-tech/streaming-event-streaming", "@tsconfig/node16": "^1.0.2", "@twurple/auth": "^5.0.4", "@twurple/chat": "^5.0.4", "dotenv": "^10.0.0", "kafka-node": "^5.0.0", "pino": "^7.0.0-rc.3", "typescript": "^4.3.5" }, "devDependencies": { "eslint": "^7.2.0", "eslint-config-airbnb": "^18.2.1", "eslint-config-airbnb-base": "^14.2.1", "eslint-plugin-import": "^2.22.1", "eslint-plugin-jsx-a11y": "^6.4.1", "eslint-plugin-react": "^7.21.5", "eslint-plugin-react-hooks": "^1.7.0", "ts-node-dev": "^1.1.8" } } 19/Schriftliche Frage/19-244382.json { "vorgangId": "244382", "VORGANG": { "WAHLPERIODE": "19", "VORGANGSTYP": "Schriftliche Frage", "TITEL": "Formulierung von Rechtsvorschriften", "AKTUELLER_STAND": "Beantwortet", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "19/7341", "DRS_TYP": "Schriftliche Fragen", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/19/073/1907341.pdf" }, "EU_DOK_NR": "", "SCHLAGWORT": [ "Gesetzgebung", { "_fundstelle": "true", "__cdata": "Rechtsverordnung" } ], "ABSTRAKT": "Originaltext der Frage(n): \r\n \r\nWelche Position bezieht die Bundesregierung gegenüber Bedenken (vgl. u. a. www.noz.de/ deutschland-welt/politik/artikel/1610446/gute-kita- gesetz-ein-unverschaemter-name-kommentiert- die-noz), dass die Verwendung von normativ-wertenden und von der tatsächlichen Bezeichnung abweichenden Gesetzesbezeichnungen im Rahmen ihrer Öffentlichkeitsarbeit (z. B. \"Gute-Kita-Gesetz\" oder \"Starke-Familien-Gesetz\") die kritische Auseinandersetzung mit bzw. die öffentliche Meinungsbildung über die Arbeit der Bundesregierung in nicht neutral-informierender Art und Weise beeinflusst, und wie begründet sie dies vor dem Hintergrund der von ihr selbst im Handbuch der Rechtsförmlichkeit festgehaltenen, allgemeinen Empfehlungen für das Formulieren von Rechtsvorschriften (Bundesanzeiger Nr. 160a vom 22. September 2008), wonach Gesetze und Rechtsvorschriften redlich formuliert sein müssen? " }, "VORGANGSABLAUF": { "VORGANGSPOSITION": { "ZUORDNUNG": "BT", "URHEBER": "Schriftliche Frage/Schriftliche Antwort ", "FUNDSTELLE": "25.01.2019 - BT-Drucksache 19/7341, Nr. 
74", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/19/073/1907341.pdf", "PERSOENLICHER_URHEBER": [ { "VORNAME": "Otto", "NACHNAME": "Fricke", "FUNKTION": "MdB", "FRAKTION": "FDP", "AKTIVITAETSART": "Frage" }, { "VORNAME": "Caren", "NACHNAME": "Marks", "FUNKTION": "Parl. Staatssekr.", "RESSORT": "Bundesministerium für Familie, Senioren, Frauen und Jugend", "AKTIVITAETSART": "Antwort" } ] } } } { "directions": [ "In a slow-cooker or a large pot over low heat, combine apple cider, orange juice and lemon juice. Season with cloves, ginger and nutmeg. Bring to a simmer. If using a slow cooker, allow to simmer all day. Serve hot." ], "ingredients": [ "2 quarts apple cider", "2 cups orange juice", "1/2 cup lemon juice", "12 whole cloves", "4 cinnamon sticks", "1 pinch ground ginger", "1 pinch ground nutmeg" ], "language": "en-US", "source": "allrecipes.com", "tags": [], "title": "Wassail Punch", "url": "http://allrecipes.com/recipe/20577/wassail-punch/" } { "api_key": "secretapikey", "title": "BARONTESTER Simple Test Invoice", "text": "Instructions for this invoice may be written here, which may include HTML links.", "currency": "BTC", "min_confirmations": 1, "line_items": [ { "description": "Line Item #1", "quantity": 1, "amount": 50 } ], "webhooks":{ "token": "token", "paid": {"url": "http://localhost:9242/paid"}, "invalid": {"url": "http://localhost:9242/invalid" } } } public/locales/en/RequestAutoRenewal.json { "Title": "Automotive Policy Renewal Agreement", "FirstName": "First name:", "LastName": "Last name:", "Email": "Email", "SubmitButton": "Submit for renewal", "SubmitButtonClicked": "Requesting...", "Error": { "FirstName": "First name is required.", "LastName": "Last name is required.", "Email": "Email is required." }, "ApiDecription": { "SeeMore": "- See behind the scenes", "CodeFlow": "

This sample features:

  • Creating a clickwrap
  • Embedding a clickwrap on your site

Code flow:

Step 1

When the form is submitted, the Click API is used to create a new clickwrap. A clickwrap is a DocuSign technology that enables you to have a webpage where you can ask your users to click to agree to something. To create the clickwrap, you need to provide the name, the displaySettings that control what the UI experience will look like, and a document that will be shown to the user when they accept.

To create the clickwrap, send an API request to:

POST /v1/accounts/{accountId}/clickwraps
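As an illustration only (not DocuSign's own sample), a minimal TypeScript sketch of this request, assuming a Node 18+ runtime with the built-in fetch API; the access token, account ID, base64 document, and the request-body field names (name, displaySettings, documents) are placeholders inferred from the description above rather than taken from the Click API reference:

// Hedged sketch of Step 1: create a clickwrap via the Click API.
// BASE_PATH matches the demo environment used in the embed snippet below.
const BASE_PATH = 'https://demo.docusign.net/clickapi';

async function createClickwrap(accessToken: string, accountId: string, termsPdfBase64: string): Promise<string> {
  const response = await fetch(BASE_PATH + '/v1/accounts/' + accountId + '/clickwraps', {
    method: 'POST',
    headers: {
      'Authorization': 'Bearer ' + accessToken,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      // Clickwrap name (assumed field name, taken from the renewal terms in this sample).
      name: 'Renewal Terms',
      // Controls what the UI experience will look like (assumed field names).
      displaySettings: {
        displayName: 'Insurance Renewal Terms',
        consentButtonText: 'I Agree',
        mustRead: true,
        requireAccept: true
      },
      // Document shown to the user when they accept (assumed field names).
      documents: [{
        documentName: 'Terms of Service',
        documentBase64: termsPdfBase64,
        fileExtension: 'pdf',
        order: 0
      }]
    })
  });
  const result = await response.json();
  // Step 2 below embeds this ID on the web page.
  return result.clickwrapId;
}

The clickwrapId returned by this call is the value plugged into the embed snippet in Step 2.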
Step 2

The call from step 1 returns the clickwrap ID, which we need. We then add this to the HTML on our website so that it now looks like this:

<div id=\"ds-terms-of-service\"></div><script src=\"https://demo.docusign.net/clickapi/sdk/latest/docusign-click.js\"></script><script>docuSignClick.Clickwrap.render({    environment: 'https://demo.docusign.net',    accountId: '<your account ID>',    clickwrapId: '<your clickwrap ID',    clientUserId: 'UNIQUE_USER_ID'}, '#ds-terms-of-service');</script>
" }, "Renewal": { "TermsName": "Renewal Terms", "DisplayName": "Insurance Renewal Terms", "TermsRenewal": "By accepting these conditions, you are giving your consent to a digital-only delivery of the policy renewal documents." } } 1-10 [{"File":"\/geo\/municipios.zip","Basename":"municipios","Extension":"zip","Type":"geo","Subtype":"municipios","Last_Modified":"2020-11-15 18:11:26","Date":null,"Month":null,"State":null,"State_Name":null},{"File":"\/geo\/estados.zip","Basename":"estados","Extension":"zip","Type":"geo","Subtype":"estados","Last_Modified":"2020-11-15 18:11:26","Date":null,"Month":null,"State":null,"State_Name":null}]10-100 {"title": "It's Creepy, But it Doesn't Bother Me.", "fields": ["data collection", "social media", "social consciousness", "feeling", "sentence"], "abstract": "Undergraduates interviewed about privacy concerns related to online data collection made apparently contradictory statements. The same issue could evoke concern or not in the span of an interview, sometimes even a single sentence. Drawing on dual-process theories from psychology, we argue that some of the apparent contradictions can be resolved if privacy concern is divided into two components we call intuitive concern, a \"gut feeling,\" and considered concern, produced by a weighing of risks and benefits. Consistent with previous explanations of the so-called privacy paradox, we argue that people may express high considered concern when prompted, but in practice act on low intuitive concern without a considered assessment. We also suggest a new explanation: a considered assessment can override an intuitive assessment of high concern without eliminating it. Here, people may choose rationally to accept a privacy risk but still express intuitive concern when prompted.", "citation": "Citations (10)", "year": "2016", "departments": ["University of Michigan", "University of Michigan", "University of Michigan"], "conf": "chi", "authors": [".....http://dblp.org/pers/hd/p/Phelan:Chanda", ".....http://dblp.org/pers/hd/l/Lampe:Cliff", ".....http://dblp.org/pers/hd/r/Resnick:Paul"], "pages": 12}{ "confirm_anim": "M Dad Idle", "idle_anim": "BaldimongMenu", "position": [ 93, -6 ], "scale": 0.6, "image": "Menu_Baldimong" }ourway/dingo [ { "correct_order": [ "A scientist is sick.", "He lost his memory and does not know where the kitchen is", "But when he is hungry at night, he is automatically finding the location of the kitchen.", "The scientists have studied it confirmed that 100 years ago." ] }, { "sources": [ "He lost his memory and does not know where the kitchen is", "A scientist is sick.", "But when he is hungry at night, he is automatically finding the location of the kitchen.", "The scientists have studied it confirmed that 100 years ago." 
] } ]0 { "Error(if MashUp)": false, "Mashup": false, "Original Song Names": [ "Rise" ], "Switch Vocals Vid": "Nightcore - Rise (Switching Vocals_lyrics)" }Nyxiie/nhentai-extension-ff { "name": "N-Hentai Extension", "description": "Redirect your 6 digit code to nhentai", "version": "1.0", "manifest_version": 2, "background": { "scripts": ["main.js"] }, "permissions": ["contextMenus","tabs"], "icons": { "16": "/images/logo.png", "32": "/images/logo.png", "48": "/images/logo.png", "128": "/images/logo.png" }, "browser_specific_settings": { "gecko": { "id": "" } } } 10-100 { "commentStamp" : "", "super" : "Object", "category" : "Tarantalk-Core", "classinstvars" : [ ], "pools" : [ ], "classvars" : [ ], "instvars" : [ "request", "response", "waitTimeoutSeconds", "semaphore", "value" ], "name" : "TrSyncValue", "type" : "normal" }{"survey.css":","survey.min.css":","survey.react.js":","survey.react.min.js":"}{"template":{"small":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/1.0","medium":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/2.0","large":"https://static-cdn.jtvnw.net/emoticons/v1/{image_id}/3.0"},"channels":{"ballewga_whale":{"title":"Ballewga_Whale","channel_id":64579478,"link":"http://twitch.tv/ballewga_whale","desc":null,"plans":{"$4.99":"232919","$9.99":"232920","$24.99":"232921"},"id":"ballewga_whale","first_seen":"2017-07-08 01:15:06","badge":null,"badge_starting":null,"badge_3m":null,"badge_6m":null,"badge_12m":null,"badge_24m":null,"badges":null,"bits_badges":null,"cheermote1":null,"cheermote100":null,"cheermote1000":null,"cheermote5000":null,"cheermote10000":null,"set":232919,"emotes":[{"code":"ballewThink","image_id":220565,"set":232919}]}}} rominirani/devlibrary { "version": 1, "source": "github", "authorIds": [ "sridhar-sp" ], "owner": "sridhar-sp", "repo": "compose-neumorphism", "name": "Neumorphism using Jetpack Compose", "shortDescription": "A Jetpack compose library to create \"Neumorphic\" design.\n\"Neumorphism\" is a ui design concept which uses shadows to create awesome looking UI.", "longDescription": "A Jetpack compose library to create \"Neumorphic\" design.\n\"Neumorphism\" is a ui design concept which uses shadows to create awesome looking UI.", "content": "README.md", "pages": [], "tags": [ "compose", "ui", "kotlin" ] } {"h":[{"d":[{"e":["`Sapikelikelingaw~ `ako~ `papipalalen~ `canira~.我想搖鈴弄醒他們。"],"f":"想搖鈴。"}]}],"tag":"[疊 2]","stem":"keling","t":"sapikelikelingaw"}{"id":"1038733","name":"hexo-theme-nexmoe","font_family":"nexmoefont","css_prefix_text":"icon-","description":"","glyphs":[{"icon_id":"10214","name":"图","font_class":"tuchong","unicode":"e600","unicode_decimal":58880},{"icon_id":"1314961","name":"站酷","font_class":"zhanku","unicode":"e66f","unicode_decimal":58991},{"icon_id":"13553517","name":"douban-fill","font_class":"douban-fill","unicode":"eeae","unicode_decimal":61102},{"icon_id":"13253915","name":"coffee","font_class":"coffee","unicode":"e618","unicode_decimal":58904},{"icon_id":"929736","name":"rss","font_class":"rss","unicode":"e7ea","unicode_decimal":59370},{"icon_id":"4936584","name":"app 
store-fill","font_class":"appstore-fill","unicode":"e853","unicode_decimal":59475},{"icon_id":"4936690","name":"mail-fill","font_class":"mail-fill","unicode":"e870","unicode_decimal":59504},{"icon_id":"1899043","name":"steam","font_class":"steam","unicode":"e722","unicode_decimal":59170},{"icon_id":"3743130","name":"battlenet","font_class":"battlenet","unicode":"e604","unicode_decimal":58884},{"icon_id":"3780842","name":"youtube","font_class":"youtube","unicode":"e733","unicode_decimal":59187},{"icon_id":"4766297","name":"area chart","font_class":"areachart","unicode":"e7af","unicode_decimal":59311},{"icon_id":"4904445","name":"telegram","font_class":"telegram","unicode":"e646","unicode_decimal":58950},{"icon_id":"4936957","name":"zhihu","font_class":"zhihu","unicode":"e87c","unicode_decimal":59516},{"icon_id":"4936984","name":"QQ","font_class":"QQ","unicode":"e882","unicode_decimal":59522},{"icon_id":"4936987","name":"weibo","font_class":"weibo","unicode":"e883","unicode_decimal":59523},{"icon_id":"4936991","name":"wechat-fill","font_class":"wechat-fill","unicode":"e884","unicode_decimal":59524},{"icon_id":"5004906","name":"Github","font_class":"github","unicode":"e60b","unicode_decimal":58891},{"icon_id":"6742514","name":"dribbble","font_class":"dribbble","unicode":"eaab","unicode_decimal":60075},{"icon_id":"6845552","name":"twitter","font_class":"twitter","unicode":"e617","unicode_decimal":58903},{"icon_id":"6978316","name":"bilibili_tv","font_class":"bilibili","unicode":"e60f","unicode_decimal":58895},{"icon_id":"4936512","name":"time-circle-fill","font_class":"time-circle-fill","unicode":"e848","unicode_decimal":59464},{"icon_id":"4936544","name":"calendar-fill","font_class":"calendar-fill","unicode":"e84d","unicode_decimal":59469},{"icon_id":"4936668","name":"eye-fill","font_class":"eye-fill","unicode":"e869","unicode_decimal":59497},{"icon_id":"4936683","name":"tag-fill","font_class":"tag-fill","unicode":"e86e","unicode_decimal":59502},{"icon_id":"4936685","name":"tags-fill","font_class":"tags-fill","unicode":"e86f","unicode_decimal":59503},{"icon_id":"4767011","name":"right","font_class":"right","unicode":"e7eb","unicode_decimal":59371},{"icon_id":"4767012","name":"left","font_class":"left","unicode":"e7ec","unicode_decimal":59372},{"icon_id":"4767059","name":"menu","font_class":"menu","unicode":"e7f5","unicode_decimal":59381},{"icon_id":"4767060","name":"unordered list","font_class":"unorderedlist","unicode":"e7f4","unicode_decimal":59380},{"icon_id":"4767094","name":"ellipsis","font_class":"ellipsis","unicode":"e7fc","unicode_decimal":59388},{"icon_id":"4765727","name":"info-circle","font_class":"info-circle","unicode":"e77e","unicode_decimal":59262},{"icon_id":"4766438","name":"container","font_class":"container","unicode":"e7b0","unicode_decimal":59312},{"icon_id":"4766685","name":"home","font_class":"home","unicode":"e7c6","unicode_decimal":59334}]}{ "citations" : [ { "textCitation" : "[See xpfi on Metamath](http://us.metamath.org/mpegif/xpfi.html)" } ], "names" : [ "xpfi" ], "language" : "METAMATH_SET_MM", "lookupTerms" : [ "#T_cA", "#T_wcel", "#T_cfn", "#T_wa", "#T_cB", "#T_wcel", "#T_cfn", "#T_wi", "#T_cA", "#T_cxp", "#T_cB", "#T_wcel", "#T_cfn" ], "metaLanguage" : "METAMATH", "remarks" : " The Cartesian product of two finite sets is finite. (Contributed by , 2-Sep-2009.) (Revised by , 12-Mar-2015.) ", "statement" : "xpfi $p |- ( ( A e. Fin /\\ B e. Fin ) -> ( A X. B ) e. Fin ) $." 
}{"word":"sinuate","definition":"Having the margin alternately curved inward and outward; having rounded lobes separated by rounded sinuses; sinuous; wavy.\n\nTo bend or curve in and out; to wind; to turn; to be sinusous. Woodward."}{ "key": "graph", "name": "Graph", "authors": [ " and a University of Notre Dame team; now maintained by and ." ], "description": "The BGL graph interface and graph components are generic, in the same sense as the the Standard Template Library (STL).", "category": [ "Algorithms", "Containers", "Iterators" ], "maintainers": [ " " ] } { "723253f-a": { "width": 600, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/723253f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/723253f-a.jpg", "title": "", "date": "1930", "text": "49, 50 and 51-52 South Street, west side, north from Gouverneur (left) to Jone's (right) Lanes.\nOctober 13, 1930.\nP. L. Sperr.\n", "folder": "South Street & Gouverneur Lane, Manhattan, NY", "height": 398, "original_title": "Manhattan: South Street - Gouverneur Lane", "years": [ "1930" ] }, "723250f-a": { "width": 600, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/723250f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/723250f-a.jpg", "title": "", "date": "1930", "text": "South Street, west side, south from, but not including Gouverneur Lane to O1d Slip, showing across the barge at the river's edge, the cleared area, upon which, the U.S. Assay Office building, will rise. Shown on the right, are Nos. 46 and 47 South Street; two 4-story buildings, just south of Gouverneur Lane. The white structure (left center), is the lst Precinct Police Station.\nDecember 13, 1930.\nP.L. Sperr.\n", "folder": "South Street & Gouvernueur Street, Manhattan, NY", "height": 402, "original_title": "Manhattan: South Street - Gouvernueur Street", "years": [ "1930" ] }, "723251f-a": { "width": 399, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/723251f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/723251f-a.jpg", "title": "", "date": "1936", "text": "9 South Street, at the N.W. corner of Gouverneur Lane.\nApril 14, 1936.\nP. L. Sperr.\n", "folder": "South Street & Gouverneur Lane, Manhattan, NY", "height": 600, "original_title": "Manhattan: South Street - Gouverneur Lane", "years": [ "1936" ] }, "720026f-b": { "width": 398, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/720026f-b.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/720026f-b.jpg", "title": "", "date": "", "text": "(1)\nGouverneur Lane, west from, but not including South, to and including Water Streets.\nApril 14, 1936.\nP. L. Sperr.\n(2)\nThe same, slightly westward.\nNovember 10, 1930.\nP. L. Sperr.\n(3)\nThe same, from a point further westward. The view includes Front Street.\nNovember 10, 1930.\nP. L. Sperr.\n", "folder": "Gouverneur Lane & South Street, Manhattan, NY", "height": 600, "original_title": "Manhattan: Gouverneur Lane - South Street", "years": [ "" ] }, "720026f-c": { "width": 400, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/720026f-c.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/720026f-c.jpg", "title": "", "date": "", "text": "(1)\nGouverneur Lane, west from, but not including South, to and including Water Streets.\nApril 14, 1936.\nP. L. Sperr.\n(2)\nThe same, slightly westward.\nNovember 10, 1930.\nP. L. Sperr.\n(3)\nThe same, from a point further westward. The view includes Front Street.\nNovember 10, 1930.\nP. L. 
Sperr.\n", "folder": "Gouverneur Lane & South Street, Manhattan, NY", "height": 600, "original_title": "Manhattan: Gouverneur Lane - South Street", "years": [ "" ] }, "720026f-a": { "width": 402, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/720026f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/720026f-a.jpg", "title": "", "date": "", "text": "(1)\nGouverneur Lane, west from, but not including South, to and including Water Streets.\nApril 14, 1936.\nP. L. Sperr.\n(2)\nThe same, slightly westward.\nNovember 10, 1930.\nP. L. Sperr.\n(3)\nThe same, from a point further westward. The view includes Front Street.\nNovember 10, 1930.\nP. L. Sperr.\n", "folder": "Gouverneur Lane & South Street, Manhattan, NY", "height": 600, "original_title": "Manhattan: Gouverneur Lane - South Street", "years": [ "" ] }, "723252f-a": { "width": 405, "thumb_url": "http://oldnyc-assets.nypl.org/thumb/723252f-a.jpg", "image_url": "http://oldnyc-assets.nypl.org/600px/723252f-a.jpg", "title": "", "date": "", "text": "South Street, west side, north from, but not including Gouverneur Lane. Prominent building shown is No. 120 Wall Street.\nMay 30, 1936.\nP. L. Sperr.\n", "folder": "South Street & Gouverneur Lane, Manhattan, NY", "height": 600, "original_title": "Manhattan: South Street - Gouverneur Lane", "years": [ "" ] } }{"appid": 575910, "name": "Trump Simulator 2017", "windows": true, "mac": false, "linux": false, "early_access": false, "lookup_time": 1491008471}{"packages":{"wpackagist-plugin\/the-loops":{"0.1":{"name":"wpackagist-plugin\/the-loops","version":"0.1","version_normalized":"0.1.0.0","uid":351296,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.0.1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/0.1"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"0.2":{"name":"wpackagist-plugin\/the-loops","version":"0.2","version_normalized":"0.2.0.0","uid":351297,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.0.2.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/0.2"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"0.3":{"name":"wpackagist-plugin\/the-loops","version":"0.3","version_normalized":"0.3.0.0","uid":351298,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.0.3.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/0.3"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"0.3.1":{"name":"wpackagist-plugin\/the-loops","version":"0.3.1","version_normalized":"0.3.1.0","uid":351299,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.0.3.1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/0.3.1"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"0.4":{"name":"wpackagist-plugin\/the-loops","version":"0.4","version_normalized":"0.4.0.0","uid":351300,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.0.4.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/0.4"},"homepage
":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0.0":{"name":"wpackagist-plugin\/the-loops","version":"1.0.0","version_normalized":"1.0.0.0","uid":351301,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.1.0.0.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/1.0.0"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0.1":{"name":"wpackagist-plugin\/the-loops","version":"1.0.1","version_normalized":"1.0.1.0","uid":351302,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.1.0.1.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/1.0.1"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"1.0.2":{"name":"wpackagist-plugin\/the-loops","version":"1.0.2","version_normalized":"1.0.2.0","uid":351303,"dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.1.0.2.zip"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"tags\/1.0.2"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"},"dev-trunk":{"name":"wpackagist-plugin\/the-loops","version":"dev-trunk","version_normalized":"9999999-dev","uid":351304,"time":"2014-05-18 10:02:49","dist":{"type":"zip","url":"https:\/\/downloads.wordpress.org\/plugin\/the-loops.zip?timestamp=1400407369"},"source":{"type":"svn","url":"https:\/\/plugins.svn.wordpress.org\/the-loops\/","reference":"trunk"},"homepage":"https:\/\/wordpress.org\/plugins\/the-loops\/","require":{"composer\/installers":"~1.0"},"type":"wordpress-plugin"}}}}projects/lib/package.json { "name": "ngx-remotedata", "version": "3.0.3", "description": "RemoteData: Slaying a UI Antipattern with Angular", "repository": { "type": "git", "url": "https://github.com/joanllenas/ngx-remotedata" }, "keywords": [ "angular", "ngx", "angular2", "ng2", "pipes", "remote-data", "elm" ], "author": { "name": "", "email": "", "url": "http://joanllenas.com" }, "bugs": { "url": "https://github.com/joanllenas/ngx-remotedata/issues" }, "homepage": "https://github.com/joanllenas/ngx-remotedata#readme", "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "@angular/common": ">=6", "@angular/core": ">=6" } } 0 { "_id": "1Bp9w3mHIGlo5s7e", "name": "", "type": "spell", "data": { "type": "", "ability": "", "abilityMods": { "parts": [] }, "actionTarget": "", "actionType": "save", "activation": { "type": "move", "condition": "", "cost": 1 }, "allowedClasses": { "myst": true, "tech": false, "wysh": false }, "area": { "effect": "", "shape": "", "units": "none", "value": null }, "attackBonus": 0, "chatFlavor": "", "concentration": false, "critical": { "effect": "", "parts": [] }, "damage": { "parts": [] }, "damageNotes": "", "description": { "chat": "", "gmnotes": "", "short": "", "unidentified": "", "value": "

You inspire a moment of confusion that causes a creature to forget what occurred over the last round if it fails its saving throw. If you have failed a skill check against that target since the beginning of your last turn, you can attempt that check again without a penalty (if you have the appropriate number of actions remaining), or simply avoid the normal consequences of that failed skill check. If the target was in the middle of a task that requires longer than a single round to accomplish, it must begin again. If you target a creature casting a spell with a casting time of 1 round or longer, they must attempt a second Will save to continue casting the spell or the spell fails.

\n

A creature you have injured in the last hour gains a +4 circumstance bonus to its Will saving throw.

" }, "descriptors": [], "dismissible": false, "duration": { "units": "", "value": "instantaneous" }, "formula": "", "isActive": null, "isVariableLevel": false, "level": 1, "materials": { "consumed": false, "cost": 0, "supply": 0, "value": "" }, "modifiers": [], "preparation": { "mode": "", "prepared": false }, "range": { "additional": "5 ft.", "per": "2 levels", "units": "ft", "value": 25 }, "rollNotes": "", "save": { "type": "will", "dc": "", "descriptor": "negate" }, "school": "enc", "source": "AP #25 pg. 53", "sr": true, "target": { "type": "", "value": "one living creature" }, "uses": { "max": null, "per": "", "value": null } }, "effects": [], "flags": { "core": {} }, "img": "systems/sfrpg/icons/spells/memory_lapse.webp" }{"component.js":","component.min.js":"}{ "vorgangId": "142876", "VORGANG": { "WAHLPERIODE": "12", "VORGANGSTYP": "Bericht, Gutachten, Programm", "TITEL": "Sozialbericht 1993 (G-SIG: 12002044)", "INITIATIVE": "Bundesregierung", "AKTUELLER_STAND": "Nicht abgeschlossen - Einzelheiten siehe Vorgangsablauf", "SIGNATUR": "", "GESTA_ORDNUNGSNUMMER": "", "WICHTIGE_DRUCKSACHE": [ { "DRS_HERAUSGEBER": "BR", "DRS_NUMMER": "250/94", "DRS_TYP": "Unterrichtung" }, { "DRS_HERAUSGEBER": "BT", "DRS_NUMMER": "12/7130", "DRS_TYP": "Unterrichtung", "DRS_LINK": "http://dipbt.bundestag.de:80/dip21/btd/12/071/1207130.pdf" } ], "PLENUM": { "PLPR_KLARTEXT": "BR-Sitzung", "PLPR_HERAUSGEBER": "BR", "PLPR_NUMMER": "672", "PLPR_SEITEN": "410B - 410C", "PLPR_LINK": "http://dipbt.bundestag.de:80/dip21/brp/672.pdf#P.410" }, "EU_DOK_NR": "", "SACHGEBIET": [ "Gesellschaftspolitik, soziale Gruppen", "Soziale Sicherung" ], "SCHLAGWORT": [ "Alter Mensch", "Arbeitsmarkt", "Ausländischer Arbeitnehmer", "Behinderter", "Deutsche Einheit", "Familienpolitik", "Frauenpolitik", "Gesundheitspolitik", "Jugendlicher", "Rehabilitation", { "_fundstelle": "true", "__cdata": "Sozialbericht der Bundesregierung" }, "Sozialbudget", "Sozialpolitik", "Sozialversicherung", "Steuerpolitik", "Wohnungsbau" ], "ABSTRAKT": "Maßnahmen im Bereich der Gesellschafts- und Sozialpolitik: deutsche Einheit im Sozialbereich, Arbeitsmarkt, Arbeitsrecht, Alterssicherung, Gesundheitswesen, Rehabilitation, soziale Sicherheit, Familien-, Frauen-, Jugend- und Seniorenpolitik, Ausländerbeschäftigung, Wohnungsbau, europäische und internationale Zusammenarbeit; Sozialbudget 1993: Grundannahmen, Ergebnisse, Definitionen und Rechtsgrundlagen " }, "VORGANGSABLAUF": { "VORGANGSPOSITION": [ { "ZUORDNUNG": "BR", "URHEBER": "Unterrichtung, Urheber : Bundesregierung ", "FUNDSTELLE": "22.03.1994 - BR-Drucksache 250/94\nAnl. Übersicht über Maßnahmen und Vorhaben; Auszug aus dem Materialband ", "ZUWEISUNG": { "AUSSCHUSS_KLARTEXT": "Ausschuss für Arbeit und Sozialpolitik", "FEDERFUEHRUNG": "federführend" } }, { "ZUORDNUNG": "BR", "URHEBER": "Empfehlungen der Ausschüsse, Urheber : Ausschuss für Arbeit und Sozialpolitik ", "FUNDSTELLE": "27.06.1994 - BR-Drucksache 250/1/94", "VP_ABSTRAKT": "Stellungnahme" }, { "ZUORDNUNG": "BR", "URHEBER": "Plenarantrag, Urheber : Thüringen ", "FUNDSTELLE": "07.07.1994 - BR-Drucksache 250/2/94", "VP_ABSTRAKT": "Stellungnahme" }, { "ZUORDNUNG": "BR", "URHEBER": "BR-Sitzung", "FUNDSTELLE": "08.07.1994 - BR-Plenarprotokoll 672, S. 
410B - 410C", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/brp/672.pdf#P.410", "BESCHLUSS": { "BESCHLUSSSEITE": "410C", "BESCHLUSSTENOR": "Kenntnisnahme" } }, { "ZUORDNUNG": "BR", "URHEBER": "Beschlussdrucksache, Urheber : Bundesrat ", "FUNDSTELLE": "08.07.1994 - BR-Drucksache 250/94(B)" }, { "ZUORDNUNG": "BT", "URHEBER": "Unterrichtung, Urheber : Bundesregierung ", "FUNDSTELLE": "23.03.1994 - BT-Drucksache 12/7130", "FUNDSTELLE_LINK": "http://dipbt.bundestag.de:80/dip21/btd/12/071/1207130.pdf" } ] } } ekayis/Movie-App {"ast":null,"code":"var _jsxFileName = \"/Users/ertugrulkayis/Desktop/Movie-App/src/App.js\";\nimport React, { Component } from 'react';\nimport './App.css';\nimport 'semantic-ui-css/semantic.min.css';\nimport { Container } from 'semantic-ui-react';\nimport MoviesPage from './components/pages/MoviesPage';\nimport Footer from './components/Footer';\nimport Header from './components/Header';\nimport { Link, Route } from 'react-router-dom';\n\nclass App extends Component {\n render() {\n return React.createElement(\"div\", {\n className: \"App\",\n __source: {\n fileName: _jsxFileName,\n lineNumber: 18\n },\n __self: this\n }, React.createElement(Header, {\n __source: {\n fileName: _jsxFileName,\n lineNumber: 20\n },\n __self: this\n }), React.createElement(Container, {\n text: true,\n __source: {\n fileName: _jsxFileName,\n lineNumber: 21\n },\n __self: this\n }, React.createElement(Route, {\n path: \"/movies\",\n component: MoviesPage,\n __source: {\n fileName: _jsxFileName,\n lineNumber: 24\n },\n __self: this\n }), React.createElement(\"hr\", {\n __source: {\n fileName: _jsxFileName,\n lineNumber: 26\n },\n __self: this\n }), React.createElement(\"div\", {\n __source: {\n fileName: _jsxFileName,\n lineNumber: 27\n },\n __self: this\n }, \"askldjfaslkdfjasldkfjsdlkf\")), React.createElement(Footer, {\n __source: {\n fileName: _jsxFileName,\n lineNumber: 31\n },\n __self: this\n }));\n }\n\n}\n\nexport default App;","map":{"version":3,"sources":["/Users/ertugrulkayis/Desktop/Movie-App/src/App.js"],"names":["React","Component","Container","MoviesPage","Footer","Header","Link","Route","App","render"],"mappings":";AAAA,OAAOA,KAAP,IAAgBC,SAAhB,QAAiC,OAAjC;AACA,OAAO,WAAP;AACA,OAAO,kCAAP;AAEA,SACCC,SADD,QAEO,mBAFP;AAIA,OAAOC,UAAP,MAAuB,+BAAvB;AACA,OAAOC,MAAP,MAAmB,qBAAnB;AACA,OAAOC,MAAP,MAAmB,qBAAnB;AAEA,SAASC,IAAT,EAAeC,KAAf,QAA4B,kBAA5B;;AAEA,MAAMC,GAAN,SAAkBP,SAAlB,CAA4B;AAC1BQ,EAAAA,MAAM,GAAG;AACT,WACI;AAAK,MAAA,SAAS,EAAC,KAAf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAEE,oBAAC,MAAD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAFF,EAGF,oBAAC,SAAD;AAAW,MAAA,IAAI,MAAf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAGC,oBAAC,KAAD;AAAO,MAAA,IAAI,EAAC,SAAZ;AAAsB,MAAA,SAAS,EAAEN,UAAjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAHD,EAKM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MALN,EAMM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oCANN,CAHE,EAaF,oBAAC,MAAD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAbE,CADJ;AAiBC;;AAnByB;;AAsB5B,eAAeK,GAAf","sourcesContent":["import React, { Component } from 'react';\nimport './App.css';\nimport 'semantic-ui-css/semantic.min.css';\n\nimport {\n\tContainer\n} from 'semantic-ui-react';\n\nimport MoviesPage from './components/pages/MoviesPage';\nimport Footer from './components/Footer';\nimport Header from './components/Header';\n\nimport { Link, Route } from 'react-router-dom';\n\nclass App extends Component {\n render() {\n\t\treturn (\n
\n\t\t\t<div className=\"App\">\n\t\t\t\t<Header />\n\t\t\t\t<Container text>\n\t\t\t\t\t<Route path=\"/movies\" component={MoviesPage} />\n\n\t\t\t\t\t<hr />\n\n\t\t\t\t\t<div>askldjfaslkdfjasldkfjsdlkf</div>\n\n\t\t\t\t</Container>\n\n\t\t\t\t<Footer />\n\t\t\t</div>
\n );\n }\n}\n\nexport default App;\n"]},"metadata":{},"sourceType":"module"}1-10 {"name":" Priv. Gymnasium für Jungen und Mädchen","id":"NRW-168040","address":"Schloßstraße 1, 59073 Hamm","school_type":"Gymnasium","fax":"02381 685155","phone":"02381 6850","website":" http://www.schloss-heessen.de","email":" ","state":"NRW","programs":{"programs":["Be smart - don't Start"]},"full_time_school":false,"lon":7.847198,"lat":51.69917} { "qText": { "desc": "Text related to the attribute expression value.\nThis property is optional. No text is returned if the attribute expression value is a numeric.\n", "class": "String", "type": "primitive" }, "qNum": { "desc": "Numeric value of the attribute expression.\nThis property is set to NaN (Not a Number) if the attribute expression value is not a numeric.\nNumerical values are not returned as text.\n", "class": "Double precision floating point", "type": "primitive" } }1-10 {"id": 10338, "date": "2013-05-09 15:52:04", "user": "RobertClarke", "post": "Robert here, bringing you two new fantastic LEB deals, for 256mb and 512mb servers. These servers are great for IRC bouncers, DNS, and other light-weight tasks, but also run well in a variety of other tasks, being powered by fast LSI RAID 10 arrays, and Intel E3v2 CPUs. All VPSs are deployed in Dallas, in either the CoreXChange, Equinix and or Telx datacenters. Servers are equipped with redundant networking and redundant power.\r\n\r\n**256mb deal**\r\n- 256 RAM + 256 VSwap\r\n- 1,000GB bandwidth @ 1Gbp/s\r\n- 5GB RAID 10 SSD space\r\n- 1 E3v2 core (fair use)\r\n- 1 IPv4 address\r\n- Unmanaged\r\n- OpenVZ\r\n- Price: $2.95/mo\r\n**[Order here.](https://billing.servercrate.com/cart.php?a=add&pid=21)**\r\n\r\n**512mb deal**\r\n- 512 RAM + 512 VSwap\r\n- 2,000GB bandwidth @ 1Gbp/s\r\n- 10GB RAID 10 SSD space\r\n- 1 E3v2 core (fair use)\r\n- 1 IPv4 address\r\n- Unmanaged\r\n- OpenVZ\r\n- Price: $3.95/mo\r\n**[Order here.](https://billing.servercrate.com/cart.php?a=add&pid=14)**\r\n\r\n**Addons**\r\nAdditional IPv4 addresses are $1/IPv4.\r\nAdditional bandwidth is $5/TB.\r\n\r\n**Test IPs/100mb test file/Looking glass**\r\n172.16.31.10\r\nhttp://172.16.31.10/100MB.test\r\nhttp://lg.servercrate.com/\r\n\r\n**Operating Systems**\r\nCustomers can change their VPS operating system in the control panel. We support a variety of popular Linux OSs including CentOS, Ubuntu, and Fedora.\r\n\r\n**Control Panel**\r\nServerCrate utilizes the SolusVM control panel.\r\n\r\n**Payment Methods**\r\nYou can pay for your hosting service with Paypal, and now, bitcoins.\r\n\r\nThank you for looking, you can find our regular offerings on our website."}i18n/esn/src/vs/workbench/api/browser/viewsContainersExtensionPoint.i18n.json { "": [ "--------------------------------------------------------------------------------------------", "Copyright (c) Microsoft Corporation. All rights reserved.", "Licensed under the MIT License. See License.txt in the project root for license information.", "--------------------------------------------------------------------------------------------", "Do not edit this file. It is machine generated." ], "vscode.extension.contributes.views.containers.id": "Identificador único utilizado para identificar el contenedor en el que se pueden aportar vistas mediante el punto de contribución \"vistas\"", "vscode.extension.contributes.views.containers.title": "Cadena de texto en lenguaje natural usada para mostrar el contenedor. 
", "vscode.extension.contributes.views.containers.icon": "Ruta para el icono del contenedor. Los iconos son de 24x24 centrados en un cuadrado 50x40 y tienen un color de relleno de ' RGB (215, 218, 224) ' o ' #d7dae0 '. Se recomienda que los iconos estén en SVG, aunque se acepte cualquier tipo de archivo de imagen.", "vscode.extension.contributes.viewsContainers": "Contribuye con vistas de contenedores al editor ", "views.container.activitybar": "Contribuir vistas de contenedores a la barra de actividades", "test": "Prueba", "proposed": "la contribución ' viewsContainers ' sólo está disponible cuando se está ejecutando fuera de desarrollo con el siguiente modificador de línea de comandos: --enable-proposed-API {0}", "requirearray": "contenedores de vistas deben ser una matriz", "requireidstring": "la propiedad ' {0} ' es obligatoria y debe ser de tipo 'String'. Sólo se permiten letras alfanuméricas, ' _ ', '-'.", "requirestring": "la propiedad `{0}` es obligatoria y debe ser de tipo \"string\"", "showViewlet": "Mostrar {0}", "view": "Ver" }{"artist_id":"ARDDWUI1187FB379F7","artist_latitude":60.17116,"artist_location":"Helsinki, Finland","artist_longitude":24.93258,"artist_name":"Amoral","duration":306.38975,"num_songs":1,"song_id":"SOAEHWD12A58A7E36C","title":"Metamorphosis","year":2001}dataset/khvn/19860715.json version https://git-lfs.github.com/spec/v1 oid sha256:181066bafa675abc3832fa75e15efd0f7f8f82bf8f81c625b29af7f0487b3f67 size 14019 1-10 { "id": "d524-82", "text": "'S3u;| uojssiuisuDjj jjoddns suuinjos pstpjy\nk\nuo;40AJ3sqo uiooj |OJ4uo3 puo aBuno-|\n•dsjd uojjDAjasqo iuojj j344;uisudj4 40 joey\napouauiojd uoij\njsUiuisudj; jo |uojj puo uiooj (ojfuo^\n•*3np |otaW-Nna ONiaiM -uaAup\n-auiBua sdB 'yAX F6-a3MOd ADN30H3W3 \\,%Z-S3NI1 NOIS\n-SIWSNVU1 •uoiiaas ssoja uuojjun jo|n6uo;j4 ',()*■£-£—SVNN31NV\ns3A-ldnD3 lOdlNOD VNN3INV WV MX 0S-a311IWSNVai\n3Ninno lNiwdinoa\n'Bujp|mq 3j[4u3 joj jid pajaqij p3||OJ4U03-X|ip!uinq .'dsjd\n|3uuosjdd 6ui4DJ3do joj uiajsXs uo;|Dj36uj3jj—ONINOliiaNOD\nHIV -uistsXs J3JDM 40H-ONI1V3H ‘luauissoa pa+S-SMOONIM\n•3|j4 uinsdXQ—NOIlVinSNI ‘dn^ing—JOOd -3*aJ3uo3 pssjojuisy\n-Wld3IVW lVaniDfimS -aja^uos p33JOjui3jj—(sioiIVQNnOd\n3NllJ.no NOIlDnmSNOD\n*Buip|mq Bu;ao3| S3ui| uoissjuisudjj joj sjjoddns\n3AJ43DJ44D SO 3AJ3S SUUin|03 p3q3jy \"JID 3l|4 UO Sj UOj4D|S 3||qM\n4U3uidmb3 Bu;44;uisudj4 jo S4jun ||D ms|a 04 SJ04JSIA smo||d suiooj |OJ4\n-U03 puo j344iujsudj4 6u;punojjns spousiuojd uoj4DAj3sqo jD|n3j;3\nS3UniV3i\n•uo|d Joojj 335—dOOld ONOD3S ‘sosjo ssiajss puo S4|noA\nJ3UIJOJSUDJ4 'sBdjdq—jjoOld lSdld ’Z-SdOOld ‘ON ON-1N3W\n-3SVa Wl-nma 'U!»«W D aBjoso-aoiDVaiNOD -6UIUJ03\nuo4Jng -g-lDSlIHDUV tunH 'W apXp--QN3 d3IHD •puBMjna f\n|JDD —-aOW 1/N30 -:>ul 'luatsXg Bu^sDspoojg oiquin^—H3NMO\nJ043340JJ 34jjXq_j—y\n(MX 0?) |!°D uo|4opjD43jj—o\n(MX Off) JStMPSd s°!9-N\njaiujojsuojj^ 36d4|oA qB|H—W\n(MX S) l!°D uoj4DpjD43i|-i\n(MX S) Jasu3puo3 J34|!d-X\njaUlPay a6»i|OA h6;h—r\n(MX OS) Jasu3puo3 Ja4|!d~l\n(MXff) soia-H\n43uiqD3 uoi4nqiJ4s;Q jsmoj—Q\n4U3uid;nb3 10J4U03 ouu34uy—j\nJ3ljj|diuy J3MOJ—3\nNVId aooid CJNOD3S\nJ3jj||duiy 6uj4D|npoyy—q\nJ3jj||diuy jo4D||I3SO—0\n4!Ufl |0J4U03—g\n4U3Uidmb3 4S3J. 
puo oipny—y\nA3X !N3Wdin03\nsoNianna ONiaNvismo-Ai noiid3S" }g0v/amis-moedict {"t":"milafinan","h":[{"d":[{"f":"(在某處)過夜了,住宿過。","e":["`Milafin~`a~`n~ `niyam~ `toya~ `toloay~ `a~ `dadaya~ `i~ `pala~.我們野營三個晚上了。"]},{"f":"指過夜的、住宿的、留宿的地方。","e":["`Oya~ `niyaro'~ `ko~ `milafin~`a~`n~ `naira~.他們借宿的是那個村子。"]}]}],"stem":"lafin"}{"Department":"Новомосковська місцева прокуратура Дніпропетровської області","Name":"","Position":"прокурор Новомосковської місцевої прокуратури Дніпропетровської області","Region":"Дніпропетровська область","analytics":[{"c":1,"fi":15082,"i":109047,"y":2015},{"c":1,"fh":28.35,"fha":1,"fi":25332,"i":151786,"k":13.53,"ka":1,"y":2016},{"c":1,"fh":28.35,"fha":1,"fi":96740,"i":264452,"k":13.53,"ka":1,"y":2017},{"c":1,"fh":28.35,"fha":1,"fi":355529,"i":376629,"k":13.53,"ka":1,"m":87700.8,"y":2018},{"c":1,"fi":3725643,"h":9.64,"ha":1,"k":13.53,"ka":1,"m":100988.8,"y":2019}],"declarationsLinks":[{"id":"nacp_66c9f30a-f606-44cb-a396-537b321dedee","provider":"declarations.com.ua.opendata","year":2015},{"id":"nacp_ae4ee021-cb22-4040-905c-61a958235638","provider":"declarations.com.ua.opendata","year":2016},{"id":"nacp_94aee874-7698-4faf-801f-5ec1ce23becf","provider":"declarations.com.ua.opendata","year":2017},{"id":"nacp_cd4127ef-5578-4258-b456-0c549cec94fa","provider":"declarations.com.ua.opendata","year":2018},{"id":"nacp_a6c48071-5507-448a-b2cd-9cece322fa5a","provider":"declarations.com.ua.opendata","year":2019}],"key":"santalova_viktoriya_rudolfivna","type":"prosecutor","Декларації 2013":"","Декларації 2014":"","Декларації 2015":"https://public.nazk.gov.ua/declaration/66c9f30a-f606-44cb-a396-537b321dedee","Декларації 2016":"https://public.nazk.gov.ua/declaration/ae4ee021-cb22-4040-905c-61a958235638","Декларації доброчесності":"http://www.gp.gov.ua/integrity_profile/files/b9c8e402fe37f196ec26226f08af107e.pdf","Фото":"","Як живе":""}cblanquera/geoph {"type":"Feature","properties":{"type":"barangay","level":"4","label":"Caang, Oslob, Cebu, Central Visayas (Region VII), PH","locale":"ph.central-visayas-region-vii.cebu.oslob.caang","country_id":177,"country_reference":177,"country_name":"Philippines","region_id":"7","region_reference":"7","region_name":"Central Visayas (Region VII)","province_id":"25","province_reference":"25","province_name":"Cebu","city_id":"945","city_reference":"475","city_name":"Oslob","barangay_id":"27039","barangay_reference":"11585","barangay_name":"Caang"},"geometry":{"type":"MultiPolygon","coordinates":[[[[123.365494,9.53793],[123.393509,9.52054],[123.39283,9.51923],[123.389374,9.51337],[123.384277,9.50455],[123.358543,9.51868],[123.360107,9.52311],[123.365494,9.53793]]]]}}0 { "name": "fastify-error-page", "version": "2.0.0", "description": "The developer error page - print errors in structured HTML to the browser", "main": "index.js", "scripts": { "lint": "eslint ./**/*.js", "patch": "np patch --yolo", "minor": "np minor --yolo", "major": "np major --yolo" }, "repository": { "type": "git", "url": "git+https://github.com/hemerajs/fastify-error-page.git" }, "author": " <> (http://www.starptech.de)", "license": "ISC", "bugs": { "url": "https://github.com/hemerajs/fastify-error-page/issues" }, "homepage": "https://github.com/hemerajs/fastify-error-page#readme", "dependencies": { "fastify-plugin": "^0.2.2", "youch": "^2.0.7" }, "devDependencies": { "eslint": "^4.18.2", "eslint-config-hemera": "0.0.2", "fastify": "^1.0.0", "np": "^2.20.1", "prettier": "^1.11.1" } } { "Name": "Sporttasche", "ShortName": "STasche", "Description": "Eine Sporttasche, welche früher von 
Fitnessbegeisterten oder Fernreisenden benutzt wurde.\nNun eine typische Plündererausrüstung." }{"AGE_AT_FIRST_EXPOSURE":{"CATEGORY":["FEMALE","MALE"],"MIN_VALUE":[0,0],"P10_VALUE":[0,0],"P25_VALUE":[0,0],"MEDIAN_VALUE":[0,0],"P75_VALUE":[0,0],"P90_VALUE":[0,0],"MAX_VALUE":[0,0]},"DAYS_SUPPLY_DISTRIBUTION":{"CATEGORY":[],"MIN_VALUE":[],"P10_VALUE":[],"P25_VALUE":[],"MEDIAN_VALUE":[],"P75_VALUE":[],"P90_VALUE":[],"MAX_VALUE":[]},"DRUGS_BY_TYPE":{"CONCEPT_NAME":"Prescription written","COUNT_VALUE":382},"PREVALENCE_BY_GENDER_AGE_YEAR":{"TRELLIS_NAME":[],"SERIES_NAME":[],"X_CALENDAR_YEAR":[],"Y_PREVALENCE_1000PP":[]},"PREVALENCE_BY_MONTH":{"X_CALENDAR_MONTH":[],"Y_PREVALENCE_1000PP":[]},"DRUG_FREQUENCY_DISTRIBUTION":{"Y_NUM_PERSONS":[0,0,0,0,0,0],"X_COUNT":[1,2,3,4,5,6]},"QUANTITY_DISTRIBUTION":{"CATEGORY":"Quantity","MIN_VALUE":0.075,"P10_VALUE":0.25,"P25_VALUE":0.34,"MEDIAN_VALUE":0.5,"P75_VALUE":1.15,"P90_VALUE":1.73,"MAX_VALUE":3.85},"REFILLS_DISTRIBUTION":{"CATEGORY":[],"MIN_VALUE":[],"P10_VALUE":[],"P25_VALUE":[],"MEDIAN_VALUE":[],"P75_VALUE":[],"P90_VALUE":[],"MAX_VALUE":[]}} { "name": "broadcast-address", "version": "1.0.2", "description": "A simple utility tool to quickly calculate and return the broadcast address for a particular network interface.", "main": "broadcast-address.js", "directories": { "test": "tests" }, "scripts": { "test": "clear && mocha tests/*.spec.js" }, "repository": { "type": "git", "url": "git+https://github.com/aal89/broadcast-address.git" }, "keywords": [ "simple", "small", "lightweight", "typescript", "calculate", "broadcast", "address", "network", "interface", "cidr", "subnet", "ipv4" ], "author": "Alex", "license": "MIT", "bugs": { "url": "https://github.com/aal89/broadcast-address/issues" }, "homepage": "https://github.com/aal89/broadcast-address#readme", "devDependencies": { "chai": "^4.2.0", "mocha": "^5.2.0" }, "dependencies": { "has-deep-value": "1.1.0" } } { "kind": "Property", "name": "PublicKeyCredentialCreationOptions.challenge", "href": "https://developer.mozilla.org/en-US/docs/Web/API/PublicKeyCredentialCreationOptions/challenge", "description": "The challenge property of the PublicKeyCredentialCreationOptions dictionary is a BufferSource used as a cryptographic challenge. This is randomly generated then sent from the relying party's server. This value (among other client data) will be signed by the authenticator, using its private key, and must be sent back for verification to the server as part of AuthenticatorAttestationResponse.attestationObject.", "refs": [ { "name": "Web Authentication: An API for accessing Public Key Credentials Level 1", "href": "https://w3c.github.io/webauthn/#dom-publickeycredentialcreationoptions-challenge", "description": "challenge - Web Authentication: An API for accessing Public Key Credentials Level 1" } ] } {"errors":{},"id":2641409,"source_code":"UN","source_name":"United Nations","code":"POP_22_CAN_FEM_URB_50","name":"Population - Female / Age 50 / Urban Areas - Canada","urlize_name":"Population-Female-Age-50-Urban-Areas-Canada","description":"The United Nations Demographic Yearbook collects, compiles and disseminates official statistics on a wide range of topics. Data have been collected from national statistical authorities since 1948 through a set of questionnaires dispatched annually by the United Nations Statistics Division to over 230 national statistical offices. The Demographic Yearbook disseminates statistics on population size and composition, births, deaths, marriage and divorce on an annual basis. 
Demographic Yearbook special topics issues cover a wide range of additional topics including economic activity, educational attainment, household characteristics, housing, ethnicity and language, among others.","updated_at":"2013-12-29T04:59:17Z","frequency":"annual","from_date":"1982-12-31","to_date":"2006-12-31","column_names":["Year","Number"],"private":false,"type":null,"display_url":"http://unstats.un.org/unsd/demographic/products/dyb/dybcensusdata.htm"}33kk/uso-archivedata/uso-styles/4816.json { "id": 4816, "name": "painted black - phpinfo", "description": "phpinfo()\r\n\r\n\r\nversion 0.0.1\r\n\r\n\r\n--[description]\r\n\r\nAs a userstyle, for dealing with ones which aren't your own script. If it's your phpinfo script, you should probably just follow the URL below, and use that to see how to exclude default CSS, and paste this in its place.\r\n\r\nhttp://us3.php.net/manual/en/function.phpinfo.php#77705\r\n\r\nOf course, I intentionally blanked the class=\"v\" text for the screenshots.\r\n\r\n\r\n--[relevant]\r\n\r\nSee also:\r\nhttp://userstyles.org/styles/search/painted+black", "user": { "id": 3059, "name": " (Honestly Illustrated)", "email": "redacted", "paypal_email": "", "homepage": "http://honestlyillustrated.com", "about": "

Web and Graphic Design hobbyist and privately-trained student since c. 1996, using Photoshop since 1999.

\r\n\r\n

Author of the second thorough Stylish tutorial, in late 2007 — with credit to user 552 for the first tutorial and inspiration.

\r\n\r\n

Updated:

", "license": null }, "updated": "2013-05-18T16:12:02.000Z", "weekly_install_count": 0, "total_install_count": 604, "rating": null, "after_screenshot_name": "https://userstyles.org/style_screenshots/4816_after.png?r=1604650040", "obsoleting_style_id": null, "obsoleting_style_name": null, "obsolete": 0, "admin_delete_reason_id": null, "obsoletion_message": null, "screenshots": null, "license": null, "created": "2008-01-16T20:02:48.000Z", "category": "site", "raw_subcategory": "php", "subcategory": "php", "additional_info": null, "style_tags": [], "css": "@namespace url(http://www.w3.org/1999/xhtml);\r\n\r\n@-moz-document regexp(\".*/phpinfo.php\")\r\n{\r\n body\r\n {\r\n background-color: #222 !important;\r\n color: #bbb !important;\r\n }\r\n body, td, th, h1, h2\r\n {\r\n font-family: sans-serif !important;\r\n }\r\n pre \r\n {\r\n margin: 0px !important;\r\n font-family: monospace !important;\r\n }\r\n a:link \r\n {\r\n background-color: transparent !important;\r\n color: #9B9D73 !important;\r\n text-decoration: underline !important;\r\n }\r\n a:hover \r\n {\r\n color: #fff !important;\r\n text-decoration: overline underline !important;\r\n }\r\n table \r\n {\r\n border-collapse: collapse !important;\r\n }\r\n table[width=\"600\"]\r\n {\r\n width: 100% !important;\r\n }\r\n .center \r\n {\r\n text-align: center !important;\r\n }\r\n .center table \r\n {\r\n margin-left: auto !important;\r\n margin-right: auto !important;\r\n text-align: left !important;\r\n }\r\n .center th \r\n {\r\n text-align: center !important;\r\n }\r\n td, th \r\n {\r\n border: 1px solid #000000 !important;\r\n font-size: 75% !important;\r\n vertical-align: baseline !important;\r\n }\r\n h1 \r\n {\r\n font-size: 150% !important;\r\n margin: 1px !important;\r\n padding: 0 !important;\r\n }\r\n h2 \r\n {\r\n font-size: 125% !important;\r\n margin: 1px !important;\r\n padding: 0 !important;\r\n }\r\n .p \r\n {\r\n text-align: left !important;\r\n }\r\n .e \r\n {\r\n background-color: #555 !important;\r\n font-weight: bold !important;\r\n color: #ddd !important;\r\n width: 200px !important;\r\n }\r\n .h \r\n {\r\n background-color: #676F76 !important;\r\n font-weight: bold !important;\r\n color: #ddd !important;\r\n }\r\n .v \r\n {\r\n background-color: #3a3a3a !important;\r\n color: #bbb !important;\r\n }\r\n i \r\n {\r\n background-color: transparent !important;\r\n color: #000 !important;\r\n }\r\n img \r\n {\r\n float: right !important;\r\n border: 0px !important;\r\n }\r\n img[alt=\"PHP Logo\"],\r\n img[alt=\"Zend logo\"]\r\n {\r\n display: none !important;\r\n }\r\n hr \r\n {\r\n width: 100% !important;\r\n background-color: #000 !important;\r\n border: 0px !important;\r\n height: 1px !important;\r\n color: #000000 !important;\r\n }\r\n}", "discussions": [], "discussionsCount": 0, "commentsCount": 0, "userjs_url": "/styles/userjs/4816/painted-black-phpinfo.user.js", "style_settings": [] }Sophize/set_mm { "citations" : [ { "textCitation" : "[See eqeqan12d on Metamath](http://us.metamath.org/mpegif/eqeqan12d.html)" } ], "names" : [ "eqeqan12d" ], "language" : "METAMATH_SET_MM", "lookupTerms" : [ "#T_wph", "#T_wi", "#T_cA", "#T_wceq", "#T_cB", "#T_wps", "#T_wi", "#T_cC", "#T_wceq", "#T_cD", "#T_wph", "#T_wa", "#T_wps", "#T_wi", "#T_cA", "#T_wceq", "#T_cC", "#T_wb", "#T_cB", "#T_wceq", "#T_cD" ], "metaLanguage" : "METAMATH", "remarks" : " A useful inference for substituting definitions into an equality. (Contributed by NM, 9-Aug-1994.) (Proof shortened by , 25-May-2011.) (Proof shortened by , 20-Nov-2019.) 
\n\n---\n\n A useful inference for substituting definitions into an equality. (Contributed by NM, 9-Aug-1994.) (Proof shortened by , 25-May-2011.) ", "statement" : "eqeqan12d.1 $e |- ( ph -> A = B ) $.\neqeqan12d.2 $e |- ( ps -> C = D ) $.\neqeqan12d $p |- ( ( ph /\\ ps ) -> ( A = C <-> B = D ) ) $." }{"intents": [ {"tag": "Capital", "patterns": ["¿Cuál es la capital de Fráncia?", "¿Sabes cuál es la capital para España?", "En las capitales del mundo, ¿Cuál le pertenece a Inglaterra?", "Si te pregunto por la capital de México, ¿Cuál es?", "Háblame de cuál es la capital de Canadá"], "responses": ["La capital es..."] }, {"tag": "Raiz_Cuadrada", "patterns": ["¿Cuál es la raíz cuadrada de 12", "¿Puedes calcular la raíz cuadrada de 34.67?", "Dame la raíz cuadrada de 25", "A que no calculas la raíz de 34.56", "Calcúlame la raiz de 125.90"], "responses": ["La raíz cuadrada es..."] }, {"tag": "Idioma_Austria", "patterns": ["¿Qué idioma se habla en Austria?", "¿En Austria qué idioma hablan?", "¿Sabes cuál es el idioma hablado en austria?", "¿En qué idioma me hablan si viajo a Austria?"], "responses": ["En austria las persona hablan principalmente Alemán, aunque algunas personas hablan Francés", "Las personas en Austria hablan Alemán o Francés dependiendo de la región en la que se encuentren", "Alemán o Francés, aunque se odian las personas que hablan uno con las que hablan otro"] }, {"tag": "Animal_mas_grande", "patterns": ["¿Cuál es el animal más grande del mundo?", "Sabes cuál es el animal más grande en el mundo?", "¿En todo el mundo cuál es el animál más grande?", "Dime cuál animal es más grande en todo el planeta"], "responses": ["El animan más grande en todo el mundo es la ballena azul", "El animal más grande en el mundo conocido por el hombre es la ballena azul", "Ese animal sería la ballena azul"] }, {"tag": "ADN", "patterns": ["¿Qué significa ADN?", "¿Qué significan las siglas de ADN?", "¿Las siglas de AND para qué son utilizadas?", "¿Me puedes decir qué significa ADN?"], "responses": ["Significa Ácido Desoxiribonucléico", "Esa palabra es complicada, pero significa Ácido Desoxiribonucléico", "Pues significa Ácido Desoxiribonucléico"] }, {"tag": "Nombre", "patterns": ["¿Cómo te llamas?", "¿Cuál es tu nombre?", "¿Cómo te puedo llamar?", "¿Con quién hablo?", "¿A qué nombre respondes?", "¿Tienes algún nombre?", "¿Cómo te puedo llamar?", "¿Cómo te nombraron?"], "responses": ["Hola soy el ChatBot de ACTUMLOGOS, ¿Qué tal te va?", "Hola, mi nombre es ACTUMLOGOS, ¿Cómo estás?"] }, {"tag": "Paises_Europa", "patterns": ["¿Cuántos países hay en Europa?", "¿Cuantos paises conforman la unión Europea?", "¿Con cuantos paises cuenta europa?", "Cantidad de paises en Europa", "Cantidad de países que hay en la unión europea", "Países europeos"], "responses": ["Claro, en Europa hay 49 países", "Existen 49 países que conforman la unión europea", "La unión europea se conforma por 49 paises distintos", "Si vas a Europa podrás visitar 49 países"] }, {"tag": "Critobal_Colon", "patterns": ["¿Quién descubrió América?", "¿Sabes quién es el descubridor de América?", "¿A quién se le atribuye el descubrimiento de América?", "¿Háblame sobre el descubrimiento de América?", "¿Cuándo fue descubierta América?"], "responses": ["Cristobal Colón descubrió el 12 de Octubre de 1492", "Fue el 12 de Octubre de 1492 cuando critobal colón descubrió América"] } ] }{ "id": 7584, "title": [ "[Ant Nest, Tunnels]" ], "description": [ "The passage ends here in a wall of solid rock, but you notice a tunnel winding up through the dirt next to 
the slab. Piles of refuse, glowing with the greenish lichen that has spread over them, litter the ground at your feet." ], "paths": [ "Obvious exits: southwest" ], "location": "the dark caverns", "wayto": { "7582": "go tunnel", "7585": "southwest" }, "timeto": { "7582": 0.2, "7585": 0.2 }, "image": "wl-catacombs-1264234799.png", "image_coords": [ 135, 1048, 145, 1058 ] }{"ast":null,"code":"var _jsxFileName = \"/Users/enya/Desktop/style_me_react/client/src/components/Header/index.js\";\nimport React from \"react\";\nimport { Link } from \"react-router-dom\";\nimport \"./style.css\";\nimport { jsxDEV as _jsxDEV } from \"react/jsx-dev-runtime\";\nimport { Fragment as _Fragment } from \"react/jsx-dev-runtime\";\n\nfunction Header() {\n return /*#__PURE__*/_jsxDEV(_Fragment, {\n children: /*#__PURE__*/_jsxDEV(Link, {\n to: \"/home\",\n className: \"logo logo_container\",\n children: \"Style Me\"\n }, void 0, false, {\n fileName: _jsxFileName,\n lineNumber: 8,\n columnNumber: 7\n }, this)\n }, void 0, false);\n}\n\n_c = Header;\nexport default Header;\n\nvar _c;\n\n$RefreshReg$(_c, \"Header\");","map":{"version":3,"sources":["/Users/enya/Desktop/style_me_react/client/src/components/Header/index.js"],"names":["React","Link","Header"],"mappings":";AAAA,OAAOA,KAAP,MAAkB,OAAlB;AACA,SAASC,IAAT,QAAqB,kBAArB;AACA,OAAO,aAAP;;;;AAEA,SAASC,MAAT,GAAkB;AAChB,sBACE;AAAA,2BACE,QAAC,IAAD;AAAM,MAAA,EAAE,EAAC,OAAT;AAAiB,MAAA,SAAS,EAAC,qBAA3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AADF,mBADF;AAOD;;KARQA,M;AAUT,eAAeA,MAAf","sourcesContent":["import React from \"react\";\nimport { Link } from \"react-router-dom\";\nimport \"./style.css\";\n\nfunction Header() {\n return (\n <>\n \n Style Me\n \n \n );\n}\n\nexport default Header;\n"]},"metadata":{},"sourceType":"module"}translations/fr/flarum-nicknames.json { "flarum-nicknames": { "admin": { "permissions": { "edit_own_nickname_label": "Modifier son pseudonyme" }, "settings": { "max_label": "Longueur maximale du pseudonyme", "min_label": "Longueur minimale du pseudonyme", "random_username_label": "Rendre les noms d'utilisateur aléatoires", "random_username_help": "Ceci masquera le champ `username` lors de l'inscription, et utilisera un nombre aléatoire à la place. Cela rendra également le champ `nickname` obligatoire. Cette option ne prendra effet que si le réglage « Permettre de définir des surnoms lors de l'inscription » est activé.", "regex_label": "Expression régulière pour la validation", "set_on_registration_label": "Permettre de définir des surnoms lors de l'inscription", "unique_label": "Exiger des pseudonymes uniques ?" }, "wrong_driver": "Vous devez sélectionner \"nickname\" comme pilote de nom d'affichage sur la page Essentiels pour que cette extension prenne effet." }, "api": { "invalid_nickname_message": "Ce pseudonyme n'est pas valide. Veuillez contacter l'administrateur de votre forum pour plus d'informations sur les règles relatives aux pseudonymes." 
}, "forum": { "change_nickname": { "submit_button": "=> core.ref.save_changes", "title": "=> flarum-nicknames.ref.change_nickname" }, "edit_user": { "nicknames_heading": "=> flarum-nicknames.ref.change_nickname", "nicknames_text": "Pseudonyme" }, "settings": { "change_nickname_button": "=> flarum-nicknames.ref.change_nickname" }, "sign_up": { "nickname_placeholder": "=> flarum-nicknames.ref.nickname" } }, "ref": { "change_nickname": "Modifier le pseudonyme", "nickname": "Surnom" } } } 0 { "headers": { "Sec-WebSocket-Version": "13", "Sec-WebSocket-Key": " "Connection": "Upgrade", "Upgrade": "websocket", "Sec-WebSocket-Extensions": "permessage-deflate; client_max_window_bits", "Host": "localhost:3001" }, "isBase64Encoded": false, "multiValueHeaders": { "Sec-WebSocket-Version": ["13"], "Sec-WebSocket-Key": ["], "Connection": ["Upgrade"], "Upgrade": ["websocket"], "Sec-WebSocket-Extensions": ["permessage-deflate; client_max_window_bits"], "Host": ["localhost:3001"] }, "requestContext": { "apiId": "private", "connectedAt": 1614717450076, "connectionId": "cklsh2ue30000lhkvb9ai3bgb", "domainName": "localhost", "eventType": "CONNECT", "extendedRequestId": "cklsh2ue40001lhkvclpe4tyg", "identity": { "accessKey": null, "accountId": null, "caller": null, "cognitoAuthenticationProvider": null, "cognitoAuthenticationType": null, "cognitoIdentityId": null, "cognitoIdentityPoolId": null, "principalOrgId": null, "sourceIp": "127.0.0.1", "user": null, "userAgent": null, "userArn": null }, "messageDirection": "IN", "messageId": "cklsh2ue40002lhkvcgshdsb3", "requestId": "cklsh2ue40003lhkvff76en03", "requestTime": "02/Mar/2021:22:37:30 +0200", "requestTimeEpoch": 1614717450076, "routeKey": "$connect", "stage": "local" } } 10-100 {"title": "Facilitating fashion camouflage art.", "fields": ["face detection", "painting", "digital art", "facial recognition system", "camouflage"], "abstract": "Artists and fashion designers have recently been creating a new form of art -- Camouflage Art -- which can be used to prevent computer vision algorithms from detecting faces. This digital art technique combines makeup and hair styling, or other modifications such as facial painting to help avoid automatic face-detection. In this paper, we first study the camouflage interference and its effectiveness on several current state of art techniques in face detection/recognition; and then present a tool that can facilitate digital art design for such camouflage that can fool these computer vision algorithms. This tool can find the prominent or decisive features from facial images that constitute the face being recognized; and give suggestions for camouflage options (makeup, styling, paints) on particular facial features or facial parts. Testing of this tool shows that it can effectively aid the artists or designers in creating camouflage-thwarting designs. 
The evaluation on suggested camouflages applied on 40 celebrities across eight different face recognition systems (both non-commercial or commercial) shows that 82.5% ~ 100% of times the subject is unrecognizable using the suggested camouflage.", "citation": "Citations (3)", "departments": ["University of Texas at Dallas", "University of Texas at Dallas"], "authors": [".....http://dblp.org/pers/hd/f/Feng:Ranran", ".....http://dblp.org/pers/hd/p/Prabhakaran:Balakrishnan"], "conf": "mm", "year": "2013", "pages": 10}{ "name": "Examples", "author": "", "description": "Example videos for developers to learn and contribute to JsDbg.", "dependencies": ["catalog-viewer", "catalog"] }{ "name": "solitaire", "private": true, "version": "0.0.0", "description": "A game of solitaire", "repository": "https://github.com/g0t4/angular-solitaire", "license": "MIT", "dependencies": {}, "devDependencies": { "bower": "^1.3.1", "eslint": "^1.5.1", "http-server": "^0.8.4", "jasmine-core": "3.4.0", "karma": "4.2.0", "karma-chrome-launcher": "^0.2.0", "karma-firefox-launcher": "^0.1.6", "karma-jasmine": "2.0.1", "karma-junit-reporter": "^0.3.4" }, "scripts": { "postinstall": "node node_modules/bower/bin/bower install", "prestart": "npm install", "start": "http-server -c-1 -o", "pretest": "npm install", "test": "karma start karma.conf.js", "test-single-run": "karma start karma.conf.js --single-run" } } wechat-app/miniprogram/app.json { "pages": [ "pages/index/index", "pages/search/index", "pages/wxLogin/index", "pages/shopcart/index", "pages/mine/index", "pages/product/index", "pages/test/index", "pages/coupon_module/couponcenter/index", "pages/coupon_module/mycoupon/index", "pages/coupon_module/createcoupon/index", "pages/activity_module/activitycenter/index", "pages/activity_module/activitydetail/index", "pages/activity_module/activityorder/index", "pages/activity_module/myActivity/index", "pages/activity_module/activityProfile/index", "pages/activity_module/activitySucc/index", "pages/activity_module/activityTicket/index" ], "window": { "backgroundTextStyle": "light", "navigationBarBackgroundColor": "#d81e06", "navigationBarTitleText": "校园活动助手", "navigationBarTextStyle": "#fff" }, "tabBar": { "color": "#999", "selectedColor": "#d81e06", "backgroundColor": "#fff", "borderStyle": "#e0e0e0", "list": [ { "pagePath": "pages/index/index", "iconPath": "images/hot_no.png", "selectedIconPath": "images/hot_sel_red.png", "text": "最新活动" }, { "pagePath": "pages/product/index", "iconPath": "images/prod_no.png", "selectedIconPath": "images/prod_sel_red.png", "text": "产品中心" }, { "pagePath": "pages/shopcart/index", "iconPath": "images/cart_no.png", "selectedIconPath": "images/cart_sel_red.png", "text": "购物车" }, { "pagePath": "pages/mine/index", "iconPath": "images/mine_no.png", "selectedIconPath": "images/mine_sel_red.png", "text": "我的" } ] }, "plugins": { "myPlugin": { "version": "dev", "provider": "wx9d83b18b3c4683d2" } } }ofZach/landlinesApp {"id":6531,"line-1":"Mary","line-2":"Turkmenistan","attribution":"©2015 CNES / Astrium, Cnes/Spot Image, DigitalGlobe, Landsat","url":"https://www.google.com/maps/@37.058191,62.603531,13z/data=!3m1!1e3"}ruchjowpl/ruchjow.pl {"object":{"id":"860973","dataset":"gminy","object_id":"1585","data":{"powiaty.sejm_okreg_id":"35","nazwa_urzedu":"Urz\u0105d ","wydatki_roczne":"0.00","teryt":"281902","id":"1585","powierzchnia":"0.00","nts":"6285519022","szef_stanowisko_id":"3","nazwa":"Pozezdrze","zadluzenie_roczne":"0.00","dochody_roczne":"0.00","powiaty.nazwa":"w\u0119gorzewski","fax":"0-87 
427-93-33","adres":"Pozezdrze, Ul. 1 Maja 1 A 11-610 Pozezdrze","wojewodztwa.id":"14","telefon":"0-87 427-90-06","typ_nazwa":"Gmina wiejska","powiat_id":"340","wojewodztwa.nazwa":"Warmi\u0144sko-mazurskie","bip_www":"www.bip.pozezdrze.pl","liczba_ludnosci":3441,"typ_id":"2","wojewodztwo_id":"14","email":"","powiaty.id":"340","rada_nazwa":"Rada Gminy w Pozezdrzu"},"score":1,"layers":{"enspat":{"ADD":["yz|iIsctdClE`KaFth@hInZnN`GrMx_BhQk@dAxXd]dMjb@uFfq@mb@rOmo@`_@kFkNh[fEpo@tHGwFft@hIxSnEfcAfl@fTvIsU~Tz@rd@zV|UpbAhSvUdJz[uD~G}"],"DIFF":[]},"rada_komitety":null,"wskazniki":null,"radni_powiazania":null,"szef":null,"ostatnie_posiedzenie":null,"dzielnice":null,"dataset":null}}}"3DUfw��������!2CTev�������� 1BSdu��������0ARct�������� /@Qbs�������� .?Par�������� ->O`q�������� ,=N_p�������� +<M^o��������  * ; L ] n  � � � � � � �   ) : K \ m ~ � � � � � � �   ( 9 J [ l } � � � � � � �   ' 8 I Z k | � � � � � � �   & 7 H Y j { � � � � � � � %6GXiz�������$5FWhy�������#4EVgx�������"3DUfw��������!2CTev�������� 1BSdu��������0ARct�������� /@Qbs�������� .?Par�������� ->O`q�������� ,=N_p�������� +<M^o��������*;L]n�������):K\m~�������(9J[l}�������'8IZk|�������&7HYj{�������%6GXiz�������  $ 5 F W h y � � � � � � � !!#!4!E!V!g!x!�!�!�!�!�!�!�!""""3"D"U"f"w"�"�"�"�"�"�"�"�"#!#2#C#T#e#v#�#�#�#�#�#�#�#�#$ $1$B$S$d$u$�$�$�$�$�$�$�$�$%%0%A%R%c%t%�%�%�%�%�%�%�%�% &&/&@&StarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJ
sonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJso
nStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonS
tarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJsonStarcoderdataJson����